From 2cfc056ef2c28b4961bff5e2f6deed94afb14024 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Sat, 6 Mar 2021 19:24:18 +0800 Subject: remoteproc: introduce is_iomem to rproc_mem_entry Introduce is_iomem to indicate this piece memory is iomem or not. Reviewed-by: Bjorn Andersson Reviewed-by: Mathieu Poirier Signed-off-by: Peng Fan Link: https://lore.kernel.org/r/1615029865-23312-4-git-send-email-peng.fan@oss.nxp.com Signed-off-by: Bjorn Andersson --- include/linux/remoteproc.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index f28ee75d1005..a5f6d2d9cde2 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -315,6 +315,7 @@ struct rproc; /** * struct rproc_mem_entry - memory entry descriptor * @va: virtual address + * @is_iomem: io memory * @dma: dma address * @len: length, in bytes * @da: device address @@ -329,6 +330,7 @@ struct rproc; */ struct rproc_mem_entry { void *va; + bool is_iomem; dma_addr_t dma; size_t len; u32 da; -- cgit v1.2.3-71-gd317 From 40df0a91b2a5228ded8e5f75b80d28c96c6831cd Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Sat, 6 Mar 2021 19:24:19 +0800 Subject: remoteproc: add is_iomem to da_to_va Introduce an extra parameter is_iomem to da_to_va, then the caller could take the memory as normal memory or io mapped memory. Reviewed-by: Bjorn Andersson Reviewed-by: Mathieu Poirier Reported-by: kernel test robot Signed-off-by: Peng Fan Link: https://lore.kernel.org/r/1615029865-23312-5-git-send-email-peng.fan@oss.nxp.com Signed-off-by: Bjorn Andersson --- drivers/remoteproc/imx_rproc.c | 2 +- drivers/remoteproc/ingenic_rproc.c | 2 +- drivers/remoteproc/keystone_remoteproc.c | 2 +- drivers/remoteproc/mtk_scp.c | 6 +++--- drivers/remoteproc/omap_remoteproc.c | 2 +- drivers/remoteproc/pru_rproc.c | 2 +- drivers/remoteproc/qcom_q6v5_adsp.c | 2 +- drivers/remoteproc/qcom_q6v5_pas.c | 2 +- drivers/remoteproc/qcom_q6v5_wcss.c | 2 +- drivers/remoteproc/qcom_wcnss.c | 2 +- drivers/remoteproc/remoteproc_core.c | 7 +++++-- drivers/remoteproc/remoteproc_coredump.c | 8 ++++++-- drivers/remoteproc/remoteproc_debugfs.c | 2 +- drivers/remoteproc/remoteproc_elf_loader.c | 21 +++++++++++++++------ drivers/remoteproc/remoteproc_internal.h | 2 +- drivers/remoteproc/st_slim_rproc.c | 2 +- drivers/remoteproc/ti_k3_dsp_remoteproc.c | 2 +- drivers/remoteproc/ti_k3_r5_remoteproc.c | 2 +- drivers/remoteproc/wkup_m3_rproc.c | 2 +- include/linux/remoteproc.h | 2 +- 20 files changed, 45 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 8957ed271d20..6603e00bb6f4 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -208,7 +208,7 @@ static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da, return -ENOENT; } -static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct imx_rproc *priv = rproc->priv; void *va = NULL; diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c index e2618c36eaab..a356738160a4 100644 --- a/drivers/remoteproc/ingenic_rproc.c +++ b/drivers/remoteproc/ingenic_rproc.c @@ -121,7 +121,7 @@ static void ingenic_rproc_kick(struct rproc *rproc, int vqid) writel(vqid, vpu->aux_base + REG_CORE_MSG); } -static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void 
*ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct vpu *vpu = rproc->priv; void __iomem *va = NULL; diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c index cd266163a65f..54781f553f4e 100644 --- a/drivers/remoteproc/keystone_remoteproc.c +++ b/drivers/remoteproc/keystone_remoteproc.c @@ -246,7 +246,7 @@ static void keystone_rproc_kick(struct rproc *rproc, int vqid) * can be used either by the remoteproc core for loading (when using kernel * remoteproc loader), or by any rpmsg bus drivers. */ -static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct keystone_rproc *ksproc = rproc->priv; void __iomem *va = NULL; diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c index ce727598c41c..9679cc26895e 100644 --- a/drivers/remoteproc/mtk_scp.c +++ b/drivers/remoteproc/mtk_scp.c @@ -272,7 +272,7 @@ static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw) } /* grab the kernel address for this device address */ - ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz); + ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL); if (!ptr) { dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz); ret = -EINVAL; @@ -509,7 +509,7 @@ static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len) return NULL; } -static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct mtk_scp *scp = (struct mtk_scp *)rproc->priv; @@ -627,7 +627,7 @@ void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr) { void *ptr; - ptr = scp_da_to_va(scp->rproc, mem_addr, 0); + ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL); if (!ptr) return ERR_PTR(-EINVAL); diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c index d94b7391bf9d..43531caa1959 100644 --- a/drivers/remoteproc/omap_remoteproc.c +++ b/drivers/remoteproc/omap_remoteproc.c @@ -728,7 +728,7 @@ out: * Return: translated virtual address in kernel memory space on success, * or NULL on failure. */ -static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct omap_rproc *oproc = rproc->priv; int i; diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c index 16b204ed657b..d6086f90e809 100644 --- a/drivers/remoteproc/pru_rproc.c +++ b/drivers/remoteproc/pru_rproc.c @@ -465,7 +465,7 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len) * core for any PRU client drivers. The PRU Instruction RAM access is restricted * only to the PRU loader code. 
*/ -static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct pru_rproc *pru = rproc->priv; diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index e02450225e4a..8b0d8bbacd2e 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -281,7 +281,7 @@ static int adsp_stop(struct rproc *rproc) return ret; } -static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; int offset; diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index e635454d6170..ef85b5511dc9 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -242,7 +242,7 @@ static int adsp_stop(struct rproc *rproc) return ret; } -static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; int offset; diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c index 78ebe1168b33..704cd63c9af4 100644 --- a/drivers/remoteproc/qcom_q6v5_wcss.c +++ b/drivers/remoteproc/qcom_q6v5_wcss.c @@ -410,7 +410,7 @@ static int q6v5_wcss_stop(struct rproc *rproc) return 0; } -static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct q6v5_wcss *wcss = rproc->priv; int offset; diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index 2a6a23cb14ca..3a131163064c 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -320,7 +320,7 @@ static int wcnss_stop(struct rproc *rproc) return ret; } -static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv; int offset; diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 553e42a4d2a0..5071cdbfc926 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -189,13 +189,13 @@ EXPORT_SYMBOL(rproc_va_to_pa); * here the output of the DMA API for the carveouts, which should be more * correct. 
*/ -void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct rproc_mem_entry *carveout; void *ptr = NULL; if (rproc->ops->da_to_va) { - ptr = rproc->ops->da_to_va(rproc, da, len); + ptr = rproc->ops->da_to_va(rproc, da, len, is_iomem); if (ptr) goto out; } @@ -217,6 +217,9 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) ptr = carveout->va + offset; + if (is_iomem) + *is_iomem = carveout->is_iomem; + break; } diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c index 81ec154a6a5e..aee657cc08c6 100644 --- a/drivers/remoteproc/remoteproc_coredump.c +++ b/drivers/remoteproc/remoteproc_coredump.c @@ -153,18 +153,22 @@ static void rproc_copy_segment(struct rproc *rproc, void *dest, size_t offset, size_t size) { void *ptr; + bool is_iomem; if (segment->dump) { segment->dump(rproc, segment, dest, offset, size); } else { - ptr = rproc_da_to_va(rproc, segment->da + offset, size); + ptr = rproc_da_to_va(rproc, segment->da + offset, size, &is_iomem); if (!ptr) { dev_err(&rproc->dev, "invalid copy request for segment %pad with offset %zu and size %zu)\n", &segment->da, offset, size); memset(dest, 0xff, size); } else { - memcpy(dest, ptr, size); + if (is_iomem) + memcpy_fromio(dest, ptr, size); + else + memcpy(dest, ptr, size); } } } diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 7e5845376e9f..b5a1e3b697d9 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -132,7 +132,7 @@ static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf, char buf[100]; int len; - va = rproc_da_to_va(data->rproc, trace->da, trace->len); + va = rproc_da_to_va(data->rproc, trace->da, trace->len, NULL); if (!va) { len = scnprintf(buf, sizeof(buf), "Trace %s not available\n", diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c index df68d87752e4..11423588965a 100644 --- a/drivers/remoteproc/remoteproc_elf_loader.c +++ b/drivers/remoteproc/remoteproc_elf_loader.c @@ -175,6 +175,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) u64 offset = elf_phdr_get_p_offset(class, phdr); u32 type = elf_phdr_get_p_type(class, phdr); void *ptr; + bool is_iomem; if (type != PT_LOAD) continue; @@ -204,7 +205,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) } /* grab the kernel address for this device address */ - ptr = rproc_da_to_va(rproc, da, memsz); + ptr = rproc_da_to_va(rproc, da, memsz, &is_iomem); if (!ptr) { dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da, memsz); @@ -213,8 +214,12 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) } /* put the segment where the remote processor expects it */ - if (filesz) - memcpy(ptr, elf_data + offset, filesz); + if (filesz) { + if (is_iomem) + memcpy_fromio(ptr, (void __iomem *)(elf_data + offset), filesz); + else + memcpy(ptr, elf_data + offset, filesz); + } /* * Zero out remaining memory for this segment. @@ -223,8 +228,12 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) * did this for us. albeit harmless, we may consider removing * this. 
*/ - if (memsz > filesz) - memset(ptr + filesz, 0, memsz - filesz); + if (memsz > filesz) { + if (is_iomem) + memset_io((void __iomem *)(ptr + filesz), 0, memsz - filesz); + else + memset(ptr + filesz, 0, memsz - filesz); + } } return ret; @@ -377,6 +386,6 @@ struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc, return NULL; } - return rproc_da_to_va(rproc, sh_addr, sh_size); + return rproc_da_to_va(rproc, sh_addr, sh_size, NULL); } EXPORT_SYMBOL(rproc_elf_find_loaded_rsc_table); diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index c34002888d2c..9ea37aa687d2 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -84,7 +84,7 @@ static inline void rproc_char_device_remove(struct rproc *rproc) void rproc_free_vring(struct rproc_vring *rvring); int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); -void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len); +void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); phys_addr_t rproc_va_to_pa(void *cpu_addr); int rproc_trigger_recovery(struct rproc *rproc); diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c index 09bcb4d8b9e0..22096adc1ad3 100644 --- a/drivers/remoteproc/st_slim_rproc.c +++ b/drivers/remoteproc/st_slim_rproc.c @@ -174,7 +174,7 @@ static int slim_rproc_stop(struct rproc *rproc) return 0; } -static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct st_slim_rproc *slim_rproc = rproc->priv; void *va = NULL; diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c index 863c0214e0a8..fd4eb67a6681 100644 --- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c +++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c @@ -354,7 +354,7 @@ static int k3_dsp_rproc_stop(struct rproc *rproc) * can be used either by the remoteproc core for loading (when using kernel * remoteproc loader), or by any rpmsg bus drivers. */ -static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct k3_dsp_rproc *kproc = rproc->priv; void __iomem *va = NULL; diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c index 62b5a4c29456..5cf8d030a1f0 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -590,7 +590,7 @@ out: * present in a DSP or IPU device). The translated addresses can be used * either by the remoteproc core for loading, or by any rpmsg bus drivers. 
*/ -static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_core *core = kproc->core; diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c index 92d387dfc03b..484f7605823e 100644 --- a/drivers/remoteproc/wkup_m3_rproc.c +++ b/drivers/remoteproc/wkup_m3_rproc.c @@ -89,7 +89,7 @@ static int wkup_m3_rproc_stop(struct rproc *rproc) return error; } -static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct wkup_m3_rproc *wkupm3 = rproc->priv; void *va = NULL; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index a5f6d2d9cde2..1b7d56c7a453 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -386,7 +386,7 @@ struct rproc_ops { int (*stop)(struct rproc *rproc); int (*attach)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); - void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len); + void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc, int offset, int avail); -- cgit v1.2.3-71-gd317 From 7852546f524595245382a919e752468f73421451 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Thu, 4 Mar 2021 14:45:15 +0200 Subject: RDMA/mlx5: Fix query RoCE port mlx5_is_roce_enabled returns the devlink RoCE init value, therefore it should be used only when driver is loaded. Instead we just need to read the roce_en field. In addition, rename mlx5_is_roce_enabled to mlx5_is_roce_init_enabled. Fixes: 7a58779edd75 ("IB/mlx5: Improve query port for representor port") Link: https://lore.kernel.org/r/20210304124517.1100608-2-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 6 +++--- include/linux/mlx5/driver.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 0d69a697d75f..4be7bccefaa4 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -499,7 +499,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, translate_eth_proto_oper(eth_prot_oper, &props->active_speed, &props->active_width, ext); - if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) { + if (!dev->is_rep && dev->mdev->roce.roce_en) { u16 qkey_viol_cntr; props->port_cap_flags |= IB_PORT_CM_SUP; @@ -4174,7 +4174,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev) /* Register only for native ports */ err = mlx5_add_netdev_notifier(dev, port_num); - if (err || dev->is_rep || !mlx5_is_roce_enabled(mdev)) + if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev)) /* * We don't enable ETH interface for * 1. 
IB representors @@ -4711,7 +4711,7 @@ static int mlx5r_probe(struct auxiliary_device *adev, dev->mdev = mdev; dev->num_ports = num_ports; - if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev)) + if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev)) profile = &raw_eth_profile; else profile = &pf_profile; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 53b89631a1d9..ab07f09f2bad 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1226,7 +1226,7 @@ enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; -static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev) +static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev) { struct devlink *devlink = priv_to_devlink(dev); union devlink_param_value val; -- cgit v1.2.3-71-gd317 From 4260c4067fbba55a90037fe3ee32eff087749f83 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 24 Feb 2021 13:03:13 -0600 Subject: f2fs: Replace one-element array with flexible-array member MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a regular need in the kernel to provide a way to declare having a dynamically sized set of trailing elements in a structure. Kernel code should always use “flexible array members”[1] for these cases. The older style of one-element or zero-length arrays should no longer be used[2]. Refactor the code according to the use of a flexible-array member in struct f2fs_checkpoint, instead of a one-element arrays. Notice that a temporary pointer to void '*tmp_ptr' was used in order to fix the following errors when using a flexible array instead of a one element array in struct f2fs_checkpoint: CC [M] fs/f2fs/dir.o In file included from fs/f2fs/dir.c:13: fs/f2fs/f2fs.h: In function ‘__bitmap_ptr’: fs/f2fs/f2fs.h:2227:40: error: invalid use of flexible array member 2227 | return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32); | ^ fs/f2fs/f2fs.h:2227:49: error: invalid use of flexible array member 2227 | return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32); | ^ fs/f2fs/f2fs.h:2238:40: error: invalid use of flexible array member 2238 | return &ckpt->sit_nat_version_bitmap + offset; | ^ make[2]: *** [scripts/Makefile.build:287: fs/f2fs/dir.o] Error 1 make[1]: *** [scripts/Makefile.build:530: fs/f2fs] Error 2 make: *** [Makefile:1819: fs] Error 2 [1] https://en.wikipedia.org/wiki/Flexible_array_member [2] https://www.kernel.org/doc/html/v5.9/process/deprecated.html#zero-length-and-one-element-arrays Link: https://github.com/KSPP/linux/issues/79 Build-tested-by: kernel test robot Link: https://lore.kernel.org/lkml/603647e4.DeEFbl4eqljuwAUe%25lkp@intel.com/ Signed-off-by: Gustavo A. R. 
Silva Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/f2fs.h | 5 +++-- include/linux/f2fs_fs.h | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index cccdfb1a40ab..99e243fd26d5 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -2215,6 +2215,7 @@ static inline block_t __cp_payload(struct f2fs_sb_info *sbi) static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + void *tmp_ptr = &ckpt->sit_nat_version_bitmap; int offset; if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { @@ -2224,7 +2225,7 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) * if large_nat_bitmap feature is enabled, leave checksum * protection for all nat/sit bitmaps. */ - return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32); + return tmp_ptr + offset + sizeof(__le32); } if (__cp_payload(sbi) > 0) { @@ -2235,7 +2236,7 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) } else { offset = (flag == NAT_BITMAP) ? le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; - return &ckpt->sit_nat_version_bitmap + offset; + return tmp_ptr + offset; } } diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index c6cc0a566ef5..5487a80617a3 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -168,7 +168,7 @@ struct f2fs_checkpoint { unsigned char alloc_type[MAX_ACTIVE_LOGS]; /* SIT and NAT version bitmap */ - unsigned char sit_nat_version_bitmap[1]; + unsigned char sit_nat_version_bitmap[]; } __packed; #define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */ -- cgit v1.2.3-71-gd317 From 1bb73841ea7a88765db7f641a90120490f1f4aee Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 2 Mar 2021 07:21:33 +0100 Subject: PCI: Remove MicroGate SyncLink device IDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The drivers were removed in a1f714b44e34 (tty: Remove redundant synclink driver) and 3d608a591b2b (tty: Remove redundant synclinkmp driver). Remove the PCI device ID entries as well. Link: https://lore.kernel.org/r/20210302062214.29627-3-jslaby@suse.cz Signed-off-by: Jiri Slaby Signed-off-by: Bjorn Helgaas Reviewed-by: Krzysztof Wilczyński --- include/linux/pci_ids.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index a76ccb697bef..8a18517696c1 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2065,8 +2065,6 @@ #define PCI_DEVICE_ID_EXAR_XR17V358 0x0358 #define PCI_VENDOR_ID_MICROGATE 0x13c0 -#define PCI_DEVICE_ID_MICROGATE_USC 0x0010 -#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030 #define PCI_VENDOR_ID_3WARE 0x13C1 #define PCI_DEVICE_ID_3WARE_1000 0x1000 -- cgit v1.2.3-71-gd317 From 58483761810087e5ffdf36e84ac1bf26df909097 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Sun, 14 Mar 2021 12:13:29 +0100 Subject: thermal/drivers/core: Use a char pointer for the cooling device name We want to have any kind of name for the cooling devices as we do no longer want to rely on auto-numbering. Let's replace the cooling device's fixed array by a char pointer to be allocated dynamically when registering the cooling device, so we don't limit the length of the name. Rework the error path at the same time as we have to rollback the allocations in case of error. 
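As a minimal usage sketch of what the dynamically allocated name permits (the callback and driver-data names below are assumptions for illustration, not part of this patch), a cooling device can now be registered with a type string longer than the old THERMAL_NAME_LENGTH limit:

    static const struct thermal_cooling_device_ops demo_cooling_ops = {
            .get_max_state = demo_get_max_state,
            .get_cur_state = demo_get_cur_state,
            .set_cur_state = demo_set_cur_state,
    };

    /* The type string is kstrdup()'d by the core, so any length is accepted. */
    cdev = thermal_cooling_device_register(
                    "example-cooling-device-with-a-very-long-type-name",
                    demo_priv, &demo_cooling_ops);
    if (IS_ERR(cdev))
            return PTR_ERR(cdev);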
Tested with a dummy device having the name: "Llanfairpwllgwyngyllgogerychwyrndrobwllllantysiliogogogoch" A village on the island of Anglesey (Wales), known to have the longest name in Europe. Signed-off-by: Daniel Lezcano Reviewed-by: Lukasz Luba Tested-by: Ido Schimmel Link: https://lore.kernel.org/r/20210314111333.16551-1-daniel.lezcano@linaro.org --- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 2 +- drivers/thermal/thermal_core.c | 38 +++++++++++++--------- include/linux/thermal.h | 2 +- 3 files changed, 24 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index bf85ce9835d7..7447c2a73cbd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -141,7 +141,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal, /* Allow mlxsw thermal zone binding to an external cooling device */ for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) { if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i], - sizeof(cdev->type))) + strlen(cdev->type))) return 0; } diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 996c038f83a4..c8d4010940ef 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -960,10 +960,7 @@ __thermal_cooling_device_register(struct device_node *np, { struct thermal_cooling_device *cdev; struct thermal_zone_device *pos = NULL; - int result; - - if (type && strlen(type) >= THERMAL_NAME_LENGTH) - return ERR_PTR(-EINVAL); + int ret; if (!ops || !ops->get_max_state || !ops->get_cur_state || !ops->set_cur_state) @@ -973,14 +970,17 @@ __thermal_cooling_device_register(struct device_node *np, if (!cdev) return ERR_PTR(-ENOMEM); - result = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL); - if (result < 0) { - kfree(cdev); - return ERR_PTR(result); + ret = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL); + if (ret < 0) + goto out_kfree_cdev; + cdev->id = ret; + + cdev->type = kstrdup(type ? type : "", GFP_KERNEL); + if (!cdev->type) { + ret = -ENOMEM; + goto out_ida_remove; } - cdev->id = result; - strlcpy(cdev->type, type ? 
: "", sizeof(cdev->type)); mutex_init(&cdev->lock); INIT_LIST_HEAD(&cdev->thermal_instances); cdev->np = np; @@ -990,12 +990,9 @@ __thermal_cooling_device_register(struct device_node *np, cdev->devdata = devdata; thermal_cooling_device_setup_sysfs(cdev); dev_set_name(&cdev->device, "cooling_device%d", cdev->id); - result = device_register(&cdev->device); - if (result) { - ida_simple_remove(&thermal_cdev_ida, cdev->id); - put_device(&cdev->device); - return ERR_PTR(result); - } + ret = device_register(&cdev->device); + if (ret) + goto out_kfree_type; /* Add 'this' new cdev to the global cdev list */ mutex_lock(&thermal_list_lock); @@ -1013,6 +1010,14 @@ __thermal_cooling_device_register(struct device_node *np, mutex_unlock(&thermal_list_lock); return cdev; + +out_kfree_type: + kfree(cdev->type); + put_device(&cdev->device); +out_ida_remove: + ida_simple_remove(&thermal_cdev_ida, cdev->id); +out_kfree_cdev: + return ERR_PTR(ret); } /** @@ -1171,6 +1176,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) ida_simple_remove(&thermal_cdev_ida, cdev->id); device_del(&cdev->device); thermal_cooling_device_destroy_sysfs(cdev); + kfree(cdev->type); put_device(&cdev->device); } EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 6ac7bb1d2b1f..169502164364 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -91,7 +91,7 @@ struct thermal_cooling_device_ops { struct thermal_cooling_device { int id; - char type[THERMAL_NAME_LENGTH]; + char *type; struct device device; struct device_node *np; void *devdata; -- cgit v1.2.3-71-gd317 From eedb0b12d091a21909b5e84d9f3e5e649305bd12 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 28 Jan 2021 14:53:22 +0100 Subject: dma-mapping: add a dma_mmap_pages helper Add a helper to map memory allocated using dma_alloc_pages into a user address space, similar to the dma_alloc_attrs function for coherent allocations. Signed-off-by: Christoph Hellwig Reviewed-by: Tomasz Figa Tested-by: Ricardo Ribalda --- Documentation/core-api/dma-api.rst | 10 ++++++++++ include/linux/dma-mapping.h | 2 ++ kernel/dma/mapping.c | 13 +++++++++++++ 3 files changed, 25 insertions(+) (limited to 'include/linux') diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst index e6d23f117308..157a474ae544 100644 --- a/Documentation/core-api/dma-api.rst +++ b/Documentation/core-api/dma-api.rst @@ -563,6 +563,16 @@ Free a region of memory previously allocated using dma_alloc_pages(). dev, size, dma_handle and dir must all be the same as those passed into dma_alloc_pages(). page must be the pointer returned by dma_alloc_pages(). +:: + + int + dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, + size_t size, struct page *page) + +Map an allocation returned from dma_alloc_pages() into a user address space. +dev and size must be the same as those passed into dma_alloc_pages(). +page must be the pointer returned by dma_alloc_pages(). 
+ :: void * diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2a984cb4d1e0..2b8dce756e1f 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -263,6 +263,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); void dma_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_handle, enum dma_data_direction dir); +int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, + size_t size, struct page *page); static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index b6a633679933..9ce86c77651c 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -517,6 +517,19 @@ void dma_free_pages(struct device *dev, size_t size, struct page *page, } EXPORT_SYMBOL_GPL(dma_free_pages); +int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, + size_t size, struct page *page) +{ + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff) + return -ENXIO; + return remap_pfn_range(vma, vma->vm_start, + page_to_pfn(page) + vma->vm_pgoff, + vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot); +} +EXPORT_SYMBOL_GPL(dma_mmap_pages); + int dma_supported(struct device *dev, u64 mask) { const struct dma_map_ops *ops = get_dma_ops(dev); -- cgit v1.2.3-71-gd317 From 7d5b5738d1514e9dd8ed452660e2a4d25beb9483 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 28 Jan 2021 14:54:18 +0100 Subject: dma-mapping: add a dma_alloc_noncontiguous API Add a new API that returns a potentiall virtually non-contigous sg_table and a DMA address. This API is only properly implemented for dma-iommu and will simply return a contigious chunk as a fallback. The intent is that drivers can use this API if either: - no kernel mapping or only temporary kernel mappings are required. That is as a better replacement for DMA_ATTR_NO_KERNEL_MAPPING - a kernel mapping is required for cached and DMA mapped pages, but the driver also needs the pages to e.g. map them to userspace. In that sense it is a replacement for some aspects of the recently removed and never fully implemented DMA_ATTR_NON_CONSISTENT Signed-off-by: Christoph Hellwig Reviewed-by: Tomasz Figa Tested-by: Ricardo Ribalda --- Documentation/core-api/dma-api.rst | 78 +++++++++++++++++++++++++++ include/linux/dma-map-ops.h | 19 +++++++ include/linux/dma-mapping.h | 32 +++++++++++ kernel/dma/mapping.c | 106 +++++++++++++++++++++++++++++++++++++ 4 files changed, 235 insertions(+) (limited to 'include/linux') diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst index 157a474ae544..00a1d4fa3f9e 100644 --- a/Documentation/core-api/dma-api.rst +++ b/Documentation/core-api/dma-api.rst @@ -594,6 +594,84 @@ dev, size, dma_handle and dir must all be the same as those passed into dma_alloc_noncoherent(). cpu_addr must be the virtual address returned by dma_alloc_noncoherent(). +:: + + struct sg_table * + dma_alloc_noncontiguous(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs); + +This routine allocates bytes of non-coherent and possibly non-contiguous +memory. It returns a pointer to struct sg_table that describes the allocated +and DMA mapped memory, or NULL if the allocation failed. 
The resulting memory +can be used for struct page mapped into a scatterlist are suitable for. + +The return sg_table is guaranteed to have 1 single DMA mapped segment as +indicated by sgt->nents, but it might have multiple CPU side segments as +indicated by sgt->orig_nents. + +The dir parameter specified if data is read and/or written by the device, +see dma_map_single() for details. + +The gfp parameter allows the caller to specify the ``GFP_`` flags (see +kmalloc()) for the allocation, but rejects flags used to specify a memory +zone such as GFP_DMA or GFP_HIGHMEM. + +The attrs argument must be either 0 or DMA_ATTR_ALLOC_SINGLE_PAGES. + +Before giving the memory to the device, dma_sync_sgtable_for_device() needs +to be called, and before reading memory written by the device, +dma_sync_sgtable_for_cpu(), just like for streaming DMA mappings that are +reused. + +:: + + void + dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, + enum dma_data_direction dir) + +Free memory previously allocated using dma_alloc_noncontiguous(). dev, size, +and dir must all be the same as those passed into dma_alloc_noncontiguous(). +sgt must be the pointer returned by dma_alloc_noncontiguous(). + +:: + + void * + dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt) + +Return a contiguous kernel mapping for an allocation returned from +dma_alloc_noncontiguous(). dev and size must be the same as those passed into +dma_alloc_noncontiguous(). sgt must be the pointer returned by +dma_alloc_noncontiguous(). + +Once a non-contiguous allocation is mapped using this function, the +flush_kernel_vmap_range() and invalidate_kernel_vmap_range() APIs must be used +to manage the coherency between the kernel mapping, the device and user space +mappings (if any). + +:: + + void + dma_vunmap_noncontiguous(struct device *dev, void *vaddr) + +Unmap a kernel mapping returned by dma_vmap_noncontiguous(). dev must be the +same the one passed into dma_alloc_noncontiguous(). vaddr must be the pointer +returned by dma_vmap_noncontiguous(). + + +:: + + int + dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, + size_t size, struct sg_table *sgt) + +Map an allocation returned from dma_alloc_noncontiguous() into a user address +space. dev and size must be the same as those passed into +dma_alloc_noncontiguous(). sgt must be the pointer returned by +dma_alloc_noncontiguous(). + :: int diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 51872e736e7b..0d53a96a3d64 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -22,6 +22,11 @@ struct dma_map_ops { gfp_t gfp); void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir); + struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs); + void (*free_noncontiguous)(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, unsigned long attrs); @@ -198,6 +203,20 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, } #endif /* CONFIG_DMA_DECLARE_COHERENT */ +/* + * This is the actual return value from the ->alloc_noncontiguous method. 
+ * The users of the DMA API should only care about the sg_table, but to make + * the DMA-API internal vmaping and freeing easier we stash away the page + * array as well (except for the fallback case). This can go away any time, + * e.g. when a vmap-variant that takes a scatterlist comes along. + */ +struct dma_sgt_handle { + struct sg_table sgt; + struct page **pages; +}; +#define sgt_handle(sgt) \ + container_of((sgt), struct dma_sgt_handle, sgt) + int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2b8dce756e1f..954847f9a3e0 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -144,6 +144,15 @@ u64 dma_get_required_mask(struct device *dev); size_t dma_max_mapping_size(struct device *dev); bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); unsigned long dma_get_merge_boundary(struct device *dev); +struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, unsigned long attrs); +void dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir); +void *dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt); +void dma_vunmap_noncontiguous(struct device *dev, void *vaddr); +int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, + size_t size, struct sg_table *sgt); #else /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size, @@ -257,6 +266,29 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev) { return 0; } +static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev, + size_t size, enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs) +{ + return NULL; +} +static inline void dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ +} +static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt) +{ + return NULL; +} +static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr) +{ +} +static inline int dma_mmap_noncontiguous(struct device *dev, + struct vm_area_struct *vma, size_t size, struct sg_table *sgt) +{ + return -EINVAL; +} #endif /* CONFIG_HAS_DMA */ struct page *dma_alloc_pages(struct device *dev, size_t size, diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 07f964ebcda1..2b06a809d0b9 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -539,6 +539,112 @@ int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, } EXPORT_SYMBOL_GPL(dma_mmap_pages); +static struct sg_table *alloc_single_sgt(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp) +{ + struct sg_table *sgt; + struct page *page; + + sgt = kmalloc(sizeof(*sgt), gfp); + if (!sgt) + return NULL; + if (sg_alloc_table(sgt, 1, gfp)) + goto out_free_sgt; + page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp); + if (!page) + goto out_free_table; + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + sg_dma_len(sgt->sgl) = sgt->sgl->length; + return sgt; +out_free_table: + sg_free_table(sgt); +out_free_sgt: + kfree(sgt); + return NULL; +} + +struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, unsigned long attrs) +{ + const 
struct dma_map_ops *ops = get_dma_ops(dev); + struct sg_table *sgt; + + if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES)) + return NULL; + + if (ops && ops->alloc_noncontiguous) + sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); + else + sgt = alloc_single_sgt(dev, size, dir, gfp); + + if (sgt) { + sgt->nents = 1; + debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir); + } + return sgt; +} +EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous); + +static void free_single_sgt(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ + __dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address, + dir); + sg_free_table(sgt); + kfree(sgt); +} + +void dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); + if (ops && ops->free_noncontiguous) + ops->free_noncontiguous(dev, size, sgt, dir); + else + free_single_sgt(dev, size, sgt, dir); +} +EXPORT_SYMBOL_GPL(dma_free_noncontiguous); + +void *dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + if (ops && ops->alloc_noncontiguous) + return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL); + return page_address(sg_page(sgt->sgl)); +} +EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous); + +void dma_vunmap_noncontiguous(struct device *dev, void *vaddr) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (ops && ops->alloc_noncontiguous) + vunmap(vaddr); +} +EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous); + +int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, + size_t size, struct sg_table *sgt) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (ops && ops->alloc_noncontiguous) { + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + if (vma->vm_pgoff >= count || + vma_pages(vma) > count - vma->vm_pgoff) + return -ENXIO; + return vm_map_pages(vma, sgt_handle(sgt)->pages, count); + } + return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl)); +} +EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous); + int dma_supported(struct device *dev, u64 mask) { const struct dma_map_ops *ops = get_dma_ops(dev); -- cgit v1.2.3-71-gd317 From c124fd9a969acaa83f6dfa5e160a99a500af9e4b Mon Sep 17 00:00:00 2001 From: Gustavo Pimentel Date: Thu, 18 Feb 2021 20:03:58 +0100 Subject: PCI: Add pci_find_vsec_capability() to find a specific VSEC Add pci_find_vsec_capability() to locate a Vendor-Specific Extended Capability with the specified VSEC ID. The Vendor-Specific Extended Capability (VSEC) allows one or more proprietary capabilities defined by the vendor which aren't standard or shared between vendors. 
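A minimal, hypothetical usage sketch (the vendor macro, the VSEC ID 0x23 and the register offset 0x8 are made up purely for illustration): a driver locates its vendor-specific capability and then addresses registers relative to the returned offset:

    u16 vsec;
    u32 val;

    vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS, 0x23);
    if (!vsec)
            return -ENODEV;   /* VSEC not present on this device */

    /* VSEC registers live at offsets relative to the capability start. */
    pci_read_config_dword(pdev, vsec + 0x8, &val);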
Signed-off-by: Gustavo Pimentel Acked-by: Bjorn Helgaas Link: https://lore.kernel.org/r/d89506834fb11c6fa0bd5d515c0dd55b13ac6958.1613674948.git.gustavo.pimentel@synopsys.com Signed-off-by: Vinod Koul --- drivers/pci/pci.c | 30 ++++++++++++++++++++++++++++++ include/linux/pci.h | 1 + 2 files changed, 31 insertions(+) (limited to 'include/linux') diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 16a17215f633..97c0f2ec922b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -692,6 +692,36 @@ u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap) } EXPORT_SYMBOL_GPL(pci_find_ht_capability); +/** + * pci_find_vsec_capability - Find a vendor-specific extended capability + * @dev: PCI device to query + * @vendor: Vendor ID for which capability is defined + * @cap: Vendor-specific capability ID + * + * If @dev has Vendor ID @vendor, search for a VSEC capability with + * VSEC ID @cap. If found, return the capability offset in + * config space; otherwise return 0. + */ +u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap) +{ + u16 vsec = 0; + u32 header; + + if (vendor != dev->vendor) + return 0; + + while ((vsec = pci_find_next_ext_capability(dev, vsec, + PCI_EXT_CAP_ID_VNDR))) { + if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, + &header) == PCIBIOS_SUCCESSFUL && + PCI_VNDR_HEADER_ID(header) == cap) + return vsec; + } + + return 0; +} +EXPORT_SYMBOL_GPL(pci_find_vsec_capability); + /** * pci_find_parent_resource - return resource region of parent bus of given * region diff --git a/include/linux/pci.h b/include/linux/pci.h index 86c799c97b77..59e731a2d1f8 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1077,6 +1077,7 @@ u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap); u16 pci_find_ext_capability(struct pci_dev *dev, int cap); u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap); struct pci_bus *pci_find_next_bus(const struct pci_bus *from); +u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap); u64 pci_get_dsn(struct pci_dev *dev); -- cgit v1.2.3-71-gd317 From 2973073a80b46daebc352c31d09d95d16cf6876e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 1 Mar 2021 08:44:24 +0100 Subject: swiotlb: remove the alloc_size parameter to swiotlb_tbl_unmap_single Now that swiotlb remembers the allocation size there is no need to pass it back to swiotlb_tbl_unmap_single. 
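The caller-visible effect, sketched as a before/after pair (both forms are taken from the hunks below):

    /* before: callers had to pass the padded allocation size back in */
    swiotlb_tbl_unmap_single(dev, phys, size, aligned_size, dir, attrs);

    /* after: the bounce-buffer code looks the allocation size up itself */
    swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);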
Signed-off-by: Christoph Hellwig Signed-off-by: Konrad Rzeszutek Wilk --- drivers/iommu/dma-iommu.c | 11 +++-------- drivers/xen/swiotlb-xen.c | 4 ++-- include/linux/swiotlb.h | 1 - kernel/dma/direct.h | 2 +- kernel/dma/swiotlb.c | 45 +++++++++++++++++++++++---------------------- 5 files changed, 29 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index af765c813cc8..9149597410e2 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -499,8 +499,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr, unsigned long attrs) { struct iommu_domain *domain = iommu_get_dma_domain(dev); - struct iommu_dma_cookie *cookie = domain->iova_cookie; - struct iova_domain *iovad = &cookie->iovad; phys_addr_t phys; phys = iommu_iova_to_phys(domain, dma_addr); @@ -510,8 +508,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr, __iommu_dma_unmap(dev, dma_addr, size); if (unlikely(is_swiotlb_buffer(phys))) - swiotlb_tbl_unmap_single(dev, phys, size, - iova_align(iovad, size), dir, attrs); + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); } static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, @@ -581,10 +578,8 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys, } iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask); - if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys)) - swiotlb_tbl_unmap_single(dev, phys, org_size, - aligned_size, dir, attrs); - + if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys)) + swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs); return iova; } diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 2b385c1b4a99..d47f1b311caa 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -406,7 +406,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, * Ensure that the address returned is DMA'ble */ if (unlikely(!dma_capable(dev, dev_addr, size, true))) { - swiotlb_tbl_unmap_single(dev, map, size, size, dir, + swiotlb_tbl_unmap_single(dev, map, size, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); return DMA_MAPPING_ERROR; } @@ -445,7 +445,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, /* NOTE: We use dev_addr here, not paddr! 
*/ if (is_xen_swiotlb_buffer(hwdev, dev_addr)) - swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs); + swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs); } static void diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 5857a937c637..59f421d041ed 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -57,7 +57,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, size_t mapping_size, - size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h index b98615578737..e1bf721591c0 100644 --- a/kernel/dma/direct.h +++ b/kernel/dma/direct.h @@ -114,6 +114,6 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, dma_direct_sync_single_for_cpu(dev, addr, size, dir); if (unlikely(is_swiotlb_buffer(phys))) - swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs); + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); } #endif /* _KERNEL_DMA_DIRECT_H */ diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index c10e855a03bc..03aa614565e4 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -102,7 +102,7 @@ static phys_addr_t *io_tlb_orig_addr; /* * The mapped buffer's size should be validated during a sync operation. */ -static size_t *io_tlb_orig_size; +static size_t *io_tlb_alloc_size; /* * Protect the above data structures in the map and unmap calls @@ -253,15 +253,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) __func__, alloc_size, PAGE_SIZE); alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t)); - io_tlb_orig_size = memblock_alloc(alloc_size, PAGE_SIZE); - if (!io_tlb_orig_size) + io_tlb_alloc_size = memblock_alloc(alloc_size, PAGE_SIZE); + if (!io_tlb_alloc_size) panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); for (i = 0; i < io_tlb_nslabs; i++) { io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - io_tlb_orig_size[i] = 0; + io_tlb_alloc_size[i] = 0; } io_tlb_index = 0; no_iotlb_memory = false; @@ -393,18 +393,18 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) if (!io_tlb_orig_addr) goto cleanup4; - io_tlb_orig_size = (size_t *) + io_tlb_alloc_size = (size_t *) __get_free_pages(GFP_KERNEL, get_order(io_tlb_nslabs * sizeof(size_t))); - if (!io_tlb_orig_size) + if (!io_tlb_alloc_size) goto cleanup5; for (i = 0; i < io_tlb_nslabs; i++) { io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - io_tlb_orig_size[i] = 0; + io_tlb_alloc_size[i] = 0; } io_tlb_index = 0; no_iotlb_memory = false; @@ -436,7 +436,7 @@ void __init swiotlb_exit(void) return; if (late_alloc) { - free_pages((unsigned long)io_tlb_orig_size, + free_pages((unsigned long)io_tlb_alloc_size, get_order(io_tlb_nslabs * sizeof(size_t))); free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * sizeof(phys_addr_t))); @@ -447,7 +447,7 @@ void __init swiotlb_exit(void) } else { memblock_free_late(__pa(io_tlb_orig_addr), PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); - memblock_free_late(__pa(io_tlb_orig_size), + memblock_free_late(__pa(io_tlb_alloc_size), PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t))); memblock_free_late(__pa(io_tlb_list), PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); @@ -639,7 +639,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, */ for 
(i = 0; i < nr_slots(alloc_size + offset); i++) { io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i); - io_tlb_orig_size[index+i] = alloc_size - (i << IO_TLB_SHIFT); + io_tlb_alloc_size[index+i] = alloc_size - (i << IO_TLB_SHIFT); } tlb_addr = slot_addr(io_tlb_start, index) + offset; if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && @@ -648,14 +648,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, return tlb_addr; } -static void validate_sync_size_and_truncate(struct device *hwdev, size_t orig_size, size_t *size) +static void validate_sync_size_and_truncate(struct device *hwdev, size_t alloc_size, size_t *size) { - if (*size > orig_size) { + if (*size > alloc_size) { /* Warn and truncate mapping_size */ dev_WARN_ONCE(hwdev, 1, "Attempt for buffer overflow. Original size: %zu. Mapping size: %zu.\n", - orig_size, *size); - *size = orig_size; + alloc_size, *size); + *size = alloc_size; } } @@ -663,16 +663,17 @@ static void validate_sync_size_and_truncate(struct device *hwdev, size_t orig_si * tlb_addr is the physical address of the bounce buffer to unmap. */ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t mapping_size, size_t alloc_size, - enum dma_data_direction dir, unsigned long attrs) + size_t mapping_size, enum dma_data_direction dir, + unsigned long attrs) { unsigned long flags; unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr); - int i, count, nslots = nr_slots(alloc_size + offset); int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT; phys_addr_t orig_addr = io_tlb_orig_addr[index]; + size_t alloc_size = io_tlb_alloc_size[index]; + int i, count, nslots = nr_slots(alloc_size + offset); - validate_sync_size_and_truncate(hwdev, io_tlb_orig_size[index], &mapping_size); + validate_sync_size_and_truncate(hwdev, alloc_size, &mapping_size); /* * First, sync the memory before unmapping the entry @@ -701,7 +702,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, for (i = index + nslots - 1; i >= index; i--) { io_tlb_list[i] = ++count; io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - io_tlb_orig_size[i] = 0; + io_tlb_alloc_size[i] = 0; } /* @@ -721,13 +722,13 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, enum dma_sync_target target) { int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; - size_t orig_size = io_tlb_orig_size[index]; + size_t alloc_size = io_tlb_alloc_size[index]; phys_addr_t orig_addr = io_tlb_orig_addr[index]; if (orig_addr == INVALID_PHYS_ADDR) return; - validate_sync_size_and_truncate(hwdev, orig_size, &size); + validate_sync_size_and_truncate(hwdev, alloc_size, &size); switch (target) { case SYNC_FOR_CPU: @@ -770,7 +771,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, /* Ensure that the address returned is DMA'ble */ dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr); if (unlikely(!dma_capable(dev, dma_addr, size, true))) { - swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir, + swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); dev_WARN_ONCE(dev, 1, "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", -- cgit v1.2.3-71-gd317 From 80808d273a3f075196d1a26463f65d4c9d2891c8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 1 Mar 2021 08:44:26 +0100 Subject: swiotlb: split swiotlb_tbl_sync_single Split swiotlb_tbl_sync_single into two separate funtions for the to device and to cpu synchronization. 
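The caller-side translation this implies, sketched as a before/after pair (both spellings are taken from the hunks below):

    /* before: one entry point, selected by an enum dma_sync_target */
    swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
    swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

    /* after: one helper per direction of the ownership transfer */
    swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
    swiotlb_sync_single_for_device(dev, paddr, size, dir);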
Signed-off-by: Christoph Hellwig Signed-off-by: Konrad Rzeszutek Wilk --- drivers/iommu/dma-iommu.c | 12 ++++++------ drivers/xen/swiotlb-xen.c | 4 ++-- include/linux/swiotlb.h | 17 ++++------------- kernel/dma/direct.c | 8 ++++---- kernel/dma/direct.h | 4 ++-- kernel/dma/swiotlb.c | 34 +++++++++++++++------------------- 6 files changed, 33 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 9149597410e2..3087d9fa6065 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -750,7 +750,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev, arch_sync_dma_for_cpu(phys, size, dir); if (is_swiotlb_buffer(phys)) - swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU); + swiotlb_sync_single_for_cpu(dev, phys, size, dir); } static void iommu_dma_sync_single_for_device(struct device *dev, @@ -763,7 +763,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev, phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); if (is_swiotlb_buffer(phys)) - swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE); + swiotlb_sync_single_for_device(dev, phys, size, dir); if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_device(phys, size, dir); @@ -784,8 +784,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev, arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); if (is_swiotlb_buffer(sg_phys(sg))) - swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, - dir, SYNC_FOR_CPU); + swiotlb_sync_single_for_cpu(dev, sg_phys(sg), + sg->length, dir); } } @@ -801,8 +801,8 @@ static void iommu_dma_sync_sg_for_device(struct device *dev, for_each_sg(sgl, sg, nelems, i) { if (is_swiotlb_buffer(sg_phys(sg))) - swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, - dir, SYNC_FOR_DEVICE); + swiotlb_sync_single_for_device(dev, sg_phys(sg), + sg->length, dir); if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index d47f1b311caa..4e8a4e14942a 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -462,7 +462,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, } if (is_xen_swiotlb_buffer(dev, dma_addr)) - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); + swiotlb_sync_single_for_cpu(dev, paddr, size, dir); } static void @@ -472,7 +472,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr); if (is_xen_swiotlb_buffer(dev, dma_addr)) - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE); + swiotlb_sync_single_for_device(dev, paddr, size, dir); if (!dev_is_dma_coherent(dev)) { if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 59f421d041ed..0696bdc8072e 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -42,14 +42,6 @@ extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); extern int swiotlb_late_init_with_default_size(size_t default_size); extern void __init swiotlb_update_mem_attributes(void); -/* - * Enumeration for sync targets - */ -enum dma_sync_target { - SYNC_FOR_CPU = 0, - SYNC_FOR_DEVICE = 1, -}; - phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, size_t mapping_size, size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); @@ -60,11 +52,10 @@ extern void swiotlb_tbl_unmap_single(struct 
device *hwdev, enum dma_data_direction dir, unsigned long attrs); -extern void swiotlb_tbl_sync_single(struct device *hwdev, - phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, - enum dma_sync_target target); - +void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir); +void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir); dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs); diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 002268262c9a..f737e3347059 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -344,8 +344,8 @@ void dma_direct_sync_sg_for_device(struct device *dev, phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); if (unlikely(is_swiotlb_buffer(paddr))) - swiotlb_tbl_sync_single(dev, paddr, sg->length, - dir, SYNC_FOR_DEVICE); + swiotlb_sync_single_for_device(dev, paddr, sg->length, + dir); if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_device(paddr, sg->length, @@ -370,8 +370,8 @@ void dma_direct_sync_sg_for_cpu(struct device *dev, arch_sync_dma_for_cpu(paddr, sg->length, dir); if (unlikely(is_swiotlb_buffer(paddr))) - swiotlb_tbl_sync_single(dev, paddr, sg->length, dir, - SYNC_FOR_CPU); + swiotlb_sync_single_for_cpu(dev, paddr, sg->length, + dir); if (dir == DMA_FROM_DEVICE) arch_dma_mark_clean(paddr, sg->length); diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h index e1bf721591c0..50afc05b6f1d 100644 --- a/kernel/dma/direct.h +++ b/kernel/dma/direct.h @@ -57,7 +57,7 @@ static inline void dma_direct_sync_single_for_device(struct device *dev, phys_addr_t paddr = dma_to_phys(dev, addr); if (unlikely(is_swiotlb_buffer(paddr))) - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE); + swiotlb_sync_single_for_device(dev, paddr, size, dir); if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_device(paddr, size, dir); @@ -74,7 +74,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev, } if (unlikely(is_swiotlb_buffer(paddr))) - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); + swiotlb_sync_single_for_cpu(dev, paddr, size, dir); if (dir == DMA_FROM_DEVICE) arch_dma_mark_clean(paddr, size); diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index a431c6b64e82..5fe8781be6f2 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -715,26 +715,22 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, spin_unlock_irqrestore(&io_tlb_lock, flags); } -void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, - enum dma_sync_target target) +void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir) { - switch (target) { - case SYNC_FOR_CPU: - if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(hwdev, tlb_addr, size, DMA_FROM_DEVICE); - else - BUG_ON(dir != DMA_TO_DEVICE); - break; - case SYNC_FOR_DEVICE: - if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(hwdev, tlb_addr, size, DMA_TO_DEVICE); - else - BUG_ON(dir != DMA_FROM_DEVICE); - break; - default: - BUG(); - } + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) + swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE); + else + BUG_ON(dir != DMA_FROM_DEVICE); +} + +void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, 
+ size_t size, enum dma_data_direction dir) +{ + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) + swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE); + else + BUG_ON(dir != DMA_TO_DEVICE); } /* -- cgit v1.2.3-71-gd317 From 3093c3c7c136458af692d5c3d309a66c3c12d9f4 Mon Sep 17 00:00:00 2001 From: Arnaud Pouliquen Date: Thu, 11 Mar 2021 15:04:09 +0100 Subject: rpmsg: Move RPMSG_ADDR_ANY in user API As the RPMSG_ADDR_ANY is a valid src or dst address that can be set by user applications, migrate its definition in user API. Reviewed-by: Bjorn Andersson Reviewed-by: Mathieu Poirier Signed-off-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20210311140413.31725-3-arnaud.pouliquen@foss.st.com Signed-off-by: Bjorn Andersson --- include/linux/rpmsg.h | 3 +-- include/uapi/linux/rpmsg.h | 2 ++ 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index a5db828b2420..d97dcd049f18 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -18,8 +18,7 @@ #include #include #include - -#define RPMSG_ADDR_ANY 0xFFFFFFFF +#include struct rpmsg_device; struct rpmsg_endpoint; diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h index e14c6dab4223..5e00748da319 100644 --- a/include/uapi/linux/rpmsg.h +++ b/include/uapi/linux/rpmsg.h @@ -9,6 +9,8 @@ #include #include +#define RPMSG_ADDR_ANY 0xFFFFFFFF + /** * struct rpmsg_endpoint_info - endpoint info representation * @name: name of service -- cgit v1.2.3-71-gd317 From 3542dcb15cef66c0b9e6c3b33168eb657e0d9520 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 5 Mar 2021 16:32:34 +0000 Subject: iommu/dma: Resurrect the "forcedac" option In converting intel-iommu over to the common IOMMU DMA ops, it quietly lost the functionality of its "forcedac" option. Since this is a handy thing both for testing and for performance optimisation on certain platforms, reimplement it under the common IOMMU parameter namespace. For the sake of fixing the inadvertent breakage of the Intel-specific parameter, remove the dmar_forcedac remnants and hook it up as an alias while documenting the transition to the new common parameter. Fixes: c588072bba6b ("iommu/vt-d: Convert intel iommu driver to the iommu ops") Signed-off-by: Robin Murphy Acked-by: Lu Baolu Reviewed-by: John Garry Link: https://lore.kernel.org/r/7eece8e0ea7bfbe2cd0e30789e0d46df573af9b0.1614961776.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- Documentation/admin-guide/kernel-parameters.txt | 15 ++++++++------- drivers/iommu/dma-iommu.c | 13 ++++++++++++- drivers/iommu/intel/iommu.c | 5 ++--- include/linux/dma-iommu.h | 2 ++ 4 files changed, 24 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 04545725f187..835f810f2f26 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1869,13 +1869,6 @@ bypassed by not enabling DMAR with this option. In this case, gfx device will use physical address for DMA. - forcedac [X86-64] - With this option iommu will not optimize to look - for io virtual address below 32-bit forcing dual - address cycle on pci bus for cards supporting greater - than 32-bit addressing. The default is to look - for translation below 32-bit and if not available - then look in the higher range. 
strict [Default Off] With this option on every unmap_single operation will result in a hardware IOTLB flush operation as opposed @@ -1964,6 +1957,14 @@ nobypass [PPC/POWERNV] Disable IOMMU bypass, using IOMMU for PCI devices. + iommu.forcedac= [ARM64, X86] Control IOVA allocation for PCI devices. + Format: { "0" | "1" } + 0 - Try to allocate a 32-bit DMA address first, before + falling back to the full range if needed. + 1 - Allocate directly from the full usable range, + forcing Dual Address Cycle for PCI cards supporting + greater than 32-bit addressing. + iommu.strict= [ARM64] Configure TLB invalidation behaviour Format: { "0" | "1" } 0 - Lazy mode. diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index af765c813cc8..fdd095e1fa52 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -52,6 +52,17 @@ struct iommu_dma_cookie { }; static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled); +bool iommu_dma_forcedac __read_mostly; + +static int __init iommu_dma_forcedac_setup(char *str) +{ + int ret = kstrtobool(str, &iommu_dma_forcedac); + + if (!ret && iommu_dma_forcedac) + pr_info("Forcing DAC for PCI devices\n"); + return ret; +} +early_param("iommu.forcedac", iommu_dma_forcedac_setup); void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, struct iommu_domain *domain) @@ -444,7 +455,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); /* Try to get PCI devices a SAC address */ - if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) + if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev)) iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift, false); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index ee0932307d64..1c32522220bc 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -360,7 +360,6 @@ int intel_iommu_enabled = 0; EXPORT_SYMBOL_GPL(intel_iommu_enabled); static int dmar_map_gfx = 1; -static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; static int iommu_identity_mapping; @@ -451,8 +450,8 @@ static int __init intel_iommu_setup(char *str) dmar_map_gfx = 0; pr_info("Disable GFX device mapping\n"); } else if (!strncmp(str, "forcedac", 8)) { - pr_info("Forcing DAC for PCI devices\n"); - dmar_forcedac = 1; + pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n"); + iommu_dma_forcedac = true; } else if (!strncmp(str, "strict", 6)) { pr_info("Disable batched IOTLB flush\n"); intel_iommu_strict = 1; diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 706b68d1359b..13d1f4c14d7b 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, struct iommu_domain *domain); +extern bool iommu_dma_forcedac; + #else /* CONFIG_IOMMU_DMA */ struct iommu_domain; -- cgit v1.2.3-71-gd317 From 6ca69e5841f01ccbfa45e56577e1b33e14e53504 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Thu, 18 Mar 2021 08:53:40 +0800 Subject: iommu/vt-d: Report more information about invalidation errors When the invalidation queue errors are encountered, dump the information logged by the VT-d hardware together with the pending queue invalidation descriptors. 
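For illustration, a rough sketch of what the new decoding amounts to (not part of this patch; decode_iqer() is a hypothetical helper, and the bit layout shown is the one captured by the DMAR_IQER_REG_* macros added below):

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/printk.h>

/* Pull the reason and source-id fields out of the 64-bit invalidation
 * queue error record (DMAR_IQER_REG).
 */
static void decode_iqer(u64 iqe_err)
{
	u64 iqei   = FIELD_GET(GENMASK_ULL(3, 0), iqe_err);	/* IQE reason */
	u64 itesid = FIELD_GET(GENMASK_ULL(47, 32), iqe_err);	/* ITE source id */
	u64 icesid = FIELD_GET(GENMASK_ULL(63, 48), iqe_err);	/* ICE source id */

	pr_err("IQEI %llx, ITESID %llx, ICESID %llx\n", iqei, itesid, icesid);
}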
Signed-off-by: Ashok Raj Tested-by: Guo Kaijie Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20210318005340.187311-1-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/dmar.c | 68 ++++++++++++++++++++++++++++++++++++++++++--- include/linux/intel-iommu.h | 6 ++++ 2 files changed, 70 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index d5c51b5c20af..6971397805f3 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1205,6 +1205,63 @@ static inline void reclaim_free_desc(struct q_inval *qi) } } +static const char *qi_type_string(u8 type) +{ + switch (type) { + case QI_CC_TYPE: + return "Context-cache Invalidation"; + case QI_IOTLB_TYPE: + return "IOTLB Invalidation"; + case QI_DIOTLB_TYPE: + return "Device-TLB Invalidation"; + case QI_IEC_TYPE: + return "Interrupt Entry Cache Invalidation"; + case QI_IWD_TYPE: + return "Invalidation Wait"; + case QI_EIOTLB_TYPE: + return "PASID-based IOTLB Invalidation"; + case QI_PC_TYPE: + return "PASID-cache Invalidation"; + case QI_DEIOTLB_TYPE: + return "PASID-based Device-TLB Invalidation"; + case QI_PGRP_RESP_TYPE: + return "Page Group Response"; + default: + return "UNKNOWN"; + } +} + +static void qi_dump_fault(struct intel_iommu *iommu, u32 fault) +{ + unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG); + u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG); + struct qi_desc *desc = iommu->qi->desc + head; + + if (fault & DMA_FSTS_IQE) + pr_err("VT-d detected Invalidation Queue Error: Reason %llx", + DMAR_IQER_REG_IQEI(iqe_err)); + if (fault & DMA_FSTS_ITE) + pr_err("VT-d detected Invalidation Time-out Error: SID %llx", + DMAR_IQER_REG_ITESID(iqe_err)); + if (fault & DMA_FSTS_ICE) + pr_err("VT-d detected Invalidation Completion Error: SID %llx", + DMAR_IQER_REG_ICESID(iqe_err)); + + pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n", + qi_type_string(desc->qw0 & 0xf), + (unsigned long long)desc->qw0, + (unsigned long long)desc->qw1); + + head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH; + head <<= qi_shift(iommu); + desc = iommu->qi->desc + head; + + pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n", + qi_type_string(desc->qw0 & 0xf), + (unsigned long long)desc->qw0, + (unsigned long long)desc->qw1); +} + static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index) { u32 fault; @@ -1216,6 +1273,8 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index) return -EAGAIN; fault = readl(iommu->reg + DMAR_FSTS_REG); + if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE)) + qi_dump_fault(iommu, fault); /* * If IQE happens, the head points to the descriptor associated @@ -1232,12 +1291,10 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index) * used by software as private data. We won't print * out these two qw's for security consideration. 
*/ - pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n", - (unsigned long long)desc->qw0, - (unsigned long long)desc->qw1); memcpy(desc, qi->desc + (wait_index << shift), 1 << shift); writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG); + pr_info("Invalidation Queue Error (IQE) cleared\n"); return -EINVAL; } } @@ -1254,6 +1311,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index) tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH; writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG); + pr_info("Invalidation Time-out Error (ITE) cleared\n"); do { if (qi->desc_status[head] == QI_IN_USE) @@ -1265,8 +1323,10 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index) return -EAGAIN; } - if (fault & DMA_FSTS_ICE) + if (fault & DMA_FSTS_ICE) { writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG); + pr_info("Invalidation Completion Error (ICE) cleared\n"); + } return 0; } diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 1bc46b88711a..1732298ce888 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -80,6 +81,7 @@ #define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ +#define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */ #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ #define DMAR_PQH_REG 0xc0 /* Page request queue head register */ #define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ @@ -126,6 +128,10 @@ #define DMAR_VCMD_REG 0xe10 /* Virtual command register */ #define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */ +#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg) +#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg) +#define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg) + #define OFFSET_STRIDE (9) #define dmar_readq(a) readq(a) -- cgit v1.2.3-71-gd317 From 4196d18903f94090f0a223d65de25e3bf50a3d13 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Fri, 12 Mar 2021 09:24:39 -0700 Subject: remoteproc: Add new RPROC_ATTACHED state Add a new RPROC_ATTACHED state to take into account scenarios where the remoteproc core needs to attach to a remote processor that is booted by another entity. 
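For illustration only (not part of this patch; my_rproc_externally_booted() is a hypothetical helper), the new state lets code tell a processor the core merely attached to apart from one it booted itself, without needing a separate flag:

#include <linux/remoteproc.h>

static bool my_rproc_externally_booted(struct rproc *rproc)
{
	/* RPROC_ATTACHED is only set on the attach path, never by a normal boot */
	return rproc->state == RPROC_ATTACHED;
}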
Signed-off-by: Mathieu Poirier Reviewed-by: Peng Fan Reviewed-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20210312162453.1234145-4-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_sysfs.c | 1 + include/linux/remoteproc.h | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index 6840dad931d5..26fd9ceecdb5 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -172,6 +172,7 @@ static const char * const rproc_state_string[] = { [RPROC_RUNNING] = "running", [RPROC_CRASHED] = "crashed", [RPROC_DELETED] = "deleted", + [RPROC_ATTACHED] = "attached", [RPROC_DETACHED] = "detached", [RPROC_LAST] = "invalid", }; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 1b7d56c7a453..9193a8fb5b68 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -407,6 +407,8 @@ struct rproc_ops { * @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery * @RPROC_DELETED: device is deleted + * @RPROC_ATTACHED: device has been booted by another entity and the core + * has attached to it * @RPROC_DETACHED: device has been booted by another entity and waiting * for the core to attach to it * @RPROC_LAST: just keep this one at the end @@ -423,8 +425,9 @@ enum rproc_state { RPROC_RUNNING = 2, RPROC_CRASHED = 3, RPROC_DELETED = 4, - RPROC_DETACHED = 5, - RPROC_LAST = 6, + RPROC_ATTACHED = 5, + RPROC_DETACHED = 6, + RPROC_LAST = 7, }; /** -- cgit v1.2.3-71-gd317 From 76f4c87587e2ff41e9b9867ffde2137f27ba39b9 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Fri, 12 Mar 2021 09:24:40 -0700 Subject: remoteproc: Properly represent the attached state There is a need to know when a remote processor has been attached to rather than booted by the remoteproc core. In order to avoid manipulating two variables, i.e rproc::autonomous and rproc::state, get rid of the former and simply use the newly introduced RPROC_ATTACHED state. Signed-off-by: Mathieu Poirier Reviewed-by: Peng Fan Reviewed-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20210312162453.1234145-5-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 20 +------------------- drivers/remoteproc/remoteproc_sysfs.c | 5 +---- include/linux/remoteproc.h | 2 -- 3 files changed, 2 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 9127b2aa5424..d828c01f4f8a 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1450,7 +1450,7 @@ static int __rproc_attach(struct rproc *rproc) goto stop_rproc; } - rproc->state = RPROC_RUNNING; + rproc->state = RPROC_ATTACHED; dev_info(dev, "remote processor %s is now attached\n", rproc->name); @@ -1665,14 +1665,6 @@ static int rproc_stop(struct rproc *rproc, bool crashed) rproc->state = RPROC_OFFLINE; - /* - * The remote processor has been stopped and is now offline, which means - * that the next time it is brought back online the remoteproc core will - * be responsible to load its firmware. As such it is no longer - * autonomous. 
- */ - rproc->autonomous = false; - dev_info(dev, "stopped remote processor %s\n", rproc->name); return 0; @@ -2083,16 +2075,6 @@ int rproc_add(struct rproc *rproc) if (ret < 0) return ret; - /* - * Remind ourselves the remote processor has been attached to rather - * than booted by the remoteproc core. This is important because the - * RPROC_DETACHED state will be lost as soon as the remote processor - * has been attached to. Used in firmware_show() and reset in - * rproc_stop(). - */ - if (rproc->state == RPROC_DETACHED) - rproc->autonomous = true; - /* if rproc is marked always-on, request it to boot */ if (rproc->auto_boot) { ret = rproc_trigger_auto_boot(rproc); diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index 26fd9ceecdb5..4f58be1e13c1 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -138,11 +138,8 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, * If the remote processor has been started by an external * entity we have no idea of what image it is running. As such * simply display a generic string rather then rproc->firmware. - * - * Here we rely on the autonomous flag because a remote processor - * may have been attached to and currently in a running state. */ - if (rproc->autonomous) + if (rproc->state == RPROC_ATTACHED) firmware = "unknown"; return sprintf(buf, "%s\n", firmware); diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 9193a8fb5b68..9e42e90cd9da 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -514,7 +514,6 @@ struct rproc_dump_segment { * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU * @auto_boot: flag to indicate if remote processor should be auto-started - * @autonomous: true if an external entity has booted the remote processor * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc * @char_dev: character device of the rproc @@ -551,7 +550,6 @@ struct rproc { size_t table_sz; bool has_iommu; bool auto_boot; - bool autonomous; struct list_head dump_segments; int nb_vdev; u8 elf_class; -- cgit v1.2.3-71-gd317 From 1a631382be1d22ddab0582dae3498b3d28e2e44a Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Fri, 12 Mar 2021 09:24:41 -0700 Subject: remoteproc: Add new get_loaded_rsc_table() to rproc_ops Add a new get_loaded_rsc_table() operation in order to support scenarios where the remoteproc core has booted a remote processor and detaches from it. When re-attaching to the remote processor, the core needs to know where the resource table has been placed in memory. 
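A minimal sketch of how a platform driver might implement the new hook (assumptions, not from this series: the my_rproc structure, its rsc_va/rsc_len fields and the my_rproc_attach() callback are hypothetical):

#include <linux/remoteproc.h>

struct my_rproc {
	void *rsc_va;	/* resource table left in memory by the boot entity, mapped at probe */
	size_t rsc_len;
};

static int my_rproc_attach(struct rproc *rproc)
{
	return 0;	/* nothing to do, the processor is already running */
}

static struct resource_table *
my_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
{
	struct my_rproc *priv = rproc->priv;

	*table_sz = priv->rsc_len;
	return priv->rsc_va;
}

static const struct rproc_ops my_rproc_ops = {
	.attach			= my_rproc_attach,
	.get_loaded_rsc_table	= my_rproc_get_loaded_rsc_table,
};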
Signed-off-by: Mathieu Poirier Reviewed-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20210312162453.1234145-6-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 32 ++++++++++++++++++++++++++++++++ drivers/remoteproc/remoteproc_internal.h | 10 ++++++++++ include/linux/remoteproc.h | 6 +++++- 3 files changed, 47 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index d828c01f4f8a..c7e1cf1b6f66 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1543,6 +1543,32 @@ disable_iommu: return ret; } +static int rproc_set_rsc_table(struct rproc *rproc) +{ + struct resource_table *table_ptr; + struct device *dev = &rproc->dev; + size_t table_sz; + int ret; + + table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz); + if (!table_ptr) { + /* Not having a resource table is acceptable */ + return 0; + } + + if (IS_ERR(table_ptr)) { + ret = PTR_ERR(table_ptr); + dev_err(dev, "can't load resource table: %d\n", ret); + return ret; + } + + rproc->cached_table = NULL; + rproc->table_ptr = table_ptr; + rproc->table_sz = table_sz; + + return 0; +} + /* * Attach to remote processor - similar to rproc_fw_boot() but without * the steps that deal with the firmware image. @@ -1562,6 +1588,12 @@ static int rproc_attach(struct rproc *rproc) return ret; } + ret = rproc_set_rsc_table(rproc); + if (ret) { + dev_err(dev, "can't load resource table: %d\n", ret); + goto disable_iommu; + } + /* reset max_notifyid */ rproc->max_notifyid = -1; diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index 9ea37aa687d2..a328e634b1de 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -177,6 +177,16 @@ struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc, return NULL; } +static inline +struct resource_table *rproc_get_loaded_rsc_table(struct rproc *rproc, + size_t *size) +{ + if (rproc->ops->get_loaded_rsc_table) + return rproc->ops->get_loaded_rsc_table(rproc, size); + + return NULL; +} + static inline bool rproc_u64_fit_in_size_t(u64 val) { diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 9e42e90cd9da..eee338177a3d 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -370,7 +370,9 @@ enum rsc_handling_status { * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a * negative value on error * @load_rsc_table: load resource table from firmware image - * @find_loaded_rsc_table: find the loaded resouce table + * @find_loaded_rsc_table: find the loaded resource table from firmware image + * @get_loaded_rsc_table: get resource table installed in memory + * by external entity * @load: load firmware to memory, where the remote processor * expects to find it * @sanity_check: sanity check the fw image @@ -392,6 +394,8 @@ struct rproc_ops { int offset, int avail); struct resource_table *(*find_loaded_rsc_table)( struct rproc *rproc, const struct firmware *fw); + struct resource_table *(*get_loaded_rsc_table)( + struct rproc *rproc, size_t *size); int (*load)(struct rproc *rproc, const struct firmware *fw); int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); -- cgit v1.2.3-71-gd317 From 7f3bd0c019cb813448d867c17c9b9dad205a13eb Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Fri, 
12 Mar 2021 09:24:44 -0700 Subject: remoteproc: Add new detach() remoteproc operation Add a new detach() operation in order to support scenarios where the remoteproc core is going away but the remote processor is kept operating. This could be the case when the system is rebooted or when the platform driver is removed. Signed-off-by: Mathieu Poirier Reviewed-by: Peng Fan Reviewed-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20210312162453.1234145-9-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- include/linux/remoteproc.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index eee338177a3d..2f1f0fbc3994 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -363,6 +363,7 @@ enum rsc_handling_status { * @start: power on the device and boot it * @stop: power off the device * @attach: attach to a device that his already powered up + * @detach: detach from a device, leaving it powered up * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. resource table) @@ -387,6 +388,7 @@ struct rproc_ops { int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); int (*attach)(struct rproc *rproc); + int (*detach)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); -- cgit v1.2.3-71-gd317 From d3962a397885518a85d2dc6b0c51e6594f71c30f Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Fri, 12 Mar 2021 09:24:46 -0700 Subject: remoteproc: Introduce function rproc_detach() Introduce function rproc_detach() to enable the remoteproc core to release the resources associated with a remote processor without stopping its operation. Signed-off-by: Mathieu Poirier Reviewed-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20210312162453.1234145-11-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 58 +++++++++++++++++++++++++++++++++++- include/linux/remoteproc.h | 1 + 2 files changed, 58 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 23e9040386c2..78a36a3723ec 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1715,7 +1715,7 @@ static int rproc_stop(struct rproc *rproc, bool crashed) /* * __rproc_detach(): Does the opposite of __rproc_attach() */ -static int __maybe_unused __rproc_detach(struct rproc *rproc) +static int __rproc_detach(struct rproc *rproc) { struct device *dev = &rproc->dev; int ret; @@ -1954,6 +1954,62 @@ out: } EXPORT_SYMBOL(rproc_shutdown); +/** + * rproc_detach() - Detach the remote processor from the + * remoteproc core + * + * @rproc: the remote processor + * + * Detach a remote processor (previously attached to with rproc_attach()). + * + * In case @rproc is still being used by an additional user(s), then + * this function will just decrement the power refcount and exit, + * without disconnecting the device. + * + * Function rproc_detach() calls __rproc_detach() in order to let a remote + * processor know that services provided by the application processor are + * no longer available. 
From there it should be possible to remove the + * platform driver and even power cycle the application processor (if the HW + * supports it) without needing to switch off the remote processor. + */ +int rproc_detach(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + ret = mutex_lock_interruptible(&rproc->lock); + if (ret) { + dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); + return ret; + } + + /* if the remote proc is still needed, bail out */ + if (!atomic_dec_and_test(&rproc->power)) { + ret = 0; + goto out; + } + + ret = __rproc_detach(rproc); + if (ret) { + atomic_inc(&rproc->power); + goto out; + } + + /* clean up all acquired resources */ + rproc_resource_cleanup(rproc); + + /* release HW resources if needed */ + rproc_unprepare_device(rproc); + + rproc_disable_iommu(rproc); + + rproc->table_ptr = NULL; +out: + mutex_unlock(&rproc->lock); + return ret; +} +EXPORT_SYMBOL(rproc_detach); + /** * rproc_get_by_phandle() - find a remote processor by phandle * @phandle: phandle to the rproc diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 2f1f0fbc3994..fc2cca600423 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -664,6 +664,7 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, int rproc_boot(struct rproc *rproc); void rproc_shutdown(struct rproc *rproc); +int rproc_detach(struct rproc *rproc); int rproc_set_firmware(struct rproc *rproc, const char *fw_name); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); void rproc_coredump_using_sections(struct rproc *rproc); -- cgit v1.2.3-71-gd317 From 9dc9507f1880fb6225e3e058cb5219b152cbf198 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Fri, 12 Mar 2021 09:24:47 -0700 Subject: remoteproc: Properly deal with the resource table when detaching If it is possible to detach the remote processor, keep an untouched copy of the resource table. That way we can start from the same resource table without having to worry about original values or what elements the startup code has changed when re-attaching to the remote processor. Signed-off-by: Mathieu Poirier Reviewed-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20210312162453.1234145-12-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 81 ++++++++++++++++++++++++++++++++++++ include/linux/remoteproc.h | 3 ++ 2 files changed, 84 insertions(+) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 78a36a3723ec..57c851d22ba6 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1562,6 +1562,24 @@ static int rproc_set_rsc_table(struct rproc *rproc) return ret; } + /* + * If it is possible to detach the remote processor, keep an untouched + * copy of the resource table. That way we can start fresh again when + * the remote processor is re-attached, that is: + * + * DETACHED -> ATTACHED -> DETACHED -> ATTACHED + * + * Free'd in rproc_reset_rsc_table_on_detach() and + * rproc_reset_rsc_table_on_stop(). 
+ */ + if (rproc->ops->detach) { + rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL); + if (!rproc->clean_table) + return -ENOMEM; + } else { + rproc->clean_table = NULL; + } + rproc->cached_table = NULL; rproc->table_ptr = table_ptr; rproc->table_sz = table_sz; @@ -1569,6 +1587,59 @@ static int rproc_set_rsc_table(struct rproc *rproc) return 0; } +static int rproc_reset_rsc_table_on_detach(struct rproc *rproc) +{ + struct resource_table *table_ptr; + + /* A resource table was never retrieved, nothing to do here */ + if (!rproc->table_ptr) + return 0; + + /* + * If we made it to this point a clean_table _must_ have been + * allocated in rproc_set_rsc_table(). If one isn't present + * something went really wrong and we must complain. + */ + if (WARN_ON(!rproc->clean_table)) + return -EINVAL; + + /* Remember where the external entity installed the resource table */ + table_ptr = rproc->table_ptr; + + /* + * If we made it here the remote processor was started by another + * entity and a cache table doesn't exist. As such make a copy of + * the resource table currently used by the remote processor and + * use that for the rest of the shutdown process. The memory + * allocated here is free'd in rproc_detach(). + */ + rproc->cached_table = kmemdup(rproc->table_ptr, + rproc->table_sz, GFP_KERNEL); + if (!rproc->cached_table) + return -ENOMEM; + + /* + * Use a copy of the resource table for the remainder of the + * shutdown process. + */ + rproc->table_ptr = rproc->cached_table; + + /* + * Reset the memory area where the firmware loaded the resource table + * to its original value. That way when we re-attach the remote + * processor the resource table is clean and ready to be used again. + */ + memcpy(table_ptr, rproc->clean_table, rproc->table_sz); + + /* + * The clean resource table is no longer needed. Allocated in + * rproc_set_rsc_table(). + */ + kfree(rproc->clean_table); + + return 0; +} + /* * Attach to remote processor - similar to rproc_fw_boot() but without * the steps that deal with the firmware image. @@ -1727,6 +1798,13 @@ static int __rproc_detach(struct rproc *rproc) /* Stop any subdevices for the remote processor */ rproc_stop_subdevices(rproc, false); + /* the installed resource table is no longer accessible */ + ret = rproc_reset_rsc_table_on_detach(rproc); + if (ret) { + dev_err(dev, "can't reset resource table: %d\n", ret); + return ret; + } + /* Tell the remote processor the core isn't available anymore */ ret = rproc->ops->detach(rproc); if (ret) { @@ -2003,6 +2081,9 @@ int rproc_detach(struct rproc *rproc) rproc_disable_iommu(rproc); + /* Free the copy of the resource table */ + kfree(rproc->cached_table); + rproc->cached_table = NULL; rproc->table_ptr = NULL; out: mutex_unlock(&rproc->lock); diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index fc2cca600423..8b795b544f75 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -516,6 +516,8 @@ struct rproc_dump_segment { * @recovery_disabled: flag that state if recovery was disabled * @max_notifyid: largest allocated notify id. * @table_ptr: pointer to the resource table in effect + * @clean_table: copy of the resource table without modifications. 
Used + * when a remote processor is attached or detached from the core * @cached_table: copy of the resource table * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU @@ -552,6 +554,7 @@ struct rproc { bool recovery_disabled; int max_notifyid; struct resource_table *table_ptr; + struct resource_table *clean_table; struct resource_table *cached_table; size_t table_sz; bool has_iommu; -- cgit v1.2.3-71-gd317 From b47e330231acbf4506b049643145cc64268a1940 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 16 Mar 2021 12:41:03 -0400 Subject: tracing: Pass buffer of event to trigger operations The ring_buffer_event_time_stamp() is going to be updated to extract the time stamp for the event without needing it to be set to have absolute values for all events. But to do so, it needs the buffer that the event is on as the buffer saves information for the event before it is committed to the buffer. If the trace buffer is disabled, a temporary buffer is used, and there's no access to this buffer from the current histogram triggers, even though it is passed to the trace event code. Pass the buffer that the event is on all the way down to the histogram triggers. Link: https://lkml.kernel.org/r/20210316164113.542448131@goodmis.org Reviewed-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 5 +- kernel/trace/trace.c | 2 +- kernel/trace/trace.h | 4 +- kernel/trace/trace_events_hist.c | 92 ++++++++++++++++++++++++------------- kernel/trace/trace_events_trigger.c | 45 +++++++++++------- 5 files changed, 95 insertions(+), 53 deletions(-) (limited to 'include/linux') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 28e7af1406f2..8cba64ce23a4 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -640,7 +640,8 @@ enum event_trigger_type { extern int filter_match_preds(struct event_filter *filter, void *rec); extern enum event_trigger_type -event_triggers_call(struct trace_event_file *file, void *rec, +event_triggers_call(struct trace_event_file *file, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event); extern void event_triggers_post_call(struct trace_event_file *file, @@ -664,7 +665,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { if (eflags & EVENT_FILE_FL_TRIGGER_MODE) - event_triggers_call(file, NULL, NULL); + event_triggers_call(file, NULL, NULL, NULL); if (eflags & EVENT_FILE_FL_SOFT_DISABLED) return true; if (eflags & EVENT_FILE_FL_PID_FILTER) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index eccb4e1187cc..f979220238a5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6763,7 +6763,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { /* do not add \n before testing triggers, but add \0 */ entry->buf[cnt] = '\0'; - tt = event_triggers_call(tr->trace_marker_file, entry, event); + tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event); } if (entry->buf[cnt - 1] != '\n') { diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index a6446c03cfbc..798773178d7e 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1329,7 +1329,7 @@ __event_trigger_test_discard(struct trace_event_file *file, unsigned long eflags = file->flags; if (eflags & EVENT_FILE_FL_TRIGGER_COND) - *tt = event_triggers_call(file, entry, event); + *tt = 
event_triggers_call(file, buffer, entry, event); if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && @@ -1626,7 +1626,7 @@ extern int register_trigger_hist_enable_disable_cmds(void); */ struct event_trigger_ops { void (*func)(struct event_trigger_data *data, - void *rec, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe); int (*init)(struct event_trigger_ops *ops, struct event_trigger_data *data); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 39ebe1826fc3..6978aa3ee4c5 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -81,6 +81,7 @@ struct hist_field; typedef u64 (*hist_field_fn_t) (struct hist_field *field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event); @@ -153,6 +154,7 @@ struct hist_field { static u64 hist_field_none(struct hist_field *field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { @@ -161,6 +163,7 @@ static u64 hist_field_none(struct hist_field *field, static u64 hist_field_counter(struct hist_field *field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { @@ -169,6 +172,7 @@ static u64 hist_field_counter(struct hist_field *field, static u64 hist_field_string(struct hist_field *hist_field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { @@ -179,6 +183,7 @@ static u64 hist_field_string(struct hist_field *hist_field, static u64 hist_field_dynstring(struct hist_field *hist_field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { @@ -191,6 +196,7 @@ static u64 hist_field_dynstring(struct hist_field *hist_field, static u64 hist_field_pstring(struct hist_field *hist_field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { @@ -201,52 +207,56 @@ static u64 hist_field_pstring(struct hist_field *hist_field, static u64 hist_field_log2(struct hist_field *hist_field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { struct hist_field *operand = hist_field->operands[0]; - u64 val = operand->fn(operand, elt, rbe, event); + u64 val = operand->fn(operand, elt, buffer, rbe, event); return (u64) ilog2(roundup_pow_of_two(val)); } static u64 hist_field_plus(struct hist_field *hist_field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { struct hist_field *operand1 = hist_field->operands[0]; struct hist_field *operand2 = hist_field->operands[1]; - u64 val1 = operand1->fn(operand1, elt, rbe, event); - u64 val2 = operand2->fn(operand2, elt, rbe, event); + u64 val1 = operand1->fn(operand1, elt, buffer, rbe, event); + u64 val2 = operand2->fn(operand2, elt, buffer, rbe, event); return val1 + val2; } static u64 hist_field_minus(struct hist_field *hist_field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { struct hist_field *operand1 = hist_field->operands[0]; struct hist_field *operand2 = hist_field->operands[1]; - u64 val1 = operand1->fn(operand1, elt, rbe, event); - u64 val2 = operand2->fn(operand2, elt, rbe, event); + u64 val1 = operand1->fn(operand1, elt, buffer, rbe, event); + u64 val2 = operand2->fn(operand2, elt, buffer, rbe, event); 
return val1 - val2; } static u64 hist_field_unary_minus(struct hist_field *hist_field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { struct hist_field *operand = hist_field->operands[0]; - s64 sval = (s64)operand->fn(operand, elt, rbe, event); + s64 sval = (s64)operand->fn(operand, elt, buffer, rbe, event); u64 val = (u64)-sval; return val; @@ -255,6 +265,7 @@ static u64 hist_field_unary_minus(struct hist_field *hist_field, #define DEFINE_HIST_FIELD_FN(type) \ static u64 hist_field_##type(struct hist_field *hist_field, \ struct tracing_map_elt *elt, \ + struct trace_buffer *buffer, \ struct ring_buffer_event *rbe, \ void *event) \ { \ @@ -380,7 +391,8 @@ struct hist_trigger_data { struct action_data; typedef void (*action_fn_t) (struct hist_trigger_data *hist_data, - struct tracing_map_elt *elt, void *rec, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe, void *key, struct action_data *data, u64 *var_ref_vals); @@ -608,7 +620,8 @@ static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, } static void action_trace(struct hist_trigger_data *hist_data, - struct tracing_map_elt *elt, void *rec, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe, void *key, struct action_data *data, u64 *var_ref_vals) { @@ -624,6 +637,7 @@ struct hist_var_data { static u64 hist_field_timestamp(struct hist_field *hist_field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { @@ -640,6 +654,7 @@ static u64 hist_field_timestamp(struct hist_field *hist_field, static u64 hist_field_cpu(struct hist_field *hist_field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { @@ -1020,6 +1035,7 @@ static struct hist_field *find_event_var(struct hist_trigger_data *hist_data, static u64 hist_field_var_ref(struct hist_field *hist_field, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *event) { @@ -2561,6 +2577,7 @@ find_target_event_var(struct hist_trigger_data *hist_data, } static inline void __update_field_vars(struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *rec, struct field_var **field_vars, @@ -2576,7 +2593,7 @@ static inline void __update_field_vars(struct tracing_map_elt *elt, struct hist_field *var = field_var->var; struct hist_field *val = field_var->val; - var_val = val->fn(val, elt, rbe, rec); + var_val = val->fn(val, elt, buffer, rbe, rec); var_idx = var->var.idx; if (val->flags & HIST_FIELD_FL_STRING) { @@ -2592,19 +2609,21 @@ static inline void __update_field_vars(struct tracing_map_elt *elt, static void update_field_vars(struct hist_trigger_data *hist_data, struct tracing_map_elt *elt, + struct trace_buffer *buffer, struct ring_buffer_event *rbe, void *rec) { - __update_field_vars(elt, rbe, rec, hist_data->field_vars, + __update_field_vars(elt, buffer, rbe, rec, hist_data->field_vars, hist_data->n_field_vars, 0); } static void save_track_data_vars(struct hist_trigger_data *hist_data, - struct tracing_map_elt *elt, void *rec, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe, void *key, struct action_data *data, u64 *var_ref_vals) { - __update_field_vars(elt, rbe, rec, hist_data->save_vars, + __update_field_vars(elt, buffer, rbe, rec, hist_data->save_vars, 
hist_data->n_save_vars, hist_data->n_field_var_str); } @@ -2780,12 +2799,14 @@ static void save_track_val(struct hist_trigger_data *hist_data, } static void save_track_data(struct hist_trigger_data *hist_data, - struct tracing_map_elt *elt, void *rec, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe, void *key, struct action_data *data, u64 *var_ref_vals) { if (data->track_data.save_data) - data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals); + data->track_data.save_data(hist_data, elt, buffer, rec, rbe, + key, data, var_ref_vals); } static bool check_track_val(struct tracing_map_elt *elt, @@ -2836,7 +2857,8 @@ static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) } static void save_track_data_snapshot(struct hist_trigger_data *hist_data, - struct tracing_map_elt *elt, void *rec, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe, void *key, struct action_data *data, u64 *var_ref_vals) @@ -2905,7 +2927,8 @@ static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) return false; } static void save_track_data_snapshot(struct hist_trigger_data *hist_data, - struct tracing_map_elt *elt, void *rec, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe, void *key, struct action_data *data, u64 *var_ref_vals) {} @@ -2947,7 +2970,8 @@ static void track_data_print(struct seq_file *m, } static void ontrack_action(struct hist_trigger_data *hist_data, - struct tracing_map_elt *elt, void *rec, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe, void *key, struct action_data *data, u64 *var_ref_vals) { @@ -2955,7 +2979,8 @@ static void ontrack_action(struct hist_trigger_data *hist_data, if (check_track_val(elt, data, var_val)) { save_track_val(hist_data, elt, data, var_val); - save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals); + save_track_data(hist_data, elt, buffer, rec, rbe, + key, data, var_ref_vals); } } @@ -4400,7 +4425,8 @@ create_hist_data(unsigned int map_bits, } static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, - struct tracing_map_elt *elt, void *rec, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe, u64 *var_ref_vals) { @@ -4414,7 +4440,7 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, for_each_hist_val_field(i, hist_data) { hist_field = hist_data->fields[i]; - hist_val = hist_field->fn(hist_field, elt, rbe, rec); + hist_val = hist_field->fn(hist_field, elt, buffer, rbe, rec); if (hist_field->flags & HIST_FIELD_FL_VAR) { var_idx = hist_field->var.idx; @@ -4442,13 +4468,13 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, for_each_hist_key_field(i, hist_data) { hist_field = hist_data->fields[i]; if (hist_field->flags & HIST_FIELD_FL_VAR) { - hist_val = hist_field->fn(hist_field, elt, rbe, rec); + hist_val = hist_field->fn(hist_field, elt, buffer, rbe, rec); var_idx = hist_field->var.idx; tracing_map_set_var(elt, var_idx, hist_val); } } - update_field_vars(hist_data, elt, rbe, rec); + update_field_vars(hist_data, elt, buffer, rbe, rec); } static inline void add_to_key(char *compound_key, void *key, @@ -4478,7 +4504,8 @@ static inline void add_to_key(char *compound_key, void *key, static void hist_trigger_actions(struct hist_trigger_data *hist_data, - struct tracing_map_elt *elt, void 
*rec, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe, void *key, u64 *var_ref_vals) { @@ -4487,11 +4514,12 @@ hist_trigger_actions(struct hist_trigger_data *hist_data, for (i = 0; i < hist_data->n_actions; i++) { data = hist_data->actions[i]; - data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals); + data->fn(hist_data, elt, buffer, rec, rbe, key, data, var_ref_vals); } } -static void event_hist_trigger(struct event_trigger_data *data, void *rec, +static void event_hist_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *rbe) { struct hist_trigger_data *hist_data = data->private_data; @@ -4516,7 +4544,7 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec, HIST_STACKTRACE_SKIP); key = entries; } else { - field_contents = key_field->fn(key_field, elt, rbe, rec); + field_contents = key_field->fn(key_field, elt, buffer, rbe, rec); if (key_field->flags & HIST_FIELD_FL_STRING) { key = (void *)(unsigned long)field_contents; use_compound_key = true; @@ -4539,10 +4567,10 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec, if (!elt) return; - hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals); + hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, var_ref_vals); if (resolve_var_refs(hist_data, key, var_ref_vals, true)) - hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals); + hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals); } static void hist_trigger_stacktrace_print(struct seq_file *m, @@ -5812,7 +5840,8 @@ __init int register_trigger_hist_cmd(void) } static void -hist_enable_trigger(struct event_trigger_data *data, void *rec, +hist_enable_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { struct enable_trigger_data *enable_data = data->private_data; @@ -5830,7 +5859,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec, } static void -hist_enable_count_trigger(struct event_trigger_data *data, void *rec, +hist_enable_count_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { if (!data->count) @@ -5839,7 +5869,7 @@ hist_enable_count_trigger(struct event_trigger_data *data, void *rec, if (data->count != -1) (data->count)--; - hist_enable_trigger(data, rec, event); + hist_enable_trigger(data, buffer, rec, event); } static struct event_trigger_ops hist_enable_trigger_ops = { diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index f725802160c0..b8bfa8505b7b 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -53,7 +53,8 @@ void trigger_data_free(struct event_trigger_data *data) * any trigger that should be deferred, ETT_NONE if nothing to defer. 
*/ enum event_trigger_type -event_triggers_call(struct trace_event_file *file, void *rec, +event_triggers_call(struct trace_event_file *file, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { struct event_trigger_data *data; @@ -67,7 +68,7 @@ event_triggers_call(struct trace_event_file *file, void *rec, if (data->paused) continue; if (!rec) { - data->ops->func(data, rec, event); + data->ops->func(data, buffer, rec, event); continue; } filter = rcu_dereference_sched(data->filter); @@ -77,7 +78,7 @@ event_triggers_call(struct trace_event_file *file, void *rec, tt |= data->cmd_ops->trigger_type; continue; } - data->ops->func(data, rec, event); + data->ops->func(data, buffer, rec, event); } return tt; } @@ -105,7 +106,7 @@ event_triggers_post_call(struct trace_event_file *file, if (data->paused) continue; if (data->cmd_ops->trigger_type & tt) - data->ops->func(data, NULL, NULL); + data->ops->func(data, NULL, NULL, NULL); } } EXPORT_SYMBOL_GPL(event_triggers_post_call); @@ -937,7 +938,8 @@ get_named_trigger_data(struct event_trigger_data *data) } static void -traceon_trigger(struct event_trigger_data *data, void *rec, +traceon_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { if (tracing_is_on()) @@ -947,7 +949,8 @@ traceon_trigger(struct event_trigger_data *data, void *rec, } static void -traceon_count_trigger(struct event_trigger_data *data, void *rec, +traceon_count_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { if (tracing_is_on()) @@ -963,7 +966,8 @@ traceon_count_trigger(struct event_trigger_data *data, void *rec, } static void -traceoff_trigger(struct event_trigger_data *data, void *rec, +traceoff_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { if (!tracing_is_on()) @@ -973,7 +977,8 @@ traceoff_trigger(struct event_trigger_data *data, void *rec, } static void -traceoff_count_trigger(struct event_trigger_data *data, void *rec, +traceoff_count_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { if (!tracing_is_on()) @@ -1071,7 +1076,8 @@ static struct event_command trigger_traceoff_cmd = { #ifdef CONFIG_TRACER_SNAPSHOT static void -snapshot_trigger(struct event_trigger_data *data, void *rec, +snapshot_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { struct trace_event_file *file = data->private_data; @@ -1083,7 +1089,8 @@ snapshot_trigger(struct event_trigger_data *data, void *rec, } static void -snapshot_count_trigger(struct event_trigger_data *data, void *rec, +snapshot_count_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { if (!data->count) @@ -1092,7 +1099,7 @@ snapshot_count_trigger(struct event_trigger_data *data, void *rec, if (data->count != -1) (data->count)--; - snapshot_trigger(data, rec, event); + snapshot_trigger(data, buffer, rec, event); } static int @@ -1176,14 +1183,16 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; } #endif static void -stacktrace_trigger(struct event_trigger_data *data, void *rec, +stacktrace_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { trace_dump_stack(STACK_SKIP); } static void -stacktrace_count_trigger(struct event_trigger_data 
*data, void *rec, +stacktrace_count_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { if (!data->count) @@ -1192,7 +1201,7 @@ stacktrace_count_trigger(struct event_trigger_data *data, void *rec, if (data->count != -1) (data->count)--; - stacktrace_trigger(data, rec, event); + stacktrace_trigger(data, buffer, rec, event); } static int @@ -1254,7 +1263,8 @@ static __init void unregister_trigger_traceon_traceoff_cmds(void) } static void -event_enable_trigger(struct event_trigger_data *data, void *rec, +event_enable_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { struct enable_trigger_data *enable_data = data->private_data; @@ -1266,7 +1276,8 @@ event_enable_trigger(struct event_trigger_data *data, void *rec, } static void -event_enable_count_trigger(struct event_trigger_data *data, void *rec, +event_enable_count_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event) { struct enable_trigger_data *enable_data = data->private_data; @@ -1281,7 +1292,7 @@ event_enable_count_trigger(struct event_trigger_data *data, void *rec, if (data->count != -1) (data->count)--; - event_enable_trigger(data, rec, event); + event_enable_trigger(data, buffer, rec, event); } int event_enable_trigger_print(struct seq_file *m, -- cgit v1.2.3-71-gd317 From efe6196a6bc5bbc84b856316c4687fd24566a95c Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 16 Mar 2021 12:41:04 -0400 Subject: ring-buffer: Allow ring_buffer_event_time_stamp() to return time stamp of all events Currently, ring_buffer_event_time_stamp() only returns an accurate time stamp of the event if it has an absolute extended time stamp attached to it. To make it more robust, use the event_stamp() in case the event does not have an absolute value attached to it. This will allow ring_buffer_event_time_stamp() to be used in more cases than just histograms, and it will also allow histograms to not require including absolute values all the time. Link: https://lkml.kernel.org/r/20210316164113.704830885@goodmis.org Reviewed-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) --- include/linux/ring_buffer.h | 3 +- kernel/trace/ring_buffer.c | 60 ++++++++++++++++++++++++++++++---------- kernel/trace/trace_events_hist.c | 2 +- 3 files changed, 48 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 136ea0997e6d..057b7ed4fe24 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -61,7 +61,8 @@ enum ring_buffer_type { unsigned ring_buffer_event_length(struct ring_buffer_event *event); void *ring_buffer_event_data(struct ring_buffer_event *event); -u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event); +u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, + struct ring_buffer_event *event); /* * ring_buffer_discard_commit will remove an event that has not diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 470d97169081..8fa2a84f714f 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -298,21 +298,6 @@ static u64 rb_event_time_stamp(struct ring_buffer_event *event) return ts; } -/** - * ring_buffer_event_time_stamp - return the event's extended timestamp - * @event: the event to get the timestamp of - * - * Returns the extended timestamp associated with a data event. 
- * An extended time_stamp is a 64-bit timestamp represented - * internally in a special way that makes the best use of space - * contained within a ring buffer event. This function decodes - * it and maps it to a straight u64 value. - */ -u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event) -{ - return rb_event_time_stamp(event); -} - /* Flag when events were overwritten */ #define RB_MISSED_EVENTS (1 << 31) /* Missed count stored at end */ @@ -757,6 +742,51 @@ static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set) } #endif +static inline u64 rb_time_stamp(struct trace_buffer *buffer); + +/** + * ring_buffer_event_time_stamp - return the event's current time stamp + * @buffer: The buffer that the event is on + * @event: the event to get the time stamp of + * + * Note, this must be called after @event is reserved, and before it is + * committed to the ring buffer. And must be called from the same + * context where the event was reserved (normal, softirq, irq, etc). + * + * Returns the time stamp associated with the current event. + * If the event has an extended time stamp, then that is used as + * the time stamp to return. + * In the highly unlikely case that the event was nested more than + * the max nesting, then the write_stamp of the buffer is returned, + * otherwise current time is returned, but that really neither of + * the last two cases should ever happen. + */ +u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, + struct ring_buffer_event *event) +{ + struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; + unsigned int nest; + u64 ts; + + /* If the event includes an absolute time, then just use that */ + if (event->type_len == RINGBUF_TYPE_TIME_STAMP) + return rb_event_time_stamp(event); + + /* Read the current saved nesting level time stamp */ + nest = local_read(&cpu_buffer->committing) - 1; + if (likely(nest < MAX_NEST)) + return cpu_buffer->event_stamp[nest]; + + WARN_ON_ONCE(1); + + /* Can only fail on 32 bit */ + if (!rb_time_read(&cpu_buffer->write_stamp, &ts)) + /* Screw it, just read the current time */ + ts = rb_time_stamp(cpu_buffer->buffer); + + return ts; +} + /** * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer * @buffer: The ring_buffer to get the number of pages from diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 6978aa3ee4c5..45986cb4637e 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -644,7 +644,7 @@ static u64 hist_field_timestamp(struct hist_field *hist_field, struct hist_trigger_data *hist_data = hist_field->hist_data; struct trace_array *tr = hist_data->event_file->tr; - u64 ts = ring_buffer_event_time_stamp(rbe); + u64 ts = ring_buffer_event_time_stamp(buffer, rbe); if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr)) ts = ns2usecs(ts); -- cgit v1.2.3-71-gd317 From f2616c772c768485de18e7fcb2816bcdcd098339 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 17 Mar 2021 13:34:35 -0400 Subject: seq_buf: Add seq_buf_terminate() API In the case that the seq_buf buffer needs to be printed directly, add a way to make sure that the buffer is safe to read by forcing a nul terminating character at the end of the string, or the last byte of the buffer if the string has overflowed. 
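As a rough usage sketch (example_report(), its arguments and the 64-byte buffer are made up for illustration and are not taken from an in-tree user; assumes <linux/seq_buf.h> and <linux/printk.h>):

    void example_report(int cpu, unsigned long count)
    {
            char buf[64];
            struct seq_buf s;

            seq_buf_init(&s, buf, sizeof(buf));
            seq_buf_printf(&s, "cpu %d: %lu events", cpu, count);

            /* force nul termination, even if the seq_buf overflowed */
            seq_buf_terminate(&s);

            /* buf is now safe to print as a plain C string */
            pr_info("%s\n", buf);
    }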
Signed-off-by: Steven Rostedt (VMware) --- include/linux/seq_buf.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'include/linux') diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index 9d6c28cc4d8f..5b31c5147969 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -71,6 +71,31 @@ static inline unsigned int seq_buf_used(struct seq_buf *s) return min(s->len, s->size); } +/** + * seq_buf_terminate - Make sure buffer is nul terminated + * @s: the seq_buf descriptor to terminate. + * + * This makes sure that the buffer in @s is nul terminated and + * safe to read as a string. + * + * Note, if this is called when the buffer has overflowed, then + * the last byte of the buffer is zeroed, and the len will still + * point passed it. + * + * After this function is called, s->buffer is safe to use + * in string operations. + */ +static inline void seq_buf_terminate(struct seq_buf *s) +{ + if (WARN_ON(s->size == 0)) + return; + + if (seq_buf_buffer_left(s)) + s->buffer[s->len] = 0; + else + s->buffer[s->size - 1] = 0; +} + /** * seq_buf_get_buf - get buffer to write arbitrary data to * @s: the seq_buf handle -- cgit v1.2.3-71-gd317 From 73f620951b2b594bdc38722c0d647c3b3312af7a Mon Sep 17 00:00:00 2001 From: Claire Chang Date: Thu, 18 Mar 2021 17:14:22 +0100 Subject: swiotlb: move global variables into a new io_tlb_mem structure Added a new struct, io_tlb_mem, as the IO TLB memory pool descriptor and moved relevant global variables into that struct. This will be useful later to allow for restricted DMA pool. Signed-off-by: Claire Chang [hch: rebased] Signed-off-by: Christoph Hellwig Signed-off-by: Konrad Rzeszutek Wilk --- drivers/xen/swiotlb-xen.c | 2 +- include/linux/swiotlb.h | 43 +++++- kernel/dma/swiotlb.c | 354 +++++++++++++++++++++------------------------- 3 files changed, 206 insertions(+), 193 deletions(-) (limited to 'include/linux') diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 4ecfce2c6f72..5329ad54a5f3 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -548,7 +548,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, static int xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) { - return xen_phys_to_dma(hwdev, io_tlb_end - 1) <= mask; + return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask; } const struct dma_map_ops xen_swiotlb_dma_ops = { diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 0696bdc8072e..5ec5378b17c3 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -6,6 +6,7 @@ #include #include #include +#include struct device; struct page; @@ -61,11 +62,49 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, #ifdef CONFIG_SWIOTLB extern enum swiotlb_force swiotlb_force; -extern phys_addr_t io_tlb_start, io_tlb_end; + +/** + * struct io_tlb_mem - IO TLB Memory Pool Descriptor + * + * @start: The start address of the swiotlb memory pool. Used to do a quick + * range check to see if the memory was in fact allocated by this + * API. + * @end: The end address of the swiotlb memory pool. Used to do a quick + * range check to see if the memory was in fact allocated by this + * API. + * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and + * @end. This is command line adjustable via setup_io_tlb_npages. + * @used: The number of used IO TLB block. + * @list: The free list describing the number of free entries available + * from each index. 
+ * @index: The index to start searching in the next round. + * @orig_addr: The original address corresponding to a mapped entry. + * @alloc_size: Size of the allocated buffer. + * @lock: The lock to protect the above data structures in the map and + * unmap calls. + * @debugfs: The dentry to debugfs. + * @late_alloc: %true if allocated using the page allocator + */ +struct io_tlb_mem { + phys_addr_t start; + phys_addr_t end; + unsigned long nslabs; + unsigned long used; + unsigned int *list; + unsigned int index; + phys_addr_t *orig_addr; + size_t *alloc_size; + spinlock_t lock; + struct dentry *debugfs; + bool late_alloc; +}; +extern struct io_tlb_mem io_tlb_default_mem; static inline bool is_swiotlb_buffer(phys_addr_t paddr) { - return paddr >= io_tlb_start && paddr < io_tlb_end; + struct io_tlb_mem *mem = &io_tlb_default_mem; + + return paddr >= mem->start && paddr < mem->end; } void __init swiotlb_exit(void); diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 35e24f0ff8b2..d9c097f0f78c 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -59,32 +59,11 @@ */ #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) -enum swiotlb_force swiotlb_force; - -/* - * Used to do a quick range check in swiotlb_tbl_unmap_single and - * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this - * API. - */ -phys_addr_t io_tlb_start, io_tlb_end; - -/* - * The number of IO TLB blocks (in groups of 64) between io_tlb_start and - * io_tlb_end. This is command line adjustable via setup_io_tlb_npages. - */ -static unsigned long io_tlb_nslabs; +#define INVALID_PHYS_ADDR (~(phys_addr_t)0) -/* - * The number of used IO TLB block - */ -static unsigned long io_tlb_used; +enum swiotlb_force swiotlb_force; -/* - * This is a free list describing the number of free entries available from - * each index - */ -static unsigned int *io_tlb_list; -static unsigned int io_tlb_index; +struct io_tlb_mem io_tlb_default_mem; /* * Max segment that we can provide which (if pages are contingous) will @@ -92,32 +71,15 @@ static unsigned int io_tlb_index; */ static unsigned int max_segment; -/* - * We need to save away the original address corresponding to a mapped entry - * for the sync operations. - */ -#define INVALID_PHYS_ADDR (~(phys_addr_t)0) -static phys_addr_t *io_tlb_orig_addr; - -/* - * The mapped buffer's size should be validated during a sync operation. - */ -static size_t *io_tlb_alloc_size; - -/* - * Protect the above data structures in the map and unmap calls - */ -static DEFINE_SPINLOCK(io_tlb_lock); - -static int late_alloc; - static int __init setup_io_tlb_npages(char *str) { + struct io_tlb_mem *mem = &io_tlb_default_mem; + if (isdigit(*str)) { - io_tlb_nslabs = simple_strtoul(str, &str, 0); + mem->nslabs = simple_strtoul(str, &str, 0); /* avoid tail segment of size < IO_TLB_SEGSIZE */ - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE); } if (*str == ',') ++str; @@ -125,7 +87,7 @@ setup_io_tlb_npages(char *str) swiotlb_force = SWIOTLB_FORCE; } else if (!strcmp(str, "noforce")) { swiotlb_force = SWIOTLB_NO_FORCE; - io_tlb_nslabs = 1; + mem->nslabs = 1; } return 0; @@ -136,7 +98,7 @@ static bool no_iotlb_memory; unsigned long swiotlb_nr_tbl(void) { - return unlikely(no_iotlb_memory) ? 0 : io_tlb_nslabs; + return unlikely(no_iotlb_memory) ? 
0 : io_tlb_default_mem.nslabs; } EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); @@ -158,13 +120,14 @@ unsigned long swiotlb_size_or_default(void) { unsigned long size; - size = io_tlb_nslabs << IO_TLB_SHIFT; + size = io_tlb_default_mem.nslabs << IO_TLB_SHIFT; return size ? size : (IO_TLB_DEFAULT_SIZE); } void __init swiotlb_adjust_size(unsigned long new_size) { + struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long size; /* @@ -172,10 +135,10 @@ void __init swiotlb_adjust_size(unsigned long new_size) * architectures such as those supporting memory encryption to * adjust/expand SWIOTLB size for their use. */ - if (!io_tlb_nslabs) { + if (!mem->nslabs) { size = ALIGN(new_size, IO_TLB_SIZE); - io_tlb_nslabs = size >> IO_TLB_SHIFT; - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + mem->nslabs = size >> IO_TLB_SHIFT; + mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE); pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); } @@ -183,14 +146,15 @@ void __init swiotlb_adjust_size(unsigned long new_size) void swiotlb_print_info(void) { - unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; + struct io_tlb_mem *mem = &io_tlb_default_mem; + unsigned long bytes = mem->nslabs << IO_TLB_SHIFT; if (no_iotlb_memory) { pr_warn("No low mem\n"); return; } - pr_info("mapped [mem %pa-%pa] (%luMB)\n", &io_tlb_start, &io_tlb_end, + pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, bytes >> 20); } @@ -212,68 +176,71 @@ static inline unsigned long nr_slots(u64 val) */ void __init swiotlb_update_mem_attributes(void) { + struct io_tlb_mem *mem = &io_tlb_default_mem; void *vaddr; unsigned long bytes; - if (no_iotlb_memory || late_alloc) + if (no_iotlb_memory || mem->late_alloc) return; - vaddr = phys_to_virt(io_tlb_start); - bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); + vaddr = phys_to_virt(mem->start); + bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); memset(vaddr, 0, bytes); } int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) { + struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long i, bytes; size_t alloc_size; /* protect against double initialization */ - if (WARN_ON_ONCE(io_tlb_start)) + if (WARN_ON_ONCE(mem->start)) return -ENOMEM; bytes = nslabs << IO_TLB_SHIFT; - io_tlb_nslabs = nslabs; - io_tlb_start = __pa(tlb); - io_tlb_end = io_tlb_start + bytes; + mem->nslabs = nslabs; + mem->start = __pa(tlb); + mem->end = mem->start + bytes; + mem->index = 0; + spin_lock_init(&mem->lock); /* * Allocate and initialize the free list array. This array is used * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE - * between io_tlb_start and io_tlb_end. + * between mem->start and mem->end. 
*/ - alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(int)); - io_tlb_list = memblock_alloc(alloc_size, PAGE_SIZE); - if (!io_tlb_list) + alloc_size = PAGE_ALIGN(mem->nslabs * sizeof(int)); + mem->list = memblock_alloc(alloc_size, PAGE_SIZE); + if (!mem->list) panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); - alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)); - io_tlb_orig_addr = memblock_alloc(alloc_size, PAGE_SIZE); - if (!io_tlb_orig_addr) + alloc_size = PAGE_ALIGN(mem->nslabs * sizeof(phys_addr_t)); + mem->orig_addr = memblock_alloc(alloc_size, PAGE_SIZE); + if (!mem->orig_addr) panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); - alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t)); - io_tlb_alloc_size = memblock_alloc(alloc_size, PAGE_SIZE); - if (!io_tlb_alloc_size) + alloc_size = PAGE_ALIGN(mem->nslabs * sizeof(size_t)); + mem->alloc_size = memblock_alloc(alloc_size, PAGE_SIZE); + if (!mem->alloc_size) panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); - for (i = 0; i < io_tlb_nslabs; i++) { - io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); - io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - io_tlb_alloc_size[i] = 0; + for (i = 0; i < mem->nslabs; i++) { + mem->list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); + mem->orig_addr[i] = INVALID_PHYS_ADDR; + mem->alloc_size[i] = 0; } - io_tlb_index = 0; no_iotlb_memory = false; if (verbose) swiotlb_print_info(); - swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT); + swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); return 0; } @@ -284,26 +251,27 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) void __init swiotlb_init(int verbose) { + struct io_tlb_mem *mem = &io_tlb_default_mem; size_t default_size = IO_TLB_DEFAULT_SIZE; unsigned char *vstart; unsigned long bytes; - if (!io_tlb_nslabs) { - io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + if (!mem->nslabs) { + mem->nslabs = (default_size >> IO_TLB_SHIFT); + mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE); } - bytes = io_tlb_nslabs << IO_TLB_SHIFT; + bytes = mem->nslabs << IO_TLB_SHIFT; /* Get IO TLB memory from the low pages */ vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE); - if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) + if (vstart && !swiotlb_init_with_tbl(vstart, mem->nslabs, verbose)) return; - if (io_tlb_start) { - memblock_free_early(io_tlb_start, - PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); - io_tlb_start = 0; + if (mem->start) { + memblock_free_early(mem->start, + PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT)); + mem->start = 0; } pr_warn("Cannot allocate buffer"); no_iotlb_memory = true; @@ -317,22 +285,23 @@ swiotlb_init(int verbose) int swiotlb_late_init_with_default_size(size_t default_size) { - unsigned long bytes, req_nslabs = io_tlb_nslabs; + struct io_tlb_mem *mem = &io_tlb_default_mem; + unsigned long bytes, req_nslabs = mem->nslabs; unsigned char *vstart = NULL; unsigned int order; int rc = 0; - if (!io_tlb_nslabs) { - io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + if (!mem->nslabs) { + mem->nslabs = (default_size >> IO_TLB_SHIFT); + mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE); } /* * Get IO TLB memory from the low pages */ - order = get_order(io_tlb_nslabs << IO_TLB_SHIFT); - io_tlb_nslabs = SLABS_PER_PAGE << order; - bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+ order = get_order(mem->nslabs << IO_TLB_SHIFT); + mem->nslabs = SLABS_PER_PAGE << order; + bytes = mem->nslabs << IO_TLB_SHIFT; while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, @@ -343,15 +312,15 @@ swiotlb_late_init_with_default_size(size_t default_size) } if (!vstart) { - io_tlb_nslabs = req_nslabs; + mem->nslabs = req_nslabs; return -ENOMEM; } if (order != get_order(bytes)) { pr_warn("only able to allocate %ld MB\n", (PAGE_SIZE << order) >> 20); - io_tlb_nslabs = SLABS_PER_PAGE << order; + mem->nslabs = SLABS_PER_PAGE << order; } - rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs); + rc = swiotlb_late_init_with_tbl(vstart, mem->nslabs); if (rc) free_pages((unsigned long)vstart, order); @@ -360,26 +329,32 @@ swiotlb_late_init_with_default_size(size_t default_size) static void swiotlb_cleanup(void) { - io_tlb_end = 0; - io_tlb_start = 0; - io_tlb_nslabs = 0; + struct io_tlb_mem *mem = &io_tlb_default_mem; + + mem->end = 0; + mem->start = 0; + mem->nslabs = 0; max_segment = 0; } int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) { + struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long i, bytes; /* protect against double initialization */ - if (WARN_ON_ONCE(io_tlb_start)) + if (WARN_ON_ONCE(mem->start)) return -ENOMEM; bytes = nslabs << IO_TLB_SHIFT; - io_tlb_nslabs = nslabs; - io_tlb_start = virt_to_phys(tlb); - io_tlb_end = io_tlb_start + bytes; + mem->nslabs = nslabs; + mem->start = virt_to_phys(tlb); + mem->end = mem->start + bytes; + mem->index = 0; + mem->late_alloc = 1; + spin_lock_init(&mem->lock); set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); memset(tlb, 0, bytes); @@ -387,52 +362,45 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) /* * Allocate and initialize the free list array. This array is used * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE - * between io_tlb_start and io_tlb_end. + * between mem->start and mem->end. 
*/ - io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, - get_order(io_tlb_nslabs * sizeof(int))); - if (!io_tlb_list) + mem->list = (unsigned int *)__get_free_pages(GFP_KERNEL, + get_order(mem->nslabs * sizeof(int))); + if (!mem->list) goto cleanup3; - io_tlb_orig_addr = (phys_addr_t *) + mem->orig_addr = (phys_addr_t *) __get_free_pages(GFP_KERNEL, - get_order(io_tlb_nslabs * + get_order(mem->nslabs * sizeof(phys_addr_t))); - if (!io_tlb_orig_addr) + if (!mem->orig_addr) goto cleanup4; - io_tlb_alloc_size = (size_t *) + mem->alloc_size = (size_t *) __get_free_pages(GFP_KERNEL, - get_order(io_tlb_nslabs * + get_order(mem->nslabs * sizeof(size_t))); - if (!io_tlb_alloc_size) + if (!mem->alloc_size) goto cleanup5; - - for (i = 0; i < io_tlb_nslabs; i++) { - io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); - io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - io_tlb_alloc_size[i] = 0; + for (i = 0; i < mem->nslabs; i++) { + mem->list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); + mem->orig_addr[i] = INVALID_PHYS_ADDR; + mem->alloc_size[i] = 0; } - io_tlb_index = 0; no_iotlb_memory = false; swiotlb_print_info(); - - late_alloc = 1; - - swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT); - + swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); return 0; cleanup5: - free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * - sizeof(phys_addr_t))); - + free_pages((unsigned long)mem->orig_addr, + get_order(mem->nslabs * sizeof(phys_addr_t))); cleanup4: - free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * - sizeof(int))); - io_tlb_list = NULL; + free_pages((unsigned long)mem->list, + get_order(mem->nslabs * sizeof(int))); + mem->list = NULL; cleanup3: swiotlb_cleanup(); return -ENOMEM; @@ -440,27 +408,29 @@ cleanup3: void __init swiotlb_exit(void) { - if (!io_tlb_orig_addr) + struct io_tlb_mem *mem = &io_tlb_default_mem; + + if (!mem->orig_addr) return; - if (late_alloc) { - free_pages((unsigned long)io_tlb_alloc_size, - get_order(io_tlb_nslabs * sizeof(size_t))); - free_pages((unsigned long)io_tlb_orig_addr, - get_order(io_tlb_nslabs * sizeof(phys_addr_t))); - free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * - sizeof(int))); - free_pages((unsigned long)phys_to_virt(io_tlb_start), - get_order(io_tlb_nslabs << IO_TLB_SHIFT)); + if (mem->late_alloc) { + free_pages((unsigned long)mem->alloc_size, + get_order(mem->nslabs * sizeof(size_t))); + free_pages((unsigned long)mem->orig_addr, + get_order(mem->nslabs * sizeof(phys_addr_t))); + free_pages((unsigned long)mem->list, + get_order(mem->nslabs * sizeof(int))); + free_pages((unsigned long)phys_to_virt(mem->start), + get_order(mem->nslabs << IO_TLB_SHIFT)); } else { - memblock_free_late(__pa(io_tlb_orig_addr), - PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); - memblock_free_late(__pa(io_tlb_alloc_size), - PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t))); - memblock_free_late(__pa(io_tlb_list), - PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); - memblock_free_late(io_tlb_start, - PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); + memblock_free_late(__pa(mem->alloc_size), + PAGE_ALIGN(mem->nslabs * sizeof(size_t))); + memblock_free_late(__pa(mem->orig_addr), + PAGE_ALIGN(mem->nslabs * sizeof(phys_addr_t))); + memblock_free_late(__pa(mem->list), + PAGE_ALIGN(mem->nslabs * sizeof(int))); + memblock_free_late(mem->start, + PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT)); } swiotlb_cleanup(); } @@ -471,9 +441,10 @@ void __init swiotlb_exit(void) static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, enum 
dma_data_direction dir) { - int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; - size_t alloc_size = io_tlb_alloc_size[index]; - phys_addr_t orig_addr = io_tlb_orig_addr[index]; + struct io_tlb_mem *mem = &io_tlb_default_mem; + int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; + phys_addr_t orig_addr = mem->orig_addr[index]; + size_t alloc_size = mem->alloc_size[index]; unsigned long pfn = PFN_DOWN(orig_addr); unsigned char *vaddr = phys_to_virt(tlb_addr); @@ -538,9 +509,9 @@ static inline unsigned long get_max_slots(unsigned long boundary_mask) return nr_slots(boundary_mask + 1); } -static unsigned int wrap_index(unsigned int index) +static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index) { - if (index >= io_tlb_nslabs) + if (index >= mem->nslabs) return 0; return index; } @@ -552,9 +523,10 @@ static unsigned int wrap_index(unsigned int index) static int find_slots(struct device *dev, phys_addr_t orig_addr, size_t alloc_size) { + struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long boundary_mask = dma_get_seg_boundary(dev); dma_addr_t tbl_dma_addr = - phys_to_dma_unencrypted(dev, io_tlb_start) & boundary_mask; + phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; unsigned long max_slots = get_max_slots(boundary_mask); unsigned int iotlb_align_mask = dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1); @@ -573,15 +545,15 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr, if (alloc_size >= PAGE_SIZE) stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT)); - spin_lock_irqsave(&io_tlb_lock, flags); - if (unlikely(nslots > io_tlb_nslabs - io_tlb_used)) + spin_lock_irqsave(&mem->lock, flags); + if (unlikely(nslots > mem->nslabs - mem->used)) goto not_found; - index = wrap = wrap_index(ALIGN(io_tlb_index, stride)); + index = wrap = wrap_index(mem, ALIGN(mem->index, stride)); do { if ((slot_addr(tbl_dma_addr, index) & iotlb_align_mask) != (orig_addr & iotlb_align_mask)) { - index = wrap_index(index + 1); + index = wrap_index(mem, index + 1); continue; } @@ -593,34 +565,34 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr, if (!iommu_is_span_boundary(index, nslots, nr_slots(tbl_dma_addr), max_slots)) { - if (io_tlb_list[index] >= nslots) + if (mem->list[index] >= nslots) goto found; } - index = wrap_index(index + stride); + index = wrap_index(mem, index + stride); } while (index != wrap); not_found: - spin_unlock_irqrestore(&io_tlb_lock, flags); + spin_unlock_irqrestore(&mem->lock, flags); return -1; found: for (i = index; i < index + nslots; i++) - io_tlb_list[i] = 0; + mem->list[i] = 0; for (i = index - 1; io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && - io_tlb_list[i]; i--) - io_tlb_list[i] = ++count; + mem->list[i]; i--) + mem->list[i] = ++count; /* * Update the indices to avoid searching in the next round. 
*/ - if (index + nslots < io_tlb_nslabs) - io_tlb_index = index + nslots; + if (index + nslots < mem->nslabs) + mem->index = index + nslots; else - io_tlb_index = 0; - io_tlb_used += nslots; + mem->index = 0; + mem->used += nslots; - spin_unlock_irqrestore(&io_tlb_lock, flags); + spin_unlock_irqrestore(&mem->lock, flags); return index; } @@ -628,6 +600,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, size_t mapping_size, size_t alloc_size, enum dma_data_direction dir, unsigned long attrs) { + struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned int offset = swiotlb_align_offset(dev, orig_addr); unsigned int index, i; phys_addr_t tlb_addr; @@ -649,7 +622,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, if (!(attrs & DMA_ATTR_NO_WARN)) dev_warn_ratelimited(dev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", - alloc_size, io_tlb_nslabs, io_tlb_used); + alloc_size, mem->nslabs, mem->used); return (phys_addr_t)DMA_MAPPING_ERROR; } @@ -659,10 +632,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, * needed. */ for (i = 0; i < nr_slots(alloc_size + offset); i++) { - io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i); - io_tlb_alloc_size[index+i] = alloc_size - (i << IO_TLB_SHIFT); + mem->orig_addr[index + i] = slot_addr(orig_addr, i); + mem->alloc_size[index + i] = alloc_size - (i << IO_TLB_SHIFT); } - tlb_addr = slot_addr(io_tlb_start, index) + offset; + tlb_addr = slot_addr(mem->start, index) + offset; if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE); @@ -676,10 +649,11 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, size_t mapping_size, enum dma_data_direction dir, unsigned long attrs) { + struct io_tlb_mem *mem = &io_tlb_default_mem; unsigned long flags; unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr); - int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT; - int nslots = nr_slots(io_tlb_alloc_size[index] + offset); + int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; + int nslots = nr_slots(mem->alloc_size[index] + offset); int count, i; /* @@ -695,9 +669,9 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, * While returning the entries to the free list, we merge the entries * with slots below and above the pool being returned. 
*/ - spin_lock_irqsave(&io_tlb_lock, flags); + spin_lock_irqsave(&mem->lock, flags); if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE)) - count = io_tlb_list[index + nslots]; + count = mem->list[index + nslots]; else count = 0; @@ -706,9 +680,9 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, * superceeding slots */ for (i = index + nslots - 1; i >= index; i--) { - io_tlb_list[i] = ++count; - io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - io_tlb_alloc_size[i] = 0; + mem->list[i] = ++count; + mem->orig_addr[i] = INVALID_PHYS_ADDR; + mem->alloc_size[i] = 0; } /* @@ -716,11 +690,11 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, * available (non zero) */ for (i = index - 1; - io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && io_tlb_list[i]; + io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->list[i]; i--) - io_tlb_list[i] = ++count; - io_tlb_used -= nslots; - spin_unlock_irqrestore(&io_tlb_lock, flags); + mem->list[i] = ++count; + mem->used -= nslots; + spin_unlock_irqrestore(&mem->lock, flags); } void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, @@ -783,21 +757,21 @@ size_t swiotlb_max_mapping_size(struct device *dev) bool is_swiotlb_active(void) { /* - * When SWIOTLB is initialized, even if io_tlb_start points to physical - * address zero, io_tlb_end surely doesn't. + * When SWIOTLB is initialized, even if mem->start points to physical + * address zero, mem->end surely doesn't. */ - return io_tlb_end != 0; + return io_tlb_default_mem.end != 0; } #ifdef CONFIG_DEBUG_FS static int __init swiotlb_create_debugfs(void) { - struct dentry *root; + struct io_tlb_mem *mem = &io_tlb_default_mem; - root = debugfs_create_dir("swiotlb", NULL); - debugfs_create_ulong("io_tlb_nslabs", 0400, root, &io_tlb_nslabs); - debugfs_create_ulong("io_tlb_used", 0400, root, &io_tlb_used); + mem->debugfs = debugfs_create_dir("swiotlb", NULL); + debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); + debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used); return 0; } -- cgit v1.2.3-71-gd317 From 2d29960af0bee8cc6731b9bd3964850c9e7a6840 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 18 Mar 2021 17:14:23 +0100 Subject: swiotlb: dynamically allocate io_tlb_default_mem Instead of allocating ->list and ->orig_addr separately just do one dynamic allocation for the actual io_tlb_mem structure. This simplifies a lot of the initialization code, and also allows to just check io_tlb_default_mem to see if swiotlb is in use. 
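The single allocation works because struct io_tlb_mem now ends in a flexible array of struct io_tlb_slot. As a rough sketch of what it amounts to (simplified; the real code uses struct_size(), which also guards against multiplication overflow):

    /* struct_size(mem, slots, nslabs) is approximately: */
    size_t bytes = sizeof(struct io_tlb_mem) +
                   nslabs * sizeof(struct io_tlb_slot);

    /* late path, page allocator */
    mem = (void *)__get_free_pages(GFP_KERNEL, get_order(bytes));

    /* early path, memblock */
    mem = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);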
Signed-off-by: Christoph Hellwig Signed-off-by: Konrad Rzeszutek Wilk --- drivers/xen/swiotlb-xen.c | 22 ++-- include/linux/swiotlb.h | 18 +-- kernel/dma/swiotlb.c | 306 +++++++++++++++------------------------------- 3 files changed, 117 insertions(+), 229 deletions(-) (limited to 'include/linux') diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 5329ad54a5f3..4c89afc0df62 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -158,17 +158,14 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err) int __ref xen_swiotlb_init(void) { enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN; - unsigned long nslabs, bytes, order; - unsigned int repeat = 3; + unsigned long bytes = swiotlb_size_or_default(); + unsigned long nslabs = bytes >> IO_TLB_SHIFT; + unsigned int order, repeat = 3; int rc = -ENOMEM; char *start; - nslabs = swiotlb_nr_tbl(); - if (!nslabs) - nslabs = DEFAULT_NSLABS; retry: m_ret = XEN_SWIOTLB_ENOMEM; - bytes = nslabs << IO_TLB_SHIFT; order = get_order(bytes); /* @@ -221,19 +218,16 @@ error: #ifdef CONFIG_X86 void __init xen_swiotlb_init_early(void) { - unsigned long nslabs, bytes; + unsigned long bytes = swiotlb_size_or_default(); + unsigned long nslabs = bytes >> IO_TLB_SHIFT; unsigned int repeat = 3; char *start; int rc; - nslabs = swiotlb_nr_tbl(); - if (!nslabs) - nslabs = DEFAULT_NSLABS; retry: /* * Get IO TLB memory from any location. */ - bytes = nslabs << IO_TLB_SHIFT; start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE); if (!start) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", @@ -248,8 +242,8 @@ retry: if (repeat--) { /* Min is 2MB */ nslabs = max(1024UL, (nslabs >> 1)); - pr_info("Lowering to %luMB\n", - (nslabs << IO_TLB_SHIFT) >> 20); + bytes = nslabs << IO_TLB_SHIFT; + pr_info("Lowering to %luMB\n", bytes >> 20); goto retry; } panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc); @@ -548,7 +542,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, static int xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) { - return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask; + return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask; } const struct dma_map_ops xen_swiotlb_dma_ops = { diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 5ec5378b17c3..63f7a63f61d0 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -90,28 +90,30 @@ struct io_tlb_mem { phys_addr_t end; unsigned long nslabs; unsigned long used; - unsigned int *list; unsigned int index; - phys_addr_t *orig_addr; - size_t *alloc_size; spinlock_t lock; struct dentry *debugfs; bool late_alloc; + struct io_tlb_slot { + phys_addr_t orig_addr; + size_t alloc_size; + unsigned int list; + } slots[]; }; -extern struct io_tlb_mem io_tlb_default_mem; +extern struct io_tlb_mem *io_tlb_default_mem; static inline bool is_swiotlb_buffer(phys_addr_t paddr) { - struct io_tlb_mem *mem = &io_tlb_default_mem; + struct io_tlb_mem *mem = io_tlb_default_mem; - return paddr >= mem->start && paddr < mem->end; + return mem && paddr >= mem->start && paddr < mem->end; } void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); size_t swiotlb_max_mapping_size(struct device *dev); bool is_swiotlb_active(void); -void __init swiotlb_adjust_size(unsigned long new_size); +void __init swiotlb_adjust_size(unsigned long size); #else #define swiotlb_force SWIOTLB_NO_FORCE static inline bool is_swiotlb_buffer(phys_addr_t paddr) @@ -135,7 +137,7 @@ static inline bool 
is_swiotlb_active(void) return false; } -static inline void swiotlb_adjust_size(unsigned long new_size) +static inline void swiotlb_adjust_size(unsigned long size) { } #endif /* CONFIG_SWIOTLB */ diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index d9c097f0f78c..13de669a9b46 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -63,7 +63,7 @@ enum swiotlb_force swiotlb_force; -struct io_tlb_mem io_tlb_default_mem; +struct io_tlb_mem *io_tlb_default_mem; /* * Max segment that we can provide which (if pages are contingous) will @@ -71,15 +71,15 @@ struct io_tlb_mem io_tlb_default_mem; */ static unsigned int max_segment; +static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT; + static int __init setup_io_tlb_npages(char *str) { - struct io_tlb_mem *mem = &io_tlb_default_mem; - if (isdigit(*str)) { - mem->nslabs = simple_strtoul(str, &str, 0); /* avoid tail segment of size < IO_TLB_SEGSIZE */ - mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE); + default_nslabs = + ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE); } if (*str == ',') ++str; @@ -87,24 +87,22 @@ setup_io_tlb_npages(char *str) swiotlb_force = SWIOTLB_FORCE; } else if (!strcmp(str, "noforce")) { swiotlb_force = SWIOTLB_NO_FORCE; - mem->nslabs = 1; + default_nslabs = 1; } return 0; } early_param("swiotlb", setup_io_tlb_npages); -static bool no_iotlb_memory; - unsigned long swiotlb_nr_tbl(void) { - return unlikely(no_iotlb_memory) ? 0 : io_tlb_default_mem.nslabs; + return io_tlb_default_mem ? io_tlb_default_mem->nslabs : 0; } EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); unsigned int swiotlb_max_segment(void) { - return unlikely(no_iotlb_memory) ? 0 : max_segment; + return io_tlb_default_mem ? max_segment : 0; } EXPORT_SYMBOL_GPL(swiotlb_max_segment); @@ -118,44 +116,32 @@ void swiotlb_set_max_segment(unsigned int val) unsigned long swiotlb_size_or_default(void) { - unsigned long size; - - size = io_tlb_default_mem.nslabs << IO_TLB_SHIFT; - - return size ? size : (IO_TLB_DEFAULT_SIZE); + return default_nslabs << IO_TLB_SHIFT; } -void __init swiotlb_adjust_size(unsigned long new_size) +void __init swiotlb_adjust_size(unsigned long size) { - struct io_tlb_mem *mem = &io_tlb_default_mem; - unsigned long size; - /* * If swiotlb parameter has not been specified, give a chance to * architectures such as those supporting memory encryption to * adjust/expand SWIOTLB size for their use. 
*/ - if (!mem->nslabs) { - size = ALIGN(new_size, IO_TLB_SIZE); - mem->nslabs = size >> IO_TLB_SHIFT; - mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE); - - pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); - } + size = ALIGN(size, IO_TLB_SIZE); + default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); + pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); } void swiotlb_print_info(void) { - struct io_tlb_mem *mem = &io_tlb_default_mem; - unsigned long bytes = mem->nslabs << IO_TLB_SHIFT; + struct io_tlb_mem *mem = io_tlb_default_mem; - if (no_iotlb_memory) { + if (!mem) { pr_warn("No low mem\n"); return; } pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, - bytes >> 20); + (mem->nslabs << IO_TLB_SHIFT) >> 20); } static inline unsigned long io_tlb_offset(unsigned long val) @@ -176,13 +162,12 @@ static inline unsigned long nr_slots(u64 val) */ void __init swiotlb_update_mem_attributes(void) { - struct io_tlb_mem *mem = &io_tlb_default_mem; + struct io_tlb_mem *mem = io_tlb_default_mem; void *vaddr; unsigned long bytes; - if (no_iotlb_memory || mem->late_alloc) + if (!mem || mem->late_alloc) return; - vaddr = phys_to_virt(mem->start); bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); @@ -191,55 +176,33 @@ void __init swiotlb_update_mem_attributes(void) int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) { - struct io_tlb_mem *mem = &io_tlb_default_mem; - unsigned long i, bytes; + unsigned long bytes = nslabs << IO_TLB_SHIFT, i; + struct io_tlb_mem *mem; size_t alloc_size; /* protect against double initialization */ - if (WARN_ON_ONCE(mem->start)) + if (WARN_ON_ONCE(io_tlb_default_mem)) return -ENOMEM; - bytes = nslabs << IO_TLB_SHIFT; - + alloc_size = PAGE_ALIGN(struct_size(mem, slots, nslabs)); + mem = memblock_alloc(alloc_size, PAGE_SIZE); + if (!mem) + panic("%s: Failed to allocate %zu bytes align=0x%lx\n", + __func__, alloc_size, PAGE_SIZE); mem->nslabs = nslabs; mem->start = __pa(tlb); mem->end = mem->start + bytes; mem->index = 0; spin_lock_init(&mem->lock); - - /* - * Allocate and initialize the free list array. This array is used - * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE - * between mem->start and mem->end. 
- */ - alloc_size = PAGE_ALIGN(mem->nslabs * sizeof(int)); - mem->list = memblock_alloc(alloc_size, PAGE_SIZE); - if (!mem->list) - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", - __func__, alloc_size, PAGE_SIZE); - - alloc_size = PAGE_ALIGN(mem->nslabs * sizeof(phys_addr_t)); - mem->orig_addr = memblock_alloc(alloc_size, PAGE_SIZE); - if (!mem->orig_addr) - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", - __func__, alloc_size, PAGE_SIZE); - - alloc_size = PAGE_ALIGN(mem->nslabs * sizeof(size_t)); - mem->alloc_size = memblock_alloc(alloc_size, PAGE_SIZE); - if (mem->alloc_size) - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", - __func__, alloc_size, PAGE_SIZE); - for (i = 0; i < mem->nslabs; i++) { - mem->list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); - mem->orig_addr[i] = INVALID_PHYS_ADDR; - mem->alloc_size[i] = 0; + mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); + mem->slots[i].orig_addr = INVALID_PHYS_ADDR; + mem->slots[i].alloc_size = 0; } - no_iotlb_memory = false; + io_tlb_default_mem = mem; if (verbose) swiotlb_print_info(); - swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); return 0; } @@ -251,30 +214,21 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) void __init swiotlb_init(int verbose) { - struct io_tlb_mem *mem = &io_tlb_default_mem; - size_t default_size = IO_TLB_DEFAULT_SIZE; - unsigned char *vstart; - unsigned long bytes; - - if (!mem->nslabs) { - mem->nslabs = (default_size >> IO_TLB_SHIFT); - mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE); - } - - bytes = mem->nslabs << IO_TLB_SHIFT; + size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT); + void *tlb; /* Get IO TLB memory from the low pages */ - vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE); - if (vstart && !swiotlb_init_with_tbl(vstart, mem->nslabs, verbose)) - return; - - if (mem->start) { - memblock_free_early(mem->start, - PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT)); - mem->start = 0; - } + tlb = memblock_alloc_low(bytes, PAGE_SIZE); + if (!tlb) + goto fail; + if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose)) + goto fail_free_mem; + return; + +fail_free_mem: + memblock_free_early(__pa(tlb), bytes); +fail: pr_warn("Cannot allocate buffer"); - no_iotlb_memory = true; } /* @@ -285,23 +239,19 @@ swiotlb_init(int verbose) int swiotlb_late_init_with_default_size(size_t default_size) { - struct io_tlb_mem *mem = &io_tlb_default_mem; - unsigned long bytes, req_nslabs = mem->nslabs; + unsigned long nslabs = + ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); + unsigned long bytes; unsigned char *vstart = NULL; unsigned int order; int rc = 0; - if (!mem->nslabs) { - mem->nslabs = (default_size >> IO_TLB_SHIFT); - mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE); - } - /* * Get IO TLB memory from the low pages */ - order = get_order(mem->nslabs << IO_TLB_SHIFT); - mem->nslabs = SLABS_PER_PAGE << order; - bytes = mem->nslabs << IO_TLB_SHIFT; + order = get_order(nslabs << IO_TLB_SHIFT); + nslabs = SLABS_PER_PAGE << order; + bytes = nslabs << IO_TLB_SHIFT; while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, @@ -311,43 +261,35 @@ swiotlb_late_init_with_default_size(size_t default_size) order--; } - if (!vstart) { - mem->nslabs = req_nslabs; + if (!vstart) return -ENOMEM; - } + if (order != get_order(bytes)) { pr_warn("only able to allocate %ld MB\n", (PAGE_SIZE << order) >> 20); - mem->nslabs = SLABS_PER_PAGE << order; + nslabs = SLABS_PER_PAGE << order; } - rc = 
swiotlb_late_init_with_tbl(vstart, mem->nslabs); + rc = swiotlb_late_init_with_tbl(vstart, nslabs); if (rc) free_pages((unsigned long)vstart, order); return rc; } -static void swiotlb_cleanup(void) -{ - struct io_tlb_mem *mem = &io_tlb_default_mem; - - mem->end = 0; - mem->start = 0; - mem->nslabs = 0; - max_segment = 0; -} - int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) { - struct io_tlb_mem *mem = &io_tlb_default_mem; - unsigned long i, bytes; + unsigned long bytes = nslabs << IO_TLB_SHIFT, i; + struct io_tlb_mem *mem; /* protect against double initialization */ - if (WARN_ON_ONCE(mem->start)) + if (WARN_ON_ONCE(io_tlb_default_mem)) return -ENOMEM; - bytes = nslabs << IO_TLB_SHIFT; + mem = (void *)__get_free_pages(GFP_KERNEL, + get_order(struct_size(mem, slots, nslabs))); + if (!mem) + return -ENOMEM; mem->nslabs = nslabs; mem->start = virt_to_phys(tlb); @@ -355,84 +297,35 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) mem->index = 0; mem->late_alloc = 1; spin_lock_init(&mem->lock); + for (i = 0; i < mem->nslabs; i++) { + mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); + mem->slots[i].orig_addr = INVALID_PHYS_ADDR; + mem->slots[i].alloc_size = 0; + } set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); memset(tlb, 0, bytes); - /* - * Allocate and initialize the free list array. This array is used - * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE - * between mem->start and mem->end. - */ - mem->list = (unsigned int *)__get_free_pages(GFP_KERNEL, - get_order(mem->nslabs * sizeof(int))); - if (!mem->list) - goto cleanup3; - - mem->orig_addr = (phys_addr_t *) - __get_free_pages(GFP_KERNEL, - get_order(mem->nslabs * - sizeof(phys_addr_t))); - if (!mem->orig_addr) - goto cleanup4; - - mem->alloc_size = (size_t *) - __get_free_pages(GFP_KERNEL, - get_order(mem->nslabs * - sizeof(size_t))); - if (!mem->alloc_size) - goto cleanup5; - - for (i = 0; i < mem->nslabs; i++) { - mem->list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); - mem->orig_addr[i] = INVALID_PHYS_ADDR; - mem->alloc_size[i] = 0; - } - no_iotlb_memory = false; - + io_tlb_default_mem = mem; swiotlb_print_info(); swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); return 0; - -cleanup5: - free_pages((unsigned long)mem->orig_addr, - get_order(mem->nslabs * sizeof(phys_addr_t))); -cleanup4: - free_pages((unsigned long)mem->list, - get_order(mem->nslabs * sizeof(int))); - mem->list = NULL; -cleanup3: - swiotlb_cleanup(); - return -ENOMEM; } void __init swiotlb_exit(void) { - struct io_tlb_mem *mem = &io_tlb_default_mem; + struct io_tlb_mem *mem = io_tlb_default_mem; + size_t size; - if (!mem->orig_addr) + if (!mem) return; - if (mem->late_alloc) { - free_pages((unsigned long)mem->alloc_size, - get_order(mem->nslabs * sizeof(size_t))); - free_pages((unsigned long)mem->orig_addr, - get_order(mem->nslabs * sizeof(phys_addr_t))); - free_pages((unsigned long)mem->list, - get_order(mem->nslabs * sizeof(int))); - free_pages((unsigned long)phys_to_virt(mem->start), - get_order(mem->nslabs << IO_TLB_SHIFT)); - } else { - memblock_free_late(__pa(mem->alloc_size), - PAGE_ALIGN(mem->nslabs * sizeof(size_t))); - memblock_free_late(__pa(mem->orig_addr), - PAGE_ALIGN(mem->nslabs * sizeof(phys_addr_t))); - memblock_free_late(__pa(mem->list), - PAGE_ALIGN(mem->nslabs * sizeof(int))); - memblock_free_late(mem->start, - PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT)); - } - swiotlb_cleanup(); + size = struct_size(mem, slots, mem->nslabs); + if (mem->late_alloc) + free_pages((unsigned long)mem, 
get_order(size)); + else + memblock_free_late(__pa(mem), PAGE_ALIGN(size)); + io_tlb_default_mem = NULL; } /* @@ -441,10 +334,10 @@ void __init swiotlb_exit(void) static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, enum dma_data_direction dir) { - struct io_tlb_mem *mem = &io_tlb_default_mem; + struct io_tlb_mem *mem = io_tlb_default_mem; int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; - phys_addr_t orig_addr = mem->orig_addr[index]; - size_t alloc_size = mem->alloc_size[index]; + phys_addr_t orig_addr = mem->slots[index].orig_addr; + size_t alloc_size = mem->slots[index].alloc_size; unsigned long pfn = PFN_DOWN(orig_addr); unsigned char *vaddr = phys_to_virt(tlb_addr); @@ -523,7 +416,7 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index) static int find_slots(struct device *dev, phys_addr_t orig_addr, size_t alloc_size) { - struct io_tlb_mem *mem = &io_tlb_default_mem; + struct io_tlb_mem *mem = io_tlb_default_mem; unsigned long boundary_mask = dma_get_seg_boundary(dev); dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; @@ -565,7 +458,7 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr, if (!iommu_is_span_boundary(index, nslots, nr_slots(tbl_dma_addr), max_slots)) { - if (mem->list[index] >= nslots) + if (mem->slots[index].list >= nslots) goto found; } index = wrap_index(mem, index + stride); @@ -577,11 +470,11 @@ not_found: found: for (i = index; i < index + nslots; i++) - mem->list[i] = 0; + mem->slots[i].list = 0; for (i = index - 1; io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && - mem->list[i]; i--) - mem->list[i] = ++count; + mem->slots[i].list; i--) + mem->slots[i].list = ++count; /* * Update the indices to avoid searching in the next round. @@ -600,12 +493,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, size_t mapping_size, size_t alloc_size, enum dma_data_direction dir, unsigned long attrs) { - struct io_tlb_mem *mem = &io_tlb_default_mem; + struct io_tlb_mem *mem = io_tlb_default_mem; unsigned int offset = swiotlb_align_offset(dev, orig_addr); unsigned int index, i; phys_addr_t tlb_addr; - if (no_iotlb_memory) + if (!mem) panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); if (mem_encrypt_active()) @@ -632,8 +525,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, * needed. 
*/ for (i = 0; i < nr_slots(alloc_size + offset); i++) { - mem->orig_addr[index + i] = slot_addr(orig_addr, i); - mem->alloc_size[index + i] = alloc_size - (i << IO_TLB_SHIFT); + mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); + mem->slots[index + i].alloc_size = + alloc_size - (i << IO_TLB_SHIFT); } tlb_addr = slot_addr(mem->start, index) + offset; if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && @@ -649,11 +543,11 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, size_t mapping_size, enum dma_data_direction dir, unsigned long attrs) { - struct io_tlb_mem *mem = &io_tlb_default_mem; + struct io_tlb_mem *mem = io_tlb_default_mem; unsigned long flags; unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr); int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; - int nslots = nr_slots(mem->alloc_size[index] + offset); + int nslots = nr_slots(mem->slots[index].alloc_size + offset); int count, i; /* @@ -671,7 +565,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, */ spin_lock_irqsave(&mem->lock, flags); if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE)) - count = mem->list[index + nslots]; + count = mem->slots[index + nslots].list; else count = 0; @@ -680,9 +574,9 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, * superceeding slots */ for (i = index + nslots - 1; i >= index; i--) { - mem->list[i] = ++count; - mem->orig_addr[i] = INVALID_PHYS_ADDR; - mem->alloc_size[i] = 0; + mem->slots[i].list = ++count; + mem->slots[i].orig_addr = INVALID_PHYS_ADDR; + mem->slots[i].alloc_size = 0; } /* @@ -690,9 +584,9 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, * available (non zero) */ for (i = index - 1; - io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->list[i]; + io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; i--) - mem->list[i] = ++count; + mem->slots[i].list = ++count; mem->used -= nslots; spin_unlock_irqrestore(&mem->lock, flags); } @@ -756,19 +650,17 @@ size_t swiotlb_max_mapping_size(struct device *dev) bool is_swiotlb_active(void) { - /* - * When SWIOTLB is initialized, even if mem->start points to physical - * address zero, mem->end surely doesn't. - */ - return io_tlb_default_mem.end != 0; + return io_tlb_default_mem != NULL; } #ifdef CONFIG_DEBUG_FS static int __init swiotlb_create_debugfs(void) { - struct io_tlb_mem *mem = &io_tlb_default_mem; + struct io_tlb_mem *mem = io_tlb_default_mem; + if (!mem) + return 0; mem->debugfs = debugfs_create_dir("swiotlb", NULL); debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used); -- cgit v1.2.3-71-gd317 From 2cbc2776efe4faed0e17c48ae076aa03a0fcc61f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 18 Mar 2021 17:14:24 +0100 Subject: swiotlb: remove swiotlb_nr_tbl All callers just use it to check if swiotlb is active at all, for which they can just use is_swiotlb_active. In the longer run drivers need to stop using is_swiotlb_active as well, but let's do the simple step first. 
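For the callers the conversion is mechanical; a rough sketch of the resulting pattern (example_max_segment() is illustrative only, the real conversions are in the hunks below):

    static unsigned int example_max_segment(void)
    {
            /* swiotlb may bounce, so honour its segment size cap */
            if (is_swiotlb_active())
                    return swiotlb_max_segment();

            return UINT_MAX;
    }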
Signed-off-by: Christoph Hellwig Signed-off-by: Konrad Rzeszutek Wilk --- drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2 +- drivers/gpu/drm/nouveau/nouveau_ttm.c | 2 +- drivers/pci/xen-pcifront.c | 2 +- include/linux/swiotlb.h | 1 - kernel/dma/swiotlb.c | 7 +------ 5 files changed, 4 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index ad22f42541bd..a9d65fc8aa0e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) max_order = MAX_ORDER; #ifdef CONFIG_SWIOTLB - if (swiotlb_nr_tbl()) { + if (is_swiotlb_active()) { unsigned int max_segment; max_segment = swiotlb_max_segment(); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index a37bc3d7b38b..9662522aa066 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -321,7 +321,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) } #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) - need_swiotlb = !!swiotlb_nr_tbl(); + need_swiotlb = is_swiotlb_active(); #endif ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver, diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 2d7502648219..b7a8f3a1921f 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -693,7 +693,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev) spin_unlock(&pcifront_dev_lock); - if (!err && !swiotlb_nr_tbl()) { + if (!err && !is_swiotlb_active()) { err = pci_xen_swiotlb_init_late(); if (err) dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n"); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 63f7a63f61d0..216854a5e513 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -37,7 +37,6 @@ enum swiotlb_force { extern void swiotlb_init(int verbose); int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); -extern unsigned long swiotlb_nr_tbl(void); unsigned long swiotlb_size_or_default(void); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); extern int swiotlb_late_init_with_default_size(size_t default_size); diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 13de669a9b46..539c76beb52e 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -94,12 +94,6 @@ setup_io_tlb_npages(char *str) } early_param("swiotlb", setup_io_tlb_npages); -unsigned long swiotlb_nr_tbl(void) -{ - return io_tlb_default_mem ? io_tlb_default_mem->nslabs : 0; -} -EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); - unsigned int swiotlb_max_segment(void) { return io_tlb_default_mem ? 
max_segment : 0; @@ -652,6 +646,7 @@ bool is_swiotlb_active(void) { return io_tlb_default_mem != NULL; } +EXPORT_SYMBOL_GPL(is_swiotlb_active); #ifdef CONFIG_DEBUG_FS -- cgit v1.2.3-71-gd317 From 84fcfbdadbfdd86c9a43a52703203e05fe7efd92 Mon Sep 17 00:00:00 2001 From: Wang Qing Date: Fri, 12 Mar 2021 10:19:12 +0800 Subject: dma-mapping: remove a pointless empty line in dma_alloc_coherent Signed-off-by: Wang Qing Signed-off-by: Christoph Hellwig --- include/linux/dma-mapping.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 954847f9a3e0..e9d19b974f26 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -435,7 +435,6 @@ static inline void dma_sync_sgtable_for_device(struct device *dev, static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - return dma_alloc_attrs(dev, size, dma_handle, gfp, (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0); } -- cgit v1.2.3-71-gd317 From a2bbe66493ee380eb25e080e7fcdd1278a847f7e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 7 Jul 2019 09:57:53 -0400 Subject: constify dentry argument of dentry_path()/dentry_path_raw() Signed-off-by: Al Viro --- fs/d_path.c | 10 +++++----- include/linux/dcache.h | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/fs/d_path.c b/fs/d_path.c index a69e2cd36e6e..270d62133996 100644 --- a/fs/d_path.c +++ b/fs/d_path.c @@ -326,9 +326,9 @@ char *simple_dname(struct dentry *dentry, char *buffer, int buflen) /* * Write full pathname from the root of the filesystem into the buffer. */ -static char *__dentry_path(struct dentry *d, char *buf, int buflen) +static char *__dentry_path(const struct dentry *d, char *buf, int buflen) { - struct dentry *dentry; + const struct dentry *dentry; char *end, *retval; int len, seq = 0; int error = 0; @@ -347,7 +347,7 @@ restart: *retval = '/'; read_seqbegin_or_lock(&rename_lock, &seq); while (!IS_ROOT(dentry)) { - struct dentry *parent = dentry->d_parent; + const struct dentry *parent = dentry->d_parent; prefetch(parent); error = prepend_name(&end, &len, &dentry->d_name); @@ -371,13 +371,13 @@ Elong: return ERR_PTR(-ENAMETOOLONG); } -char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen) +char *dentry_path_raw(const struct dentry *dentry, char *buf, int buflen) { return __dentry_path(dentry, buf, buflen); } EXPORT_SYMBOL(dentry_path_raw); -char *dentry_path(struct dentry *dentry, char *buf, int buflen) +char *dentry_path(const struct dentry *dentry, char *buf, int buflen) { char *p = NULL; char *retval; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c1e48014106f..4ecde5d8250c 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -300,8 +300,8 @@ char *dynamic_dname(struct dentry *, char *, int, const char *, ...); extern char *__d_path(const struct path *, const struct path *, char *, int); extern char *d_absolute_path(const struct path *, char *, int); extern char *d_path(const struct path *, char *, int); -extern char *dentry_path_raw(struct dentry *, char *, int); -extern char *dentry_path(struct dentry *, char *, int); +extern char *dentry_path_raw(const struct dentry *, char *, int); +extern char *dentry_path(const struct dentry *, char *, int); /* Allocation counts.. 
*/ -- cgit v1.2.3-71-gd317 From 9666cec380d60808eb86d3be4caf84faeebe3081 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Mon, 7 Dec 2020 14:45:56 +0100 Subject: pwm: Drop function pwmchip_add_with_polarity() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit pwmchip_add() only calls pwmchip_add_with_polarity() and nothing else. All other users of pwmchip_add_with_polarity() are gone. So drop pwmchip_add_with_polarity() and move the code instead to pwmchip_add(). The initial assignment to pwm->state.polarity is dropped. In every correct usage of the PWM API this value is overwritten later anyhow. Signed-off-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- drivers/pwm/core.c | 25 +++---------------------- include/linux/pwm.h | 2 -- 2 files changed, 3 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 25ee06a14bb3..c4d5c0667137 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -250,17 +250,14 @@ static bool pwm_ops_check(const struct pwm_chip *chip) } /** - * pwmchip_add_with_polarity() - register a new PWM chip + * pwmchip_add() - register a new PWM chip * @chip: the PWM chip to add - * @polarity: initial polarity of PWM channels * - * Register a new PWM chip. The initial polarity for all channels is specified - * by the @polarity parameter. + * Register a new PWM chip. * * Returns: 0 on success or a negative error code on failure. */ -int pwmchip_add_with_polarity(struct pwm_chip *chip, - enum pwm_polarity polarity) +int pwmchip_add(struct pwm_chip *chip) { struct pwm_device *pwm; unsigned int i; @@ -292,7 +289,6 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, pwm->chip = chip; pwm->pwm = chip->base + i; pwm->hwpwm = i; - pwm->state.polarity = polarity; radix_tree_insert(&pwm_tree, pwm->pwm, pwm); } @@ -315,21 +311,6 @@ out: return ret; } -EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity); - -/** - * pwmchip_add() - register a new PWM chip - * @chip: the PWM chip to add - * - * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base - * will be used. The initial polarity for all channels is normal. - * - * Returns: 0 on success or a negative error code on failure. - */ -int pwmchip_add(struct pwm_chip *chip) -{ - return pwmchip_add_with_polarity(chip, PWM_POLARITY_NORMAL); -} EXPORT_SYMBOL_GPL(pwmchip_add); /** diff --git a/include/linux/pwm.h b/include/linux/pwm.h index e4d84d4db293..8f4eefd129aa 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -392,8 +392,6 @@ int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, int pwm_set_chip_data(struct pwm_device *pwm, void *data); void *pwm_get_chip_data(struct pwm_device *pwm); -int pwmchip_add_with_polarity(struct pwm_chip *chip, - enum pwm_polarity polarity); int pwmchip_add(struct pwm_chip *chip); int pwmchip_remove(struct pwm_chip *chip); struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, -- cgit v1.2.3-71-gd317 From f2cc020d7876de7583feb52ec939a32419cf9468 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 23 Mar 2021 18:49:35 +0100 Subject: tracing: Fix various typos in comments Fix ~59 single-word typos in the tracing code comments, and fix the grammar in a handful of places. 
Link: https://lore.kernel.org/r/20210322224546.GA1981273@gmail.com Link: https://lkml.kernel.org/r/20210323174935.GA4176821@gmail.com Reviewed-by: Randy Dunlap Signed-off-by: Ingo Molnar Signed-off-by: Steven Rostedt (VMware) --- arch/microblaze/include/asm/ftrace.h | 2 +- arch/nds32/kernel/ftrace.c | 2 +- arch/powerpc/include/asm/ftrace.h | 4 ++-- arch/sh/kernel/ftrace.c | 2 +- arch/sparc/include/asm/ftrace.h | 2 +- fs/tracefs/inode.c | 2 +- include/linux/ftrace.h | 4 ++-- include/linux/trace_events.h | 2 +- include/linux/tracepoint.h | 2 +- include/trace/events/io_uring.h | 2 +- include/trace/events/rcu.h | 2 +- include/trace/events/sched.h | 2 +- include/trace/events/timer.h | 2 +- kernel/trace/bpf_trace.c | 5 +++-- kernel/trace/fgraph.c | 4 ++-- kernel/trace/ftrace.c | 8 ++++---- kernel/trace/ring_buffer.c | 2 +- kernel/trace/synth_event_gen_test.c | 2 +- kernel/trace/trace.c | 18 +++++++++--------- kernel/trace/trace.h | 4 ++-- kernel/trace/trace_event_perf.c | 2 +- kernel/trace/trace_events.c | 4 ++-- kernel/trace/trace_events_filter.c | 4 ++-- kernel/trace/trace_events_synth.c | 2 +- kernel/trace/trace_functions_graph.c | 2 +- kernel/trace/trace_hwlat.c | 4 ++-- kernel/trace/trace_kprobe.c | 2 +- kernel/trace/trace_probe.c | 6 +++--- kernel/trace/trace_probe.h | 2 +- kernel/trace/trace_probe_tmpl.h | 2 +- kernel/trace/trace_selftest.c | 4 ++-- kernel/trace/trace_seq.c | 12 ++++++------ 32 files changed, 60 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h index 5db7f4489f05..6a92bed37794 100644 --- a/arch/microblaze/include/asm/ftrace.h +++ b/arch/microblaze/include/asm/ftrace.h @@ -13,7 +13,7 @@ extern void ftrace_call_graph(void); #endif #ifdef CONFIG_DYNAMIC_FTRACE -/* reloction of mcount call site is the same as the address */ +/* relocation of mcount call site is the same as the address */ static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index 414f8a780cc3..0e23e3a8df6b 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -236,7 +236,7 @@ void __naked return_to_handler(void) "bal ftrace_return_to_handler\n\t" "move $lp, $r0 \n\t" - /* restore state nedded by the ABI */ + /* restore state needed by the ABI */ "lmw.bim $r0,[$sp],$r1,#0x0 \n\t"); } diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index bc76970b6ee5..debe8c4f7062 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -12,7 +12,7 @@ #ifdef __ASSEMBLY__ -/* Based off of objdump optput from glibc */ +/* Based off of objdump output from glibc */ #define MCOUNT_SAVE_FRAME \ stwu r1,-48(r1); \ @@ -52,7 +52,7 @@ extern void _mcount(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { - /* reloction of mcount call site is the same as the address */ + /* relocation of mcount call site is the same as the address */ return addr; } diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 0646c5961846..295c43315bbe 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -67,7 +67,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) * Modifying code must take extra care. On an SMP machine, if * the code being modified is also being executed on another CPU * that CPU will have undefined results and possibly take a GPF. 
- * We use kstop_machine to stop other CPUS from exectuing code. + * We use kstop_machine to stop other CPUS from executing code. * But this does not stop NMIs from happening. We still need * to protect against that. We separate out the modification of * the code to take care of this. diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h index d3aa1a524431..e284394cb3aa 100644 --- a/arch/sparc/include/asm/ftrace.h +++ b/arch/sparc/include/asm/ftrace.h @@ -17,7 +17,7 @@ void _mcount(void); #endif #ifdef CONFIG_DYNAMIC_FTRACE -/* reloction of mcount call site is the same as the address */ +/* relocation of mcount call site is the same as the address */ static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index 4b83cbded559..1261e8b41edb 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -477,7 +477,7 @@ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent) * * The instances directory is special as it allows for mkdir and rmdir to * to be done by userspace. When a mkdir or rmdir is performed, the inode - * locks are released and the methhods passed in (@mkdir and @rmdir) are + * locks are released and the methods passed in (@mkdir and @rmdir) are * called without locks and with the name of the directory being created * within the instances directory. * diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 86e5028bfa20..a69f363b61bf 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -33,7 +33,7 @@ /* * If the arch's mcount caller does not support all of ftrace's * features, then it must call an indirect function that - * does. Or at least does enough to prevent any unwelcomed side effects. + * does. Or at least does enough to prevent any unwelcome side effects. */ #if !ARCH_SUPPORTS_FTRACE_OPS # define FTRACE_FORCE_LIST_FUNC 1 @@ -389,7 +389,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer); */ static inline void stack_tracer_disable(void) { - /* Preemption or interupts must be disabled */ + /* Preemption or interrupts must be disabled */ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); this_cpu_inc(disable_stack_tracer); diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 8cba64ce23a4..36e27c1f42e0 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -206,7 +206,7 @@ static inline unsigned int tracing_gen_ctx_dec(void) trace_ctx = tracing_gen_ctx(); /* - * Subtract one from the preeption counter if preemption is enabled, + * Subtract one from the preemption counter if preemption is enabled, * see trace_event_buffer_reserve()for details. 
*/ if (IS_ENABLED(CONFIG_PREEMPTION)) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 9cfb099da58f..13f65420f188 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -465,7 +465,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * * * * The declared 'local variable' is called '__entry' * * - * * __field(pid_t, prev_prid) is equivalent to a standard declariton: + * * __field(pid_t, prev_prid) is equivalent to a standard declaration: * * * * pid_t prev_pid; * * diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 9f0d3b7d56b0..ba78a5602cd1 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -49,7 +49,7 @@ TRACE_EVENT(io_uring_create, ); /** - * io_uring_register - called after a buffer/file/eventfd was succesfully + * io_uring_register - called after a buffer/file/eventfd was successfully * registered for a ring * * @ctx: pointer to a ring context structure diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 5fc29400e1a2..97177c10bf64 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -48,7 +48,7 @@ TRACE_EVENT(rcu_utilization, * RCU flavor, the grace-period number, and a string identifying the * grace-period-related event as follows: * - * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL. + * "AccReadyCB": CPU accelerates new callbacks to RCU_NEXT_READY_TAIL. * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL. * "newreq": Request a new grace period. * "start": Start a grace period. diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index cbe3e152d24c..1eca2305ca42 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -174,7 +174,7 @@ DEFINE_EVENT(sched_wakeup_template, sched_waking, TP_ARGS(p)); /* - * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG. + * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING. * It is not always called from the waking context. */ DEFINE_EVENT(sched_wakeup_template, sched_wakeup, diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 19abb6c3eb73..6ad031c71be7 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -119,7 +119,7 @@ TRACE_EVENT(timer_expire_entry, * When used in combination with the timer_expire_entry tracepoint we can * determine the runtime of the timer callback function. * - * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might + * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might * be invalid. We solely track the pointer. 
*/ DEFINE_EVENT(timer_class, timer_expire_exit, diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index b0c45d923f0f..49f7ddb36149 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -658,7 +658,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ i++; - /* skip optional "[0 +-][num]" width formating field */ + /* skip optional "[0 +-][num]" width formatting field */ while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || fmt[i] == ' ') i++; @@ -748,7 +748,8 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, fmt_cnt++; } - /* Maximumly we can have MAX_SEQ_PRINTF_VARARGS parameter, just give + /* + * The maximum we can have is MAX_SEQ_PRINTF_VARARGS parameters, so just give * all of them to seq_printf(). */ seq_printf(m, fmt, params[0], params[1], params[2], params[3], diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index 29a6ebeebc9e..b8a0d1d564fb 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -42,7 +42,7 @@ bool ftrace_graph_is_dead(void) } /** - * ftrace_graph_stop - set to permanently disable function graph tracincg + * ftrace_graph_stop - set to permanently disable function graph tracing * * In case of an error int function graph tracing, this is called * to try to keep function graph tracing from causing any more harm. @@ -117,7 +117,7 @@ int function_graph_enter(unsigned long ret, unsigned long func, /* * Skip graph tracing if the return location is served by direct trampoline, - * since call sequence and return addresses is unpredicatable anymore. + * since call sequence and return addresses are unpredictable anyway. * Ex: BPF trampoline may call original function and may skip frame * depending on type of BPF programs attached. */ diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 4d8e35575549..eb7d677cb466 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1045,7 +1045,7 @@ struct ftrace_ops global_ops = { }; /* - * Used by the stack undwinder to know about dynamic ftrace trampolines. + * Used by the stack unwinder to know about dynamic ftrace trampolines. */ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr) { @@ -3000,7 +3000,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command) * When the kernel is preemptive, tasks can be preempted * while on a ftrace trampoline. Just scheduling a task on * a CPU is not good enough to flush them. Calling - * synchornize_rcu_tasks() will wait for those tasks to + * synchronize_rcu_tasks() will wait for those tasks to * execute and either schedule voluntarily or enter user space. */ if (IS_ENABLED(CONFIG_PREEMPTION)) @@ -5373,7 +5373,7 @@ EXPORT_SYMBOL_GPL(modify_ftrace_direct); * @reset - non zero to reset all filters before applying this filter. * * Filters denote which functions should be enabled when tracing is enabled - * If @ip is NULL, it failes to update filter. + * If @ip is NULL, it fails to update filter. 
*/ int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset) @@ -6325,7 +6325,7 @@ clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) } } -/* Clear any records from hashs */ +/* Clear any records from hashes */ static void clear_mod_from_hashes(struct ftrace_page *pg) { struct trace_array *tr; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1c61a8cd7b99..f4216df58e31 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3154,7 +3154,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) * is called before preempt_count() is updated, since the check will * be on the NORMAL bit, the TRANSITION bit will then be set. If an * NMI then comes in, it will set the NMI bit, but when the NMI code - * does the trace_recursive_unlock() it will clear the TRANSTION bit + * does the trace_recursive_unlock() it will clear the TRANSITION bit * and leave the NMI bit set. But this is fine, because the interrupt * code that set the TRANSITION bit will then clear the NMI bit when it * calls trace_recursive_unlock(). If another NMI comes in, it will diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c index a4b4bbf8c3bf..0b15e975d2c2 100644 --- a/kernel/trace/synth_event_gen_test.c +++ b/kernel/trace/synth_event_gen_test.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Test module for in-kernel sythetic event creation and generation. + * Test module for in-kernel synthetic event creation and generation. * * Copyright (C) 2019 Tom Zanussi */ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index e32f5a49f1cf..c8e54b674d3e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -514,7 +514,7 @@ void trace_free_pid_list(struct trace_pid_list *pid_list) * @filtered_pids: The list of pids to check * @search_pid: The PID to find in @filtered_pids * - * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis. + * Returns true if @search_pid is found in @filtered_pids, and false otherwise. */ bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid) @@ -545,7 +545,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task) { /* - * If filterd_no_pids is not empty, and the task's pid is listed + * If filtered_no_pids is not empty, and the task's pid is listed * in filtered_no_pids, then return true. * Otherwise, if filtered_pids is empty, that means we can * trace all tasks. If it has content, then only trace pids @@ -612,7 +612,7 @@ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos) (*pos)++; - /* pid already is +1 of the actual prevous bit */ + /* pid already is +1 of the actual previous bit */ pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid); /* Return pid + 1 to allow zero to be represented */ @@ -834,7 +834,7 @@ DEFINE_MUTEX(trace_types_lock); * The content of events may become garbage if we allow other process consumes * these events concurrently: * A) the page of the consumed events may become a normal page - * (not reader page) in ring buffer, and this page will be rewrited + * (not reader page) in ring buffer, and this page will be rewritten * by events producer. * B) The page of the consumed events may become a page for splice_read, * and this page will be returned to system. 
@@ -1520,7 +1520,7 @@ unsigned long nsecs_to_usecs(unsigned long nsecs) #undef C #define C(a, b) b -/* These must match the bit postions in trace_iterator_flags */ +/* These must match the bit positions in trace_iterator_flags */ static const char *trace_options[] = { TRACE_FLAGS NULL @@ -3368,7 +3368,7 @@ int trace_array_vprintk(struct trace_array *tr, * buffer (use trace_printk() for that), as writing into the top level * buffer should only have events that can be individually disabled. * trace_printk() is only used for debugging a kernel, and should not - * be ever encorporated in normal use. + * be ever incorporated in normal use. * * trace_array_printk() can be used, as it will not add noise to the * top level tracing buffer. @@ -7533,11 +7533,11 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) * @cmd: The tracing command that caused the error * @str: The string to position the caret at within @cmd * - * Finds the position of the first occurence of @str within @cmd. The + * Finds the position of the first occurrence of @str within @cmd. The * return value can be passed to tracing_log_err() for caret placement * within @cmd. * - * Returns the index within @cmd of the first occurence of @str or 0 + * Returns the index within @cmd of the first occurrence of @str or 0 * if @str was not found. */ unsigned int err_pos(char *cmd, const char *str) @@ -9320,7 +9320,7 @@ int tracing_init_dentry(void) * As there may still be users that expect the tracing * files to exist in debugfs/tracing, we must automount * the tracefs file system there, so older tools still - * work with the newer kerenl. + * work with the newer kernel. */ tr->dir = debugfs_create_automount("tracing", NULL, trace_automount, NULL); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 2952bd92bc62..5506424eae2a 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1347,7 +1347,7 @@ __event_trigger_test_discard(struct trace_event_file *file, /** * event_trigger_unlock_commit - handle triggers and finish event commit - * @file: The file pointer assoctiated to the event + * @file: The file pointer associated with the event * @buffer: The ring buffer that the event is being written to * @event: The event meta data in the ring buffer * @entry: The event itself @@ -1374,7 +1374,7 @@ event_trigger_unlock_commit(struct trace_event_file *file, /** * event_trigger_unlock_commit_regs - handle triggers and finish event commit - * @file: The file pointer assoctiated to the event + * @file: The file pointer associated with the event * @buffer: The ring buffer that the event is being written to * @event: The event meta data in the ring buffer * @entry: The event itself diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 288ad2c274fb..03be4435d103 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -16,7 +16,7 @@ static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS]; /* * Force it to be aligned to unsigned long to avoid misaligned accesses - * suprises + * surprises */ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)]) perf_trace_t; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index f58106eaf8cb..80e96989770e 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2646,7 +2646,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len) } /* - * Since calls are grouped by systems, the likelyhood that the + * Since calls are grouped by systems, 
the likelihood that the * next call in the iteration belongs to the same system as the * previous call is high. As an optimization, we skip searching * for a map[] that matches the call's system if the last call @@ -2706,7 +2706,7 @@ __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) } /* - * Just create a decriptor for early init. A descriptor is required + * Just create a descriptor for early init. A descriptor is required * for enabling events at boot. We want to enable events before * the filesystem is initialized. */ diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index e91259f6a722..9730acf3c03e 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -296,7 +296,7 @@ enum { * and "FALSE" the program entry after that, we are now done with the first * pass. * - * Making the above "a || b && c" have a progam of: + * Making the above "a || b && c" have a program of: * prog[0] = { "a", 1, 2 } * prog[1] = { "b", 0, 2 } * prog[2] = { "c", 0, 3 } @@ -390,7 +390,7 @@ enum { * F: return FALSE * * As "r = a; if (!r) goto n5;" is obviously the same as - * "if (!a) goto n5;" without doing anything we can interperate the + * "if (!a) goto n5;" without doing anything we can interpret the * program as: * n1: if (!a) goto n5; * n2: if (!b) goto n5; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 8d71e6c83f10..2ac75eb6aa86 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -1385,7 +1385,7 @@ static int destroy_synth_event(struct synth_event *se) /** * synth_event_delete - Delete a synthetic event - * @event_name: The name of the new sythetic event + * @event_name: The name of the new synthetic event * * Delete a synthetic event that was created with synth_event_create(). * diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 0aa6e6faa943..0de6837722da 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -764,7 +764,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, * - we are inside irq code * - we just entered irq code * - * retunns 0 if + * returns 0 if * - funcgraph-interrupts option is set * - we are not inside irq code */ diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index 34dc1a712dcb..632ef88131a9 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -83,7 +83,7 @@ struct hwlat_sample { u64 nmi_total_ts; /* Total time spent in NMIs */ struct timespec64 timestamp; /* wall time */ int nmi_count; /* # NMIs during this sample */ - int count; /* # of iteratons over threash */ + int count; /* # of iterations over thresh */ }; /* keep the global state somewhere. */ @@ -389,7 +389,7 @@ static int start_kthread(struct trace_array *tr) } /** - * stop_kthread - Inform the hardware latency samping/detector kthread to stop + * stop_kthread - Inform the hardware latency sampling/detector kthread to stop * * This kicks the running hardware latency sampling/detector kernel thread and * tells it to stop sampling now. Use this on unload and at system shutdown. 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 6fe770d86dc3..ea6178cb5e33 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1748,7 +1748,7 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE)) kretprobe_perf_func(tk, ri, regs); #endif - return 0; /* We don't tweek kernel, so just return 0 */ + return 0; /* We don't tweak kernel, so just return 0 */ } NOKPROBE_SYMBOL(kretprobe_dispatcher); diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index ec589a4612df..15413ad7cef2 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -168,7 +168,7 @@ void __trace_probe_log_err(int offset, int err_type) if (!trace_probe_log.argv) return; - /* Recalcurate the length and allocate buffer */ + /* Recalculate the length and allocate buffer */ for (i = 0; i < trace_probe_log.argc; i++) { if (i == trace_probe_log.index) pos = len; @@ -182,7 +182,7 @@ void __trace_probe_log_err(int offset, int err_type) /** * Set the error position is next to the last arg + space. * Note that len includes the terminal null and the cursor - * appaers at pos + 1. + * appears at pos + 1. */ pos = len; offset = 0; @@ -592,7 +592,7 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, } /* - * Since $comm and immediate string can not be dereferred, + * Since $comm and immediate string can not be dereferenced, * we can find those by strcmp. */ if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) { diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 7ce4027089ee..227d518e5ba5 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -134,7 +134,7 @@ struct fetch_type { size_t size; /* Byte size of type */ int is_signed; /* Signed flag */ print_type_func_t print; /* Print functions */ - const char *fmt; /* Fromat string */ + const char *fmt; /* Format string */ const char *fmttype; /* Name in format file */ }; diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index e5282828f4a6..f003c5d02a3a 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h @@ -167,7 +167,7 @@ array: return code->op == FETCH_OP_END ? ret : -EILSEQ; } -/* Sum up total data length for dynamic arraies (strings) */ +/* Sum up total data length for dynamic arrays (strings) */ static nokprobe_inline int __get_data_size(struct trace_probe *tp, struct pt_regs *regs) { diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 73ef12092250..adf7ef194005 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -878,7 +878,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) int ret; /* - * Now that the big kernel lock is no longer preemptable, + * Now that the big kernel lock is no longer preemptible, * and this is called with the BKL held, it will always * fail. If preemption is already disabled, simply * pass the test. When the BKL is removed, or becomes @@ -940,7 +940,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * int ret; /* - * Now that the big kernel lock is no longer preemptable, + * Now that the big kernel lock is no longer preemptible, * and this is called with the BKL held, it will always * fail. If preemption is already disabled, simply * pass the test. 
When the BKL is removed, or becomes diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c index 1d84fcc78e3e..9c90b3a7dce2 100644 --- a/kernel/trace/trace_seq.c +++ b/kernel/trace/trace_seq.c @@ -16,7 +16,7 @@ * The buffer size is currently PAGE_SIZE, although it may become dynamic * in the future. * - * A write to the buffer will either succed or fail. That is, unlike + * A write to the buffer will either succeed or fail. That is, unlike * sprintf() there will not be a partial write (well it may write into * the buffer but it wont update the pointers). This allows users to * try to write something into the trace_seq buffer and if it fails @@ -73,7 +73,7 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s) * @fmt: printf format string * * The tracer may use either sequence operations or its own - * copy to user routines. To simplify formating of a trace + * copy to user routines. To simplify formatting of a trace * trace_seq_printf() is used to store strings into a special * buffer (@s). Then the output may be either used by * the sequencer or pulled into another buffer. @@ -133,7 +133,7 @@ EXPORT_SYMBOL_GPL(trace_seq_bitmask); * @fmt: printf format string * * The tracer may use either sequence operations or its own - * copy to user routines. To simplify formating of a trace + * copy to user routines. To simplify formatting of a trace * trace_seq_printf is used to store strings into a special * buffer (@s). Then the output may be either used by * the sequencer or pulled into another buffer. @@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(trace_seq_puts); * @c: simple character to record * * The tracer may use either the sequence operations or its own - * copy to user routines. This function records a simple charater + * copy to user routines. This function records a simple character * into a special buffer (@s) for later retrieval by a sequencer * or other mechanism. */ @@ -348,7 +348,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path) EXPORT_SYMBOL_GPL(trace_seq_path); /** - * trace_seq_to_user - copy the squence buffer to user space + * trace_seq_to_user - copy the sequence buffer to user space * @s: trace sequence descriptor * @ubuf: The userspace memory location to copy to * @cnt: The amount to copy @@ -363,7 +363,7 @@ EXPORT_SYMBOL_GPL(trace_seq_path); * * On failure it returns -EBUSY if all of the content in the * sequence has been already read, which includes nothing in the - * sequenc (@s->len == @s->readpos). + * sequence (@s->len == @s->readpos). * * Returns -EFAULT if the copy to userspace fails. */ -- cgit v1.2.3-71-gd317 From 67196fea0fcef92b25608882f62f3985bc59f1fe Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 9 Mar 2021 11:37:31 +0200 Subject: irqdomain: Introduce irq_domain_create_simple() API Linus Walleij pointed out that ird_domain_add_simple() gained additional functionality and can't be anymore replaced with a simple conditional. In preparation to upgrade GPIO library to use fwnode, introduce irq_domain_create_simple() API which is functional equivalent to the existing irq_domain_add_simple(), but takes a pointer to the struct fwnode_handle as a parameter. While at it, amend documentation to mention irq_domain_create_*() functions where it makes sense. 
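As a rough usage sketch (not part of this patch; the driver name, map callback and error handling are invented for illustration), a fwnode-based irqchip driver could register its domain with the new API like so:

	static const struct irq_domain_ops example_irq_domain_ops = {
		.map	= example_irq_map,		/* hypothetical per-driver map callback */
		.xlate	= irq_domain_xlate_onecell,	/* standard one-cell translation helper */
	};

	/* No OF node needed: take the fwnode straight from the device */
	priv->domain = irq_domain_create_simple(dev_fwnode(dev), 32, 0,
						&example_irq_domain_ops, priv);
	if (!priv->domain)
		return -ENOMEM;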
Signed-off-by: Andy Shevchenko Acked-by: Marc Zyngier Signed-off-by: Bartosz Golaszewski --- Documentation/core-api/irq/irq-domain.rst | 22 ++++++++++++---------- include/linux/irqdomain.h | 19 ++++++++++++++----- kernel/irq/irqdomain.c | 20 ++++++++++---------- 3 files changed, 36 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/Documentation/core-api/irq/irq-domain.rst b/Documentation/core-api/irq/irq-domain.rst index a77c24c27f7b..8214e215a8bf 100644 --- a/Documentation/core-api/irq/irq-domain.rst +++ b/Documentation/core-api/irq/irq-domain.rst @@ -42,10 +42,10 @@ irq_domain usage ================ An interrupt controller driver creates and registers an irq_domain by -calling one of the irq_domain_add_*() functions (each mapping method -has a different allocator function, more on that later). The function -will return a pointer to the irq_domain on success. The caller must -provide the allocator function with an irq_domain_ops structure. +calling one of the irq_domain_add_*() or irq_domain_create_*() functions +(each mapping method has a different allocator function, more on that later). +The function will return a pointer to the irq_domain on success. The caller +must provide the allocator function with an irq_domain_ops structure. In most cases, the irq_domain will begin empty without any mappings between hwirq and IRQ numbers. Mappings are added to the irq_domain @@ -147,6 +147,7 @@ Legacy irq_domain_add_simple() irq_domain_add_legacy() irq_domain_add_legacy_isa() + irq_domain_create_simple() irq_domain_create_legacy() The Legacy mapping is a special case for drivers that already have a @@ -169,13 +170,13 @@ supported. For example, ISA controllers would use the legacy map for mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ numbers. -Most users of legacy mappings should use irq_domain_add_simple() which -will use a legacy domain only if an IRQ range is supplied by the -system and will otherwise use a linear domain mapping. The semantics -of this call are such that if an IRQ range is specified then +Most users of legacy mappings should use irq_domain_add_simple() or +irq_domain_create_simple() which will use a legacy domain only if an IRQ range +is supplied by the system and will otherwise use a linear domain mapping. +The semantics of this call are such that if an IRQ range is specified then descriptors will be allocated on-the-fly for it, and if no range is -specified it will fall through to irq_domain_add_linear() which means -*no* irq descriptors will be allocated. +specified it will fall through to irq_domain_add_linear() or +irq_domain_create_linear() which means *no* irq descriptors will be allocated. A typical use case for simple domains is where an irqchip provider is supporting both dynamic and static IRQ assignments. @@ -186,6 +187,7 @@ that the driver using the simple domain call irq_create_mapping() before any irq_find_mapping() since the latter will actually work for the static IRQ assignment case. 
+irq_domain_add_simple() and irq_domain_create_simple() as well as irq_domain_add_legacy() and irq_domain_create_legacy() are functionally equivalent, except for the first argument is different - the former accepts an Open Firmware specific 'struct device_node', while the latter diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 33cacc8af26d..1ad8d5328715 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -256,11 +256,11 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, irq_hw_number_t hwirq_max, int direct_max, const struct irq_domain_ops *ops, void *host_data); -struct irq_domain *irq_domain_add_simple(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - const struct irq_domain_ops *ops, - void *host_data); +struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data); struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, unsigned int size, unsigned int first_irq, @@ -325,6 +325,15 @@ static inline struct irq_domain *irq_find_host(struct device_node *node) return d; } +static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data) +{ + return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data); +} + /** * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index d10ab1d689d5..2681dc43813c 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -295,8 +295,8 @@ void irq_domain_update_bus_token(struct irq_domain *domain, EXPORT_SYMBOL_GPL(irq_domain_update_bus_token); /** - * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs - * @of_node: pointer to interrupt controller's device tree node. + * irq_domain_create_simple() - Register an irq_domain and optionally map a range of irqs + * @fwnode: firmware node for the interrupt controller * @size: total number of irqs in mapping * @first_irq: first number of irq block assigned to the domain, * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then @@ -312,15 +312,15 @@ EXPORT_SYMBOL_GPL(irq_domain_update_bus_token); * irqs get mapped dynamically on the fly. However, if the controller requires * static virq assignments (non-DT boot) then it will set that up correctly. 
*/ -struct irq_domain *irq_domain_add_simple(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - const struct irq_domain_ops *ops, - void *host_data) +struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data) { struct irq_domain *domain; - domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); + domain = __irq_domain_add(fwnode, size, size, 0, ops, host_data); if (!domain) return NULL; @@ -328,7 +328,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node, if (IS_ENABLED(CONFIG_SPARSE_IRQ)) { /* attempt to allocated irq_descs */ int rc = irq_alloc_descs(first_irq, first_irq, size, - of_node_to_nid(of_node)); + of_node_to_nid(to_of_node(fwnode))); if (rc < 0) pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", first_irq); @@ -338,7 +338,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node, return domain; } -EXPORT_SYMBOL_GPL(irq_domain_add_simple); +EXPORT_SYMBOL_GPL(irq_domain_create_simple); /** * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. -- cgit v1.2.3-71-gd317 From 2d93018fe67d42c44d65a898da2a6a5a0209b9ee Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 23 Mar 2021 15:19:05 -0700 Subject: gpiolib: some edits of kernel docs for clarity Fix a few typos and some punctuation. Also, change CONFIG_OF to CONFIG_OF_GPIO in one comment. Signed-off-by: Randy Dunlap Cc: Linus Walleij Cc: Bartosz Golaszewski Cc: linux-gpio@vger.kernel.org Reviewed-by: Linus Walleij [Bartosz: tweaked the commit message] Signed-off-by: Bartosz Golaszewski --- include/linux/gpio/driver.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 286de0520574..63283de9daeb 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -227,7 +227,7 @@ struct gpio_irq_chip { /** * @valid_mask: * - * If not %NULL holds bitmask of GPIOs which are valid to be included + * If not %NULL, holds bitmask of GPIOs which are valid to be included * in IRQ domain of the chip. */ unsigned long *valid_mask; @@ -346,7 +346,7 @@ struct gpio_irq_chip { * output. * * A gpio_chip can help platforms abstract various sources of GPIOs so - * they can all be accessed through a common programing interface. + * they can all be accessed through a common programming interface. * Example sources would be SOC controllers, FPGAs, multifunction * chips, dedicated GPIO expanders, and so on. * @@ -435,15 +435,15 @@ struct gpio_chip { /** * @valid_mask: * - * If not %NULL holds bitmask of GPIOs which are valid to be used + * If not %NULL, holds bitmask of GPIOs which are valid to be used * from the chip. */ unsigned long *valid_mask; #if defined(CONFIG_OF_GPIO) /* - * If CONFIG_OF is enabled, then all GPIO controllers described in the - * device tree automatically may have an OF translation + * If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in + * the device tree automatically may have an OF translation */ /** @@ -508,7 +508,7 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, * for GPIOs will fail rudely. * * gpiochip_add_data() must only be called after gpiolib initialization, - * ie after core_initcall(). + * i.e. after core_initcall(). 
* * If gc->base is negative, this requests dynamic assignment of * a range of valid GPIOs. -- cgit v1.2.3-71-gd317 From 9c7d24693d864f90b27aad5d15fbfe226c02898b Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Wed, 24 Mar 2021 09:19:02 +0100 Subject: gpio: guard gpiochip_irqchip_add_domain() with GPIOLIB_IRQCHIP MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current code doesn't check if GPIOLIB_IRQCHIP is enabled, which results in a compilation error when trying to build gpio-regmap if CONFIG_GPIOLIB_IRQCHIP isn't enabled. Fixes: 6a45b0e2589f ("gpiolib: Introduce gpiochip_irqchip_add_domain()") Suggested-by: Michael Walle Signed-off-by: Álvaro Fernández Rojas Reviewed-by: Linus Walleij Reviewed-by: Michael Walle Acked-by: Bartosz Golaszewski Link: https://lore.kernel.org/r/20210324081923.20379-2-noltari@gmail.com Signed-off-by: Linus Walleij --- include/linux/gpio/driver.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 286de0520574..ecf0032a0995 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -624,8 +624,17 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain, bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset); +#ifdef CONFIG_GPIOLIB_IRQCHIP int gpiochip_irqchip_add_domain(struct gpio_chip *gc, struct irq_domain *domain); +#else +static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc, + struct irq_domain *domain) +{ + WARN_ON(1); + return -EINVAL; +} +#endif int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset); void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset); -- cgit v1.2.3-71-gd317 From d46bf9ec4596654f36245e3b14765bcb422be6ad Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Wed, 24 Mar 2021 09:19:03 +0100 Subject: gpio: regmap: set gpio_chip of_node MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is needed for properly registering GPIO regmap as a child of a regmap pin controller. Signed-off-by: Álvaro Fernández Rojas Reviewed-by: Michael Walle Reviewed-by: Andy Shevchenko Acked-by: Bartosz Golaszewski Link: https://lore.kernel.org/r/20210324081923.20379-3-noltari@gmail.com Signed-off-by: Linus Walleij --- drivers/gpio/gpio-regmap.c | 5 +++++ include/linux/gpio/regmap.h | 4 ++++ 2 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c index 5412cb3b0b2a..134cedf151a7 100644 --- a/drivers/gpio/gpio-regmap.c +++ b/drivers/gpio/gpio-regmap.c @@ -254,6 +254,11 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config chip->names = config->names; chip->label = config->label ?: dev_name(config->parent); +#if defined(CONFIG_OF_GPIO) + /* gpiolib will use of_node of the parent if chip->of_node is NULL */ + chip->of_node = to_of_node(config->fwnode); +#endif /* CONFIG_OF_GPIO */ + /* * If our regmap is fast_io we should probably set can_sleep to false. 
* Right now, the regmap doesn't save this property, nor is there any diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h index ad76f3d0a6ba..334dd928042b 100644 --- a/include/linux/gpio/regmap.h +++ b/include/linux/gpio/regmap.h @@ -4,6 +4,7 @@ #define _LINUX_GPIO_REGMAP_H struct device; +struct fwnode_handle; struct gpio_regmap; struct irq_domain; struct regmap; @@ -16,6 +17,8 @@ struct regmap; * @parent: The parent device * @regmap: The regmap used to access the registers * given, the name of the device is used + * @fwnode: (Optional) The firmware node. + * If not given, the fwnode of the parent is used. * @label: (Optional) Descriptive name for GPIO controller. * If not given, the name of the device is used. * @ngpio: Number of GPIOs @@ -57,6 +60,7 @@ struct regmap; struct gpio_regmap_config { struct device *parent; struct regmap *regmap; + struct fwnode_handle *fwnode; const char *label; int ngpio; -- cgit v1.2.3-71-gd317 From 6e085e0ac9cf16298b5fefe0b1893f98ef765812 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 9 Dec 2020 14:09:24 +0800 Subject: arm/arm64: Probe for the presence of KVM hypervisor Although the SMCCC specification provides some limited functionality for describing the presence of hypervisor and firmware services, this is generally applicable only to functions designated as "Arm Architecture Service Functions" and no portable discovery mechanism is provided for standard hypervisor services, despite having a designated range of function identifiers reserved by the specification. In an attempt to avoid the need for additional firmware changes every time a new function is added, introduce a UID to identify the service provider as being compatible with KVM. Once this has been established, additional services can be discovered via a feature bitmap. 
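A minimal, purely illustrative consumer of the new interface might look like the fragment below; ARM_SMCCC_KVM_FUNC_EXAMPLE and its FUNC_ID are made-up placeholders (this patch only defines ARM_SMCCC_KVM_FUNC_FEATURES), standing in for whichever service a later patch would add:

	struct arm_smccc_res res;

	/* Guest driver gating an optional hypercall on the discovered bitmap */
	if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_EXAMPLE))
		return -EOPNOTSUPP;

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_EXAMPLE_FUNC_ID, &res);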
Reviewed-by: Steven Price Signed-off-by: Will Deacon Signed-off-by: Jianyong Wu [maz: move code to its own file, plug it into PSCI] Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20201209060932.212364-2-jianyong.wu@arm.com --- arch/arm/include/asm/hypervisor.h | 3 +++ arch/arm64/include/asm/hypervisor.h | 3 +++ drivers/firmware/psci/psci.c | 2 ++ drivers/firmware/smccc/Makefile | 2 +- drivers/firmware/smccc/kvm_guest.c | 50 +++++++++++++++++++++++++++++++++++++ drivers/firmware/smccc/smccc.c | 1 + include/linux/arm-smccc.h | 25 +++++++++++++++++++ 7 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/smccc/kvm_guest.c (limited to 'include/linux') diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h index df8524365637..bd61502b9715 100644 --- a/arch/arm/include/asm/hypervisor.h +++ b/arch/arm/include/asm/hypervisor.h @@ -4,4 +4,7 @@ #include +void kvm_init_hyp_services(void); +bool kvm_arm_hyp_service_available(u32 func_id); + #endif diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h index f9cc1d021791..0ae427f352c8 100644 --- a/arch/arm64/include/asm/hypervisor.h +++ b/arch/arm64/include/asm/hypervisor.h @@ -4,4 +4,7 @@ #include +void kvm_init_hyp_services(void); +bool kvm_arm_hyp_service_available(u32 func_id); + #endif diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index f5fc429cae3f..69e296f02902 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -498,6 +499,7 @@ static int __init psci_probe(void) psci_init_cpu_suspend(); psci_init_system_suspend(); psci_init_system_reset2(); + kvm_init_hyp_services(); } return 0; diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile index 72ab84042832..40d19144a860 100644 --- a/drivers/firmware/smccc/Makefile +++ b/drivers/firmware/smccc/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 # -obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o +obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o kvm_guest.o obj-$(CONFIG_ARM_SMCCC_SOC_ID) += soc_id.o diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c new file mode 100644 index 000000000000..2d3e866decaa --- /dev/null +++ b/drivers/firmware/smccc/kvm_guest.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "smccc: KVM: " fmt + +#include +#include +#include +#include + +#include + +static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { }; + +void __init kvm_init_hyp_services(void) +{ + struct arm_smccc_res res; + u32 val[4]; + + if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC) + return; + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res); + if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 || + res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 || + res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 || + res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3) + return; + + memset(&res, 0, sizeof(res)); + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID, &res); + + val[0] = lower_32_bits(res.a0); + val[1] = lower_32_bits(res.a1); + val[2] = lower_32_bits(res.a2); + val[3] = lower_32_bits(res.a3); + + bitmap_from_arr32(__kvm_arm_hyp_services, val, ARM_SMCCC_KVM_NUM_FUNCS); + + pr_info("hypervisor services detected (0x%08lx 0x%08lx 0x%08lx 0x%08lx)\n", + res.a3, res.a2, res.a1, res.a0); +} + +bool kvm_arm_hyp_service_available(u32 
func_id) +{ + if (func_id >= ARM_SMCCC_KVM_NUM_FUNCS) + return false; + + return test_bit(func_id, __kvm_arm_hyp_services); +} +EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available); diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c index d52bfc5ed5e4..028f81d702cc 100644 --- a/drivers/firmware/smccc/smccc.c +++ b/drivers/firmware/smccc/smccc.c @@ -8,6 +8,7 @@ #include #include #include +#include #include static u32 smccc_version = ARM_SMCCC_VERSION_1_0; diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 62c54234576c..1a27bd9493fe 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -55,6 +55,8 @@ #define ARM_SMCCC_OWNER_TRUSTED_OS 50 #define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 +#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01 + #define ARM_SMCCC_QUIRK_NONE 0 #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ @@ -87,6 +89,29 @@ ARM_SMCCC_SMC_32, \ 0, 0x7fff) +#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_FUNC_QUERY_CALL_UID) + +/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */ +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU + +/* KVM "vendor specific" services */ +#define ARM_SMCCC_KVM_FUNC_FEATURES 0 +#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127 +#define ARM_SMCCC_KVM_NUM_FUNCS 128 + +#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_FEATURES) + #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 /* Paravirtualised time calls (defined by ARM DEN0057A) */ -- cgit v1.2.3-71-gd317 From 1fd3dde5e270ad08f1406f921c9a2cda154fcea9 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 30 Mar 2021 12:43:16 -0500 Subject: PCI: Add pci_disable_parity() Add pci_disable_parity() to disable reporting of parity errors for a device by clearing PCI_COMMAND_PARITY. The device will still set PCI_STATUS_DETECTED_PARITY when it detects a parity error or receives a Poisoned TLP, but it will not set PCI_STATUS_PARITY, which means it will not assert PERR# (conventional PCI) or report Poisoned TLPs (PCIe). 
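As a hedged usage sketch (the driver and quirk below are hypothetical, not taken from the referenced discussion), a driver that knows its device raises spurious parity errors could mask them during probe:

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		ret = pcim_enable_device(pdev);
		if (ret)
			return ret;

		/* this (made-up) device is known to report bogus parity errors */
		pci_disable_parity(pdev);

		return 0;
	}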
Based-on: https://lore.kernel.org/linux-arm-kernel/d375987c-ea4f-dd98-4ef8-99b2fbfe7c33@gmail.com/ Based-on-patch-by: Heiner Kallweit Link: https://lore.kernel.org/r/20210330174318.1289680-2-helgaas@kernel.org Signed-off-by: Bjorn Helgaas --- drivers/pci/pci.c | 17 +++++++++++++++++ include/linux/pci.h | 1 + 2 files changed, 18 insertions(+) (limited to 'include/linux') diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 16a17215f633..b1845e5e5c8f 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4453,6 +4453,23 @@ void pci_clear_mwi(struct pci_dev *dev) } EXPORT_SYMBOL(pci_clear_mwi); +/** + * pci_disable_parity - disable parity checking for device + * @dev: the PCI device to operate on + * + * Disable parity checking for device @dev + */ +void pci_disable_parity(struct pci_dev *dev) +{ + u16 cmd; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (cmd & PCI_COMMAND_PARITY) { + cmd &= ~PCI_COMMAND_PARITY; + pci_write_config_word(dev, PCI_COMMAND, cmd); + } +} + /** * pci_intx - enables/disables PCI INTx for device dev * @pdev: the PCI device to operate on diff --git a/include/linux/pci.h b/include/linux/pci.h index 86c799c97b77..4eaa773115da 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1201,6 +1201,7 @@ int __must_check pci_set_mwi(struct pci_dev *dev); int __must_check pcim_set_mwi(struct pci_dev *dev); int pci_try_set_mwi(struct pci_dev *dev); void pci_clear_mwi(struct pci_dev *dev); +void pci_disable_parity(struct pci_dev *dev); void pci_intx(struct pci_dev *dev, int enable); bool pci_check_and_mask_intx(struct pci_dev *dev); bool pci_check_and_unmask_intx(struct pci_dev *dev); -- cgit v1.2.3-71-gd317 From 26dbc7e299c7ebbb6a95e2c620b21b5280b37c57 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Thu, 18 Mar 2021 14:33:08 +0000 Subject: bug: Factor out a getter for a bug's file line There is some non-trivial config-based logic to get the file name and line number associated with a bug. Factor this out to a getter that can be resused. 
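A sketch of a caller other than report_bug() (hypothetical context, e.g. an out-of-line BUG decoder; error handling trimmed):

	struct bug_entry *bug = find_bug(bugaddr);
	const char *file;
	unsigned int line;

	if (bug) {
		/* file stays NULL and line 0 without CONFIG_DEBUG_BUGVERBOSE */
		bug_get_file_line(bug, &file, &line);
		pr_err("BUG at %s:%u\n", file ? file : "?", line);
	}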
Signed-off-by: Andrew Scull Cc: Peter Zijlstra Cc: "Steven Rostedt (VMware)" Reviewed-by: Steven Rostedt (VMware) Acked-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210318143311.839894-3-ascull@google.com --- include/linux/bug.h | 3 +++ lib/bug.c | 27 +++++++++++++++++---------- 2 files changed, 20 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bug.h b/include/linux/bug.h index f639bd0122f3..e3841bee4c8d 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -36,6 +36,9 @@ static inline int is_warning_bug(const struct bug_entry *bug) return bug->flags & BUGFLAG_WARNING; } +void bug_get_file_line(struct bug_entry *bug, const char **file, + unsigned int *line); + struct bug_entry *find_bug(unsigned long bugaddr); enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); diff --git a/lib/bug.c b/lib/bug.c index b92da1f6e21b..e65398082cbc 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -127,6 +127,22 @@ static inline struct bug_entry *module_find_bug(unsigned long bugaddr) } #endif +void bug_get_file_line(struct bug_entry *bug, const char **file, + unsigned int *line) +{ + *file = NULL; + *line = 0; + +#ifdef CONFIG_DEBUG_BUGVERBOSE +#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS + *file = bug->file; +#else + *file = (const char *)bug + bug->file_disp; +#endif + *line = bug->line; +#endif +} + struct bug_entry *find_bug(unsigned long bugaddr) { struct bug_entry *bug; @@ -153,17 +169,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) disable_trace_on_warning(); - file = NULL; - line = 0; + bug_get_file_line(bug, &file, &line); -#ifdef CONFIG_DEBUG_BUGVERBOSE -#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS - file = bug->file; -#else - file = (const char *)bug + bug->file_disp; -#endif - line = bug->line; -#endif warning = (bug->flags & BUGFLAG_WARNING) != 0; once = (bug->flags & BUGFLAG_ONCE) != 0; done = (bug->flags & BUGFLAG_DONE) != 0; -- cgit v1.2.3-71-gd317 From 1decdb335c366fc0a1bae0db55c138c613cc9a1f Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Tue, 30 Mar 2021 11:40:55 +0800 Subject: tracing: Remove duplicate struct declaration in trace_events.h struct trace_array is declared twice. One has been declared at forward declaration. Remove the duplicate. Link: https://lkml.kernel.org/r/20210330034056.2266969-1-wanjiabing@vivo.com Signed-off-by: Wan Jiabing Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 36e27c1f42e0..ad413b382a3c 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -404,7 +404,6 @@ trace_get_fields(struct trace_event_call *event_call) return event_call->class->get_fields(event_call); } -struct trace_array; struct trace_subsystem_dir; enum { -- cgit v1.2.3-71-gd317 From f3ef7202ef7c705d640d1aeec3b286a641ac9186 Mon Sep 17 00:00:00 2001 From: "Yordan Karadzhov (VMware)" Date: Mon, 29 Mar 2021 16:03:31 +0300 Subject: tracing: Remove unused argument from "ring_buffer_time_stamp() The "cpu" parameter is not being used by the function. 
Link: https://lkml.kernel.org/r/20210329130331.199402-1-y.karadz@gmail.com Signed-off-by: Yordan Karadzhov (VMware) Signed-off-by: Steven Rostedt (VMware) --- include/linux/ring_buffer.h | 2 +- kernel/trace/ring_buffer.c | 2 +- kernel/trace/trace.c | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 057b7ed4fe24..dac53fd3afea 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -181,7 +181,7 @@ unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cp unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu); unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu); -u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu); +u64 ring_buffer_time_stamp(struct trace_buffer *buffer); void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, int cpu, u64 *ts); void ring_buffer_set_clock(struct trace_buffer *buffer, diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index f4216df58e31..2c0ee6484990 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1080,7 +1080,7 @@ static inline u64 rb_time_stamp(struct trace_buffer *buffer) return ts << DEBUG_SHIFT; } -u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu) +u64 ring_buffer_time_stamp(struct trace_buffer *buffer) { u64 time; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 3834de91fb97..507a30bf26e4 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -771,7 +771,7 @@ static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu) if (!buf->buffer) return trace_clock_local(); - ts = ring_buffer_time_stamp(buf->buffer, cpu); + ts = ring_buffer_time_stamp(buf->buffer); ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); return ts; @@ -7174,7 +7174,7 @@ static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe) { if (rbe == this_cpu_read(trace_buffered_event)) - return ring_buffer_time_stamp(buffer, smp_processor_id()); + return ring_buffer_time_stamp(buffer); return ring_buffer_event_time_stamp(buffer, rbe); } @@ -8088,7 +8088,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem); - t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); + t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); } else { @@ -8097,7 +8097,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); trace_seq_printf(s, "now ts: %llu\n", - ring_buffer_time_stamp(trace_buf->buffer, cpu)); + ring_buffer_time_stamp(trace_buf->buffer)); } cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); -- cgit v1.2.3-71-gd317 From a7f3d3d3600c8ed119eb0d2483de0062ce2e3707 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 26 Mar 2021 22:03:05 +0100 Subject: dma-mapping: add unlikely hint to error path in dma_mapping_error Zillions of drivers use the unlikely() hint when checking the result of dma_mapping_error(). This is an inline function anyway, so we can move the hint into the function and remove it from drivers over time. 
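A sketch of the driver-side pattern this enables (hypothetical device and buffer names, not part of this patch):

#include <linux/dma-mapping.h>

/* Hypothetical mapping path: no explicit unlikely() around the check. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *dma)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*dma = addr;
	return 0;
}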
Signed-off-by: Heiner Kallweit Reviewed-by: Robin Murphy Signed-off-by: Christoph Hellwig --- include/linux/dma-mapping.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index e9d19b974f26..183e7103a66d 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -95,7 +95,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { debug_dma_mapping_error(dev, dma_addr); - if (dma_addr == DMA_MAPPING_ERROR) + if (unlikely(dma_addr == DMA_MAPPING_ERROR)) return -ENOMEM; return 0; } -- cgit v1.2.3-71-gd317 From d699ae4fc27496d01e8bc5ab2106bd79d1e7be92 Mon Sep 17 00:00:00 2001 From: Alexander Lochmann Date: Thu, 11 Feb 2021 10:51:55 +0100 Subject: ext4: updated locking documentation for journal_t Some members of transaction_t are allowed to be read without any lock being held if consistency doesn't matter. Based on LockDoc's findings, we extended the locking documentation of those members. Each one of them is marked with a short comment: "no lock for quick racy checks". Signed-off-by: Alexander Lochmann Signed-off-by: Horst Schirmeier Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/ad82c7a9-a624-4ed5-5ada-a6410c44c0b3@tu-dortmund.de Signed-off-by: Theodore Ts'o --- include/linux/jbd2.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 99d3cd051ac3..74710bb231a6 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -768,7 +768,8 @@ enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY}; struct journal_s { /** - * @j_flags: General journaling state flags [j_state_lock] + * @j_flags: General journaling state flags [j_state_lock, + * no lock for quick racy checks] */ unsigned long j_flags; @@ -808,7 +809,8 @@ struct journal_s /** * @j_barrier_count: * - * Number of processes waiting to create a barrier lock [j_state_lock] + * Number of processes waiting to create a barrier lock [j_state_lock, + * no lock for quick racy checks] */ int j_barrier_count; @@ -821,7 +823,8 @@ struct journal_s * @j_running_transaction: * * Transactions: The current running transaction... - * [j_state_lock] [caller holding open handle] + * [j_state_lock, no lock for quick racy checks] [caller holding + * open handle] */ transaction_t *j_running_transaction; @@ -1033,7 +1036,7 @@ struct journal_s * @j_commit_sequence: * * Sequence number of the most recently committed transaction - * [j_state_lock]. + * [j_state_lock, no lock for quick racy checks] */ tid_t j_commit_sequence; @@ -1041,7 +1044,7 @@ struct journal_s * @j_commit_request: * * Sequence number of the most recent transaction wanting commit - * [j_state_lock] + * [j_state_lock, no lock for quick racy checks] */ tid_t j_commit_request; -- cgit v1.2.3-71-gd317 From 3042b1b45c4106feff063932d4fd481c5009dbe1 Mon Sep 17 00:00:00 2001 From: Alexander Lochmann Date: Thu, 11 Feb 2021 18:14:10 +0100 Subject: Updated locking documentation for transaction_t Some members of transaction_t are allowed to be read without any lock being held if accessed from the correct context. We used LockDoc's findings to determine those members. Each member of them is marked with a short comment: "no lock needed for jbd2 thread". 
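As a hypothetical illustration (not part of this patch) of the pattern these comments document, the jbd2 commit thread may read such a member without taking j_list_lock:

#include <linux/jbd2.h>

/* Illustrative only: valid when called from the jbd2 thread context. */
static int example_commit_thread_peek(transaction_t *transaction)
{
	return transaction->t_nr_buffers;
}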
Signed-off-by: Alexander Lochmann Signed-off-by: Horst Schirmeier Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20210211171410.17984-1-alexander.lochmann@tu-dortmund.de Signed-off-by: Theodore Ts'o --- include/linux/jbd2.h | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 74710bb231a6..b9aa85081a40 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -594,18 +594,22 @@ struct transaction_s */ unsigned long t_log_start; - /* Number of buffers on the t_buffers list [j_list_lock] */ + /* + * Number of buffers on the t_buffers list [j_list_lock, no locks + * needed for jbd2 thread] + */ int t_nr_buffers; /* * Doubly-linked circular list of all buffers reserved but not yet - * modified by this transaction [j_list_lock] + * modified by this transaction [j_list_lock, no locks needed fo + * jbd2 thread] */ struct journal_head *t_reserved_list; /* * Doubly-linked circular list of all metadata buffers owned by this - * transaction [j_list_lock] + * transaction [j_list_lock, no locks needed for jbd2 thread] */ struct journal_head *t_buffers; @@ -629,9 +633,11 @@ struct transaction_s struct journal_head *t_checkpoint_io_list; /* - * Doubly-linked circular list of metadata buffers being shadowed by log - * IO. The IO buffers on the iobuf list and the shadow buffers on this - * list match each other one for one at all times. [j_list_lock] + * Doubly-linked circular list of metadata buffers being + * shadowed by log IO. The IO buffers on the iobuf list and + * the shadow buffers on this list match each other one for + * one at all times. [j_list_lock, no locks needed for jbd2 + * thread] */ struct journal_head *t_shadow_list; -- cgit v1.2.3-71-gd317 From d737e5d418706abf32f6de68c3e09958516d422f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 9 Feb 2021 16:04:15 -0500 Subject: SUNRPC: Set TCP_CORK until the transmit queue is empty When we have multiple RPC requests queued up, it makes sense to set the TCP_CORK option while the transmit queue is non-empty. 
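A minimal sketch of the corking pattern (hypothetical helper and queue-length argument, not part of this patch):

#include <net/sock.h>
#include <linux/tcp.h>

/* Hypothetical send path: keep the socket corked while more requests wait. */
static void example_send_request(struct sock *sk, long xmit_queuelen)
{
	tcp_sock_set_cork(sk, true);
	/* ... transmit one RPC request here ... */
	if (xmit_queuelen <= 1)
		tcp_sock_set_cork(sk, false);
}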
Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprt.h | 1 + net/sunrpc/xprt.c | 2 ++ net/sunrpc/xprtsock.c | 5 ++++- 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index d2e97ee802af..d81fe8b364d0 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -247,6 +247,7 @@ struct rpc_xprt { struct rpc_task * snd_task; /* Task blocked in send */ struct list_head xmit_queue; /* Send queue */ + atomic_long_t xmit_queuelen; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ #if defined(CONFIG_SUNRPC_BACKCHANNEL) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 691ccf8049a4..a853f75d4968 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1352,6 +1352,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) list_add_tail(&req->rq_xmit, &xprt->xmit_queue); INIT_LIST_HEAD(&req->rq_xmit2); out: + atomic_long_inc(&xprt->xmit_queuelen); set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate); spin_unlock(&xprt->queue_lock); } @@ -1381,6 +1382,7 @@ xprt_request_dequeue_transmit_locked(struct rpc_task *task) } } else list_del(&req->rq_xmit2); + atomic_long_dec(&req->rq_xprt->xmit_queuelen); } /** diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index e35760f238a4..a64f5ed1edb4 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1018,6 +1018,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req) * to cope with writespace callbacks arriving _after_ we have * called sendmsg(). */ req->rq_xtime = ktime_get(); + tcp_sock_set_cork(transport->inet, true); while (1) { status = xprt_sock_sendmsg(transport->sock, &msg, xdr, transport->xmit.offset, rm, &sent); @@ -1032,6 +1033,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req) if (likely(req->rq_bytes_sent >= msglen)) { req->rq_xmit_bytes_sent += transport->xmit.offset; transport->xmit.offset = 0; + if (atomic_long_read(&xprt->xmit_queuelen) == 1) + tcp_sock_set_cork(transport->inet, false); return 0; } @@ -2163,6 +2166,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) } xs_tcp_set_socket_timeouts(xprt, sock); + tcp_sock_set_nodelay(sk); write_lock_bh(&sk->sk_callback_lock); @@ -2177,7 +2181,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) /* socket options */ sock_reset_flag(sk, SOCK_LINGER); - tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; xprt_clear_connected(xprt); -- cgit v1.2.3-71-gd317 From aca01415e076aa96cca0f801f4420ee5c10c660d Mon Sep 17 00:00:00 2001 From: Bence Csókás Date: Wed, 31 Mar 2021 19:19:20 +0000 Subject: i2c: Add I2C_AQ_NO_REP_START adapter quirk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This quirk signifies that the adapter cannot do a repeated START, it always issues a STOP condition after transfers. 
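For illustration (hypothetical adapter, not part of this patch), a bus driver would advertise the quirk through its i2c_adapter_quirks:

#include <linux/i2c.h>
#include <linux/module.h>

/* Hypothetical adapter that always issues STOP between transfers. */
static const struct i2c_adapter_quirks example_quirks = {
	.flags = I2C_AQ_NO_REP_START,
};

static struct i2c_adapter example_adapter = {
	.owner	= THIS_MODULE,
	.name	= "example-no-repeated-start",
	.quirks	= &example_quirks,
};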
Suggested-by: Wolfram Sang Signed-off-by: Bence Csókás Signed-off-by: Wolfram Sang --- include/linux/i2c.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 56622658b215..a670ae129f4b 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -687,6 +687,8 @@ struct i2c_adapter_quirks { #define I2C_AQ_NO_ZERO_LEN_READ BIT(5) #define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6) #define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE) +/* adapter cannot do repeated START */ +#define I2C_AQ_NO_REP_START BIT(7) /* * i2c_adapter is the structure used to identify a physical i2c bus along -- cgit v1.2.3-71-gd317 From d556435156b7970b8ce61b355df558a5168927cc Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 22 Mar 2021 11:21:38 +0100 Subject: jbd2: avoid -Wempty-body warnings Building with 'make W=1' shows a harmless -Wempty-body warning: fs/jbd2/recovery.c: In function 'fc_do_one_pass': fs/jbd2/recovery.c:267:75: error: suggest braces around empty body in an 'if' statement [-Werror=empty-body] 267 | jbd_debug(3, "Fast commit replay failed, err = %d\n", err); | ^ Change the empty dprintk() macros to no_printk(), which avoids this warning and adds format string checking. Signed-off-by: Arnd Bergmann Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20210322102152.95684-1-arnd@kernel.org Signed-off-by: Theodore Ts'o --- include/linux/jbd2.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index b9aa85081a40..db0e1920cb12 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -61,7 +61,7 @@ void __jbd2_debug(int level, const char *file, const char *func, #define jbd_debug(n, fmt, a...) \ __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a) #else -#define jbd_debug(n, fmt, a...) /**/ +#define jbd_debug(n, fmt, a...) no_printk(fmt, ##a) #endif extern void *jbd2_alloc(size_t size, gfp_t flags); -- cgit v1.2.3-71-gd317 From 2cd87a7b293dedbbaea3b6739f95d428a2d9890d Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 5 Apr 2021 17:43:03 +0100 Subject: coresight: core: Add support for dedicated percpu sinks Add support for dedicated sinks that are bound to individual CPUs. (e.g, TRBE). To allow quicker access to the sink for a given CPU bound source, keep a percpu array of the sink devices. Also, add support for building a path to the CPU local sink from the ETM. This adds a new percpu sink type CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM. This new sink type is exclusively available and can only work with percpu source type device CORESIGHT_DEV_SUBTYPE_SOURCE_PROC. This defines a percpu structure that accommodates a single coresight_device which can be used to store an initialized instance from a sink driver. As these sinks are exclusively linked and dependent on corresponding percpu sources devices, they should also be the default sink device during a perf session. Outwards device connections are scanned while establishing paths between a source and a sink device. But such connections are not present for certain percpu source and sink devices which are exclusively linked and dependent. Build the path directly and skip connection scanning for such devices. 
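A hedged sketch (hypothetical probe path, not part of this patch) of how a CPU-bound sink driver inside the coresight core could register its instance with the new helper from coresight-priv.h:

/* Illustrative only: make the sink discoverable as the CPU-local default. */
static int example_percpu_sink_register(int cpu, struct coresight_device *csdev)
{
	coresight_set_percpu_sink(cpu, csdev);
	return 0;
}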
Cc: Mathieu Poirier Cc: Mike Leach Cc: Suzuki K Poulose Tested-by: Suzuki K Poulose Reviewed-by: Mike Leach Signed-off-by: Anshuman Khandual [Moved the set/get percpu sink APIs from TRBE patch to here Fixed build break on arm32] Signed-off-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20210405164307.1720226-17-suzuki.poulose@arm.com Signed-off-by: Mathieu Poirier --- drivers/hwtracing/coresight/coresight-core.c | 29 ++++++++++++++++++++++++++-- drivers/hwtracing/coresight/coresight-priv.h | 3 +++ include/linux/coresight.h | 13 +++++++++++++ 3 files changed, 43 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 0062c8935653..55c645616bf6 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -23,6 +23,7 @@ #include "coresight-priv.h" static DEFINE_MUTEX(coresight_mutex); +DEFINE_PER_CPU(struct coresight_device *, csdev_sink); /** * struct coresight_node - elements of a path, from source to sink @@ -70,6 +71,18 @@ void coresight_remove_cti_ops(void) } EXPORT_SYMBOL_GPL(coresight_remove_cti_ops); +void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev) +{ + per_cpu(csdev_sink, cpu) = csdev; +} +EXPORT_SYMBOL_GPL(coresight_set_percpu_sink); + +struct coresight_device *coresight_get_percpu_sink(int cpu) +{ + return per_cpu(csdev_sink, cpu); +} +EXPORT_SYMBOL_GPL(coresight_get_percpu_sink); + static int coresight_id_match(struct device *dev, void *data) { int trace_id, i_trace_id; @@ -784,6 +797,14 @@ static int _coresight_build_path(struct coresight_device *csdev, if (csdev == sink) goto out; + if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) && + sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) { + if (_coresight_build_path(sink, sink, path) == 0) { + found = true; + goto out; + } + } + /* Not a sink - recursively explore each port found on this element */ for (i = 0; i < csdev->pdata->nr_outport; i++) { struct coresight_device *child_dev; @@ -999,8 +1020,12 @@ coresight_find_default_sink(struct coresight_device *csdev) int depth = 0; /* look for a default sink if we have not found for this device */ - if (!csdev->def_sink) - csdev->def_sink = coresight_find_sink(csdev, &depth); + if (!csdev->def_sink) { + if (coresight_is_percpu_source(csdev)) + csdev->def_sink = per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev)); + if (!csdev->def_sink) + csdev->def_sink = coresight_find_sink(csdev, &depth); + } return csdev->def_sink; } diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index f5f654ea2994..ff1dd2092ac5 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -232,4 +232,7 @@ coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode); void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev, struct coresight_device *ect_csdev); +void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev); +struct coresight_device *coresight_get_percpu_sink(int cpu); + #endif diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 976ec2697610..85008a65e21f 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -50,6 +50,7 @@ enum coresight_dev_subtype_sink { CORESIGHT_DEV_SUBTYPE_SINK_PORT, CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM, + CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM, 
}; enum coresight_dev_subtype_link { @@ -455,6 +456,18 @@ static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 o } #endif /* CONFIG_64BIT */ +static inline bool coresight_is_percpu_source(struct coresight_device *csdev) +{ + return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) && + (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC); +} + +static inline bool coresight_is_percpu_sink(struct coresight_device *csdev) +{ + return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SINK) && + (csdev->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM); +} + extern struct coresight_device * coresight_register(struct coresight_desc *desc); extern void coresight_unregister(struct coresight_device *csdev); -- cgit v1.2.3-71-gd317 From 2e1a44c1c4acf209c0dd7bc04421d101b9e80d11 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 23 Mar 2021 09:05:57 +0800 Subject: iommu/vt-d: Remove svm_dev_ops The svm_dev_ops has never been referenced in the tree, and there's no plan to have anything to use it. Remove it to make the code neat. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210323010600.678627-3-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/svm.c | 15 +-------------- include/linux/intel-iommu.h | 3 --- include/linux/intel-svm.h | 7 ------- 3 files changed, 1 insertion(+), 24 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 46776db47d16..933fe11797a1 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -462,7 +462,6 @@ static void load_pasid(struct mm_struct *mm, u32 pasid) /* Caller must hold pasid_mutex, mm reference */ static int intel_svm_bind_mm(struct device *dev, unsigned int flags, - struct svm_dev_ops *ops, struct mm_struct *mm, struct intel_svm_dev **sd) { struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); @@ -512,10 +511,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags, /* Find the matching device in svm list */ for_each_svm_dev(sdev, svm, dev) { - if (sdev->ops != ops) { - ret = -EBUSY; - goto out; - } sdev->users++; goto success; } @@ -550,7 +545,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags, /* Finish the setup now we know we're keeping it */ sdev->users = 1; - sdev->ops = ops; init_rcu_head(&sdev->rcu); if (!svm) { @@ -1006,13 +1000,6 @@ invalid: mmap_read_unlock(svm->mm); mmput(svm->mm); bad_req: - WARN_ON(!sdev); - if (sdev && sdev->ops && sdev->ops->fault_cb) { - int rwxp = (req->rd_req << 3) | (req->wr_req << 2) | - (req->exe_req << 1) | (req->pm_req); - sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, - req->priv_data, rwxp, result); - } /* We get here in the error case where the PASID lookup failed, and these can be NULL. Do not use them below this point! 
*/ sdev = NULL; @@ -1087,7 +1074,7 @@ intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata) if (drvdata) flags = *(unsigned int *)drvdata; mutex_lock(&pasid_mutex); - ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev); + ret = intel_svm_bind_mm(dev, flags, mm, &sdev); if (ret) sva = ERR_PTR(ret); else if (sdev) diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 1732298ce888..e0f8c2ade3e8 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -769,14 +769,11 @@ u32 intel_svm_get_pasid(struct iommu_sva *handle); int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); -struct svm_dev_ops; - struct intel_svm_dev { struct list_head list; struct rcu_head rcu; struct device *dev; struct intel_iommu *iommu; - struct svm_dev_ops *ops; struct iommu_sva sva; u32 pasid; int users; diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 39d368a810b8..6c9d10c0fb1e 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -8,13 +8,6 @@ #ifndef __INTEL_SVM_H__ #define __INTEL_SVM_H__ -struct device; - -struct svm_dev_ops { - void (*fault_cb)(struct device *dev, u32 pasid, u64 address, - void *private, int rwxp, int response); -}; - /* Values for rxwp in fault_cb callback */ #define SVM_REQ_READ (1<<3) #define SVM_REQ_WRITE (1<<2) -- cgit v1.2.3-71-gd317 From 06905ea8319731036695cf1a4c53c12b0f9373cb Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 23 Mar 2021 09:05:58 +0800 Subject: iommu/vt-d: Remove SVM_FLAG_PRIVATE_PASID The SVM_FLAG_PRIVATE_PASID has never been referenced in the tree, and there's no plan to have anything to use it. So cleanup it. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210323010600.678627-4-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/svm.c | 40 ++++++++++++++++++---------------------- include/linux/intel-svm.h | 16 +++------------- 2 files changed, 21 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 933fe11797a1..f77184a9defb 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -465,9 +465,9 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags, struct mm_struct *mm, struct intel_svm_dev **sd) { struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); + struct intel_svm *svm = NULL, *t; struct device_domain_info *info; struct intel_svm_dev *sdev; - struct intel_svm *svm = NULL; unsigned long iflags; int pasid_max; int ret; @@ -493,30 +493,26 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags, } } - if (!(flags & SVM_FLAG_PRIVATE_PASID)) { - struct intel_svm *t; - - list_for_each_entry(t, &global_svm_list, list) { - if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID)) - continue; - - svm = t; - if (svm->pasid >= pasid_max) { - dev_warn(dev, - "Limited PASID width. Cannot use existing PASID %d\n", - svm->pasid); - ret = -ENOSPC; - goto out; - } + list_for_each_entry(t, &global_svm_list, list) { + if (t->mm != mm) + continue; - /* Find the matching device in svm list */ - for_each_svm_dev(sdev, svm, dev) { - sdev->users++; - goto success; - } + svm = t; + if (svm->pasid >= pasid_max) { + dev_warn(dev, + "Limited PASID width. 
Cannot use existing PASID %d\n", + svm->pasid); + ret = -ENOSPC; + goto out; + } - break; + /* Find the matching device in svm list */ + for_each_svm_dev(sdev, svm, dev) { + sdev->users++; + goto success; } + + break; } sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 6c9d10c0fb1e..10fa80eef13a 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -14,16 +14,6 @@ #define SVM_REQ_EXEC (1<<1) #define SVM_REQ_PRIV (1<<0) -/* - * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" - * PASID for the current process. Even if a PASID already exists, a new one - * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID - * will not be given to subsequent callers. This facility allows a driver to - * disambiguate between multiple device contexts which access the same MM, - * if there is no other way to do so. It should be used sparingly, if at all. - */ -#define SVM_FLAG_PRIVATE_PASID (1<<0) - /* * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only * for access to kernel addresses. No IOTLB flushes are automatically done @@ -35,18 +25,18 @@ * It is unlikely that we will ever hook into flush_tlb_kernel_range() to * do such IOTLB flushes automatically. */ -#define SVM_FLAG_SUPERVISOR_MODE (1<<1) +#define SVM_FLAG_SUPERVISOR_MODE BIT(0) /* * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest * processes. Compared to the host bind, the primary differences are: * 1. mm life cycle management * 2. fault reporting */ -#define SVM_FLAG_GUEST_MODE (1<<2) +#define SVM_FLAG_GUEST_MODE BIT(1) /* * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space, * which requires guest and host PASID translation at both directions. */ -#define SVM_FLAG_GUEST_PASID (1<<3) +#define SVM_FLAG_GUEST_PASID BIT(2) #endif /* __INTEL_SVM_H__ */ -- cgit v1.2.3-71-gd317 From 3431c3f660a39f6ced954548a59dba6541ce3eb1 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Thu, 25 Mar 2021 11:38:24 +0800 Subject: iommu: Fix a boundary issue to avoid performance drop After the change of patch ("iommu: Switch gather->end to the inclusive end"), the performace drops from 1600+K IOPS to 1200K in our kunpeng ARM64 platform. We find that the range [start1, end1) actually is joint from the range [end1, end2), but it is considered as disjoint after the change, so it needs more times of TLB sync, and spends more time on it. So fix the boundary issue to avoid performance drop. Fixes: 862c3715de8f ("iommu: Switch gather->end to the inclusive end") Signed-off-by: Xiang Chen Acked-by: Will Deacon Link: https://lore.kernel.org/r/1616643504-120688-1-git-send-email-chenxiang66@hisilicon.com Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 5e7fe519430a..9ca6e6b8084d 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -547,7 +547,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, * structure can be rewritten. 
*/ if (gather->pgsize != size || - end < gather->start || start > gather->end) { + end + 1 < gather->start || start > gather->end + 1) { if (gather->pgsize) iommu_iotlb_sync(domain, gather); gather->pgsize = size; -- cgit v1.2.3-71-gd317 From f598a497bc7dfbec60270bca8b8408db3d23ac07 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 25 Mar 2021 20:29:58 +0800 Subject: iova: Add CPU hotplug handler to flush rcaches Like the Intel IOMMU driver already does, flush the per-IOVA domain CPU rcache when a CPU goes offline - there's no point in keeping it. Reviewed-by: Robin Murphy Signed-off-by: John Garry Link: https://lore.kernel.org/r/1616675401-151997-2-git-send-email-john.garry@huawei.com Signed-off-by: Joerg Roedel --- drivers/iommu/iova.c | 30 +++++++++++++++++++++++++++++- include/linux/cpuhotplug.h | 1 + include/linux/iova.h | 1 + 3 files changed, 31 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 471c48dd71e7..8542059ddef8 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -25,6 +25,17 @@ static void init_iova_rcaches(struct iova_domain *iovad); static void free_iova_rcaches(struct iova_domain *iovad); static void fq_destroy_all_entries(struct iova_domain *iovad); static void fq_flush_timeout(struct timer_list *t); + +static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node) +{ + struct iova_domain *iovad; + + iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead); + + free_cpu_cached_iovas(cpu, iovad); + return 0; +} + static void free_global_cached_iovas(struct iova_domain *iovad); static struct iova *to_iova(struct rb_node *node) @@ -56,6 +67,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR; rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node); rb_insert_color(&iovad->anchor.node, &iovad->rbroot); + cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead); init_iova_rcaches(iovad); } EXPORT_SYMBOL_GPL(init_iova_domain); @@ -299,10 +311,21 @@ int iova_cache_get(void) { mutex_lock(&iova_cache_mutex); if (!iova_cache_users) { + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL, + iova_cpuhp_dead); + if (ret) { + mutex_unlock(&iova_cache_mutex); + pr_err("Couldn't register cpuhp handler\n"); + return ret; + } + iova_cache = kmem_cache_create( "iommu_iova", sizeof(struct iova), 0, SLAB_HWCACHE_ALIGN, NULL); if (!iova_cache) { + cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD); mutex_unlock(&iova_cache_mutex); pr_err("Couldn't create iova cache\n"); return -ENOMEM; @@ -324,8 +347,10 @@ void iova_cache_put(void) return; } iova_cache_users--; - if (!iova_cache_users) + if (!iova_cache_users) { + cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD); kmem_cache_destroy(iova_cache); + } mutex_unlock(&iova_cache_mutex); } EXPORT_SYMBOL_GPL(iova_cache_put); @@ -648,6 +673,9 @@ void put_iova_domain(struct iova_domain *iovad) { struct iova *iova, *tmp; + cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, + &iovad->cpuhp_dead); + free_iova_flush_queue(iovad); free_iova_rcaches(iovad); rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f14adb882338..cedac9986557 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -58,6 +58,7 @@ enum cpuhp_state { CPUHP_NET_DEV_DEAD, CPUHP_PCI_XGENE_DEAD, CPUHP_IOMMU_INTEL_DEAD, + 
CPUHP_IOMMU_IOVA_DEAD, CPUHP_LUSTRE_CFS_DEAD, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_PADATA_DEAD, diff --git a/include/linux/iova.h b/include/linux/iova.h index c834c01c0a5b..4be6c0ab4997 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -95,6 +95,7 @@ struct iova_domain { flush-queues */ atomic_t fq_timer_on; /* 1 when timer is active, 0 when not */ + struct hlist_node cpuhp_dead; }; static inline unsigned long iova_size(struct iova *iova) -- cgit v1.2.3-71-gd317 From 363f266eeff6e22a09483dc922dccd7cd0b9fe9c Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 25 Mar 2021 20:29:59 +0800 Subject: iommu/vt-d: Remove IOVA domain rcache flushing for CPU offlining Now that the core code handles flushing per-IOVA domain CPU rcaches, remove the handling here. Reviewed-by: Lu Baolu Signed-off-by: John Garry Link: https://lore.kernel.org/r/1616675401-151997-3-git-send-email-john.garry@huawei.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 31 ------------------------------- include/linux/cpuhotplug.h | 1 - 2 files changed, 32 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 1c32522220bc..be5ea16ca26f 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4064,35 +4064,6 @@ static struct notifier_block intel_iommu_memory_nb = { .priority = 0 }; -static void free_all_cpu_cached_iovas(unsigned int cpu) -{ - int i; - - for (i = 0; i < g_num_of_iommus; i++) { - struct intel_iommu *iommu = g_iommus[i]; - struct dmar_domain *domain; - int did; - - if (!iommu) - continue; - - for (did = 0; did < cap_ndoms(iommu->cap); did++) { - domain = get_iommu_domain(iommu, (u16)did); - - if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA) - continue; - - iommu_dma_free_cpu_cached_iovas(cpu, &domain->domain); - } - } -} - -static int intel_iommu_cpu_dead(unsigned int cpu) -{ - free_all_cpu_cached_iovas(cpu); - return 0; -} - static void intel_disable_iommus(void) { struct intel_iommu *iommu = NULL; @@ -4387,8 +4358,6 @@ int __init intel_iommu_init(void) bus_set_iommu(&pci_bus_type, &intel_iommu_ops); if (si_domain && !hw_pass_through) register_memory_notifier(&intel_iommu_memory_nb); - cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL, - intel_iommu_cpu_dead); down_read(&dmar_global_lock); if (probe_acpi_namespace_devices()) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index cedac9986557..85996494bec1 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -57,7 +57,6 @@ enum cpuhp_state { CPUHP_PAGE_ALLOC_DEAD, CPUHP_NET_DEV_DEAD, CPUHP_PCI_XGENE_DEAD, - CPUHP_IOMMU_INTEL_DEAD, CPUHP_IOMMU_IOVA_DEAD, CPUHP_LUSTRE_CFS_DEAD, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, -- cgit v1.2.3-71-gd317 From 149448b353e2517ecc6eced7d9f46e9f3e08b89e Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 25 Mar 2021 20:30:00 +0800 Subject: iommu: Delete iommu_dma_free_cpu_cached_iovas() Function iommu_dma_free_cpu_cached_iovas() no longer has any caller, so delete it. With that, function free_cpu_cached_iovas() may be made static. 
Signed-off-by: John Garry Reviewed-by: Robin Murphy Link: https://lore.kernel.org/r/1616675401-151997-4-git-send-email-john.garry@huawei.com Signed-off-by: Joerg Roedel --- drivers/iommu/dma-iommu.c | 9 --------- drivers/iommu/iova.c | 3 ++- include/linux/dma-iommu.h | 5 ----- include/linux/iova.h | 5 ----- 4 files changed, 2 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index fdd095e1fa52..0af4048f9b3f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -64,15 +64,6 @@ static int __init iommu_dma_forcedac_setup(char *str) } early_param("iommu.forcedac", iommu_dma_forcedac_setup); -void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, - struct iommu_domain *domain) -{ - struct iommu_dma_cookie *cookie = domain->iova_cookie; - struct iova_domain *iovad = &cookie->iovad; - - free_cpu_cached_iovas(cpu, iovad); -} - static void iommu_dma_entry_dtor(unsigned long data) { struct page *freelist = (struct page *)data; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 8542059ddef8..c9726e1b9e11 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -22,6 +22,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn); static void init_iova_rcaches(struct iova_domain *iovad); +static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); static void free_iova_rcaches(struct iova_domain *iovad); static void fq_destroy_all_entries(struct iova_domain *iovad); static void fq_flush_timeout(struct timer_list *t); @@ -1040,7 +1041,7 @@ static void free_iova_rcaches(struct iova_domain *iovad) /* * free all the IOVA ranges cached by a cpu (used when cpu is unplugged) */ -void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad) +static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad) { struct iova_cpu_rcache *cpu_rcache; struct iova_rcache *rcache; diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 13d1f4c14d7b..6e75a2d689b4 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -83,10 +83,5 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he { } -static inline void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, - struct iommu_domain *domain) -{ -} - #endif /* CONFIG_IOMMU_DMA */ #endif /* __DMA_IOMMU_H */ diff --git a/include/linux/iova.h b/include/linux/iova.h index 4be6c0ab4997..71d8a2de6635 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -157,7 +157,6 @@ int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); -void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); #else static inline int iova_cache_get(void) { @@ -234,10 +233,6 @@ static inline void put_iova_domain(struct iova_domain *iovad) { } -static inline void free_cpu_cached_iovas(unsigned int cpu, - struct iova_domain *iovad) -{ -} #endif #endif -- cgit v1.2.3-71-gd317 From 0d35309ab5e080095190965aa7cfc3ca8fb88af9 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Thu, 1 Apr 2021 17:47:10 +0200 Subject: iommu: Fix comment for struct iommu_fwspec Commit 986d5ecc5699 ("iommu: Move fwspec->iommu_priv to struct dev_iommu") removed iommu_priv from fwspec and commit 5702ee24182f ("ACPI/IORT: Check ATS capability in root complex nodes") added 
@flags. Update the struct doc. Acked-by: Jonathan Cameron Acked-by: Will Deacon Signed-off-by: Jean-Philippe Brucker Link: https://lore.kernel.org/r/20210401154718.307519-2-jean-philippe@linaro.org Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 5e7fe519430a..1d422bf722a1 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -571,7 +571,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev); * struct iommu_fwspec - per-device IOMMU instance data * @ops: ops for this device's IOMMU * @iommu_fwnode: firmware handle for this device's IOMMU - * @iommu_priv: IOMMU driver private data for this device + * @flags: IOMMU_FWSPEC_* flags * @num_pasid_bits: number of PASID bits supported by this device * @num_ids: number of associated device IDs * @ids: IDs which this device may present to the IOMMU -- cgit v1.2.3-71-gd317 From 434b73e61cc65cdd26618af6fa4736c2ba1eb29b Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Thu, 1 Apr 2021 17:47:11 +0200 Subject: iommu/arm-smmu-v3: Use device properties for pasid-num-bits The pasid-num-bits property shouldn't need a dedicated fwspec field, it's a job for device properties. Add properties for IORT, and access the number of PASID bits using device_property_read_u32(). Suggested-by: Robin Murphy Acked-by: Jonathan Cameron Acked-by: Will Deacon Reviewed-by: Eric Auger Signed-off-by: Jean-Philippe Brucker Acked-by: Hanjun Guo Link: https://lore.kernel.org/r/20210401154718.307519-3-jean-philippe@linaro.org Signed-off-by: Joerg Roedel --- drivers/acpi/arm64/iort.c | 13 +++++++------ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 3 ++- drivers/iommu/of_iommu.c | 5 ----- include/linux/iommu.h | 2 -- 4 files changed, 9 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 2494138a6905..3912a1f6058e 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -968,15 +968,16 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) static void iort_named_component_init(struct device *dev, struct acpi_iort_node *node) { + struct property_entry props[2] = {}; struct acpi_iort_named_component *nc; - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - - if (!fwspec) - return; nc = (struct acpi_iort_named_component *)node->node_data; - fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS, - nc->node_flags); + props[0] = PROPERTY_ENTRY_U32("pasid-num-bits", + FIELD_GET(ACPI_IORT_NC_PASID_BITS, + nc->node_flags)); + + if (device_add_properties(dev, props)) + dev_warn(dev, "Could not add device properties\n"); } static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 8594b4a83043..7edce914c45e 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2392,7 +2392,8 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) } } - master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits); + device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits); + master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits); /* * Note that PASID must be enabled before, and disabled after ATS: diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 
e505b9130a1c..a9d2df001149 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -210,11 +210,6 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, of_pci_iommu_init, &info); } else { err = of_iommu_configure_device(master_np, dev, id); - - fwspec = dev_iommu_fwspec_get(dev); - if (!err && fwspec) - of_property_read_u32(master_np, "pasid-num-bits", - &fwspec->num_pasid_bits); } /* diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 1d422bf722a1..16ce75693d83 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -572,7 +572,6 @@ struct iommu_group *fsl_mc_device_group(struct device *dev); * @ops: ops for this device's IOMMU * @iommu_fwnode: firmware handle for this device's IOMMU * @flags: IOMMU_FWSPEC_* flags - * @num_pasid_bits: number of PASID bits supported by this device * @num_ids: number of associated device IDs * @ids: IDs which this device may present to the IOMMU */ @@ -580,7 +579,6 @@ struct iommu_fwspec { const struct iommu_ops *ops; struct fwnode_handle *iommu_fwnode; u32 flags; - u32 num_pasid_bits; unsigned int num_ids; u32 ids[]; }; -- cgit v1.2.3-71-gd317 From 34b48c704d194738eef0893aa06e412bdc8a972f Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Thu, 1 Apr 2021 17:47:12 +0200 Subject: iommu: Separate IOMMU_DEV_FEAT_IOPF from IOMMU_DEV_FEAT_SVA Some devices manage I/O Page Faults (IOPF) themselves instead of relying on PCIe PRI or Arm SMMU stall. Allow their drivers to enable SVA without mandating IOMMU-managed IOPF. The other device drivers now need to first enable IOMMU_DEV_FEAT_IOPF before enabling IOMMU_DEV_FEAT_SVA. Enabling IOMMU_DEV_FEAT_IOPF on its own doesn't have any effect visible to the device driver, it is used in combination with other features. Reviewed-by: Eric Auger Reviewed-by: Lu Baolu Signed-off-by: Jean-Philippe Brucker Link: https://lore.kernel.org/r/20210401154718.307519-4-jean-philippe@linaro.org Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 16ce75693d83..45c4eb372f56 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -156,10 +156,24 @@ struct iommu_resv_region { enum iommu_resv_type type; }; -/* Per device IOMMU features */ +/** + * enum iommu_dev_features - Per device IOMMU features + * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature + * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses + * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally + * enabling %IOMMU_DEV_FEAT_SVA requires + * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page + * Faults themselves instead of relying on the IOMMU. When + * supported, this feature must be enabled before and + * disabled after %IOMMU_DEV_FEAT_SVA. + * + * Device drivers query whether a feature is supported using + * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature(). + */ enum iommu_dev_features { - IOMMU_DEV_FEAT_AUX, /* Aux-domain feature */ - IOMMU_DEV_FEAT_SVA, /* Shared Virtual Addresses */ + IOMMU_DEV_FEAT_AUX, + IOMMU_DEV_FEAT_SVA, + IOMMU_DEV_FEAT_IOPF, }; #define IOMMU_PASID_INVALID (-1U) -- cgit v1.2.3-71-gd317 From fc36479db74e957c4696b605a32c4afaa15fa6cb Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Thu, 1 Apr 2021 17:47:15 +0200 Subject: iommu: Add a page fault handler Some systems allow devices to handle I/O Page Faults in the core mm. 
For example systems implementing the PCIe PRI extension or Arm SMMU stall model. Infrastructure for reporting these recoverable page faults was added to the IOMMU core by commit 0c830e6b3282 ("iommu: Introduce device fault report API"). Add a page fault handler for host SVA. IOMMU driver can now instantiate several fault workqueues and link them to IOPF-capable devices. Drivers can choose between a single global workqueue, one per IOMMU device, one per low-level fault queue, one per domain, etc. When it receives a fault event, most commonly in an IRQ handler, the IOMMU driver reports the fault using iommu_report_device_fault(), which calls the registered handler. The page fault handler then calls the mm fault handler, and reports either success or failure with iommu_page_response(). After the handler succeeds, the hardware retries the access. The iopf_param pointer could be embedded into iommu_fault_param. But putting iopf_param into the iommu_param structure allows us not to care about ordering between calls to iopf_queue_add_device() and iommu_register_device_fault_handler(). Tested-by: Lu Baolu Reviewed-by: Eric Auger Reviewed-by: Jacob Pan Reviewed-by: Jonathan Cameron Reviewed-by: Lu Baolu Signed-off-by: Jean-Philippe Brucker Link: https://lore.kernel.org/r/20210401154718.307519-7-jean-philippe@linaro.org Signed-off-by: Joerg Roedel --- drivers/iommu/Makefile | 1 + drivers/iommu/io-pgfault.c | 461 ++++++++++++++++++++++++++++++++++++++++++ drivers/iommu/iommu-sva-lib.h | 53 +++++ include/linux/iommu.h | 2 + 4 files changed, 517 insertions(+) create mode 100644 drivers/iommu/io-pgfault.c (limited to 'include/linux') diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 61bd30cd8369..60fafc23dee6 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_S390_IOMMU) += s390-iommu.o obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o +obj-$(CONFIG_IOMMU_SVA_LIB) += io-pgfault.o diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c new file mode 100644 index 000000000000..1df8c1dcae77 --- /dev/null +++ b/drivers/iommu/io-pgfault.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Handle device page faults + * + * Copyright (C) 2020 ARM Ltd. + */ + +#include +#include +#include +#include +#include + +#include "iommu-sva-lib.h" + +/** + * struct iopf_queue - IO Page Fault queue + * @wq: the fault workqueue + * @devices: devices attached to this queue + * @lock: protects the device list + */ +struct iopf_queue { + struct workqueue_struct *wq; + struct list_head devices; + struct mutex lock; +}; + +/** + * struct iopf_device_param - IO Page Fault data attached to a device + * @dev: the device that owns this param + * @queue: IOPF queue + * @queue_list: index into queue->devices + * @partial: faults that are part of a Page Request Group for which the last + * request hasn't been submitted yet. 
+ */ +struct iopf_device_param { + struct device *dev; + struct iopf_queue *queue; + struct list_head queue_list; + struct list_head partial; +}; + +struct iopf_fault { + struct iommu_fault fault; + struct list_head list; +}; + +struct iopf_group { + struct iopf_fault last_fault; + struct list_head faults; + struct work_struct work; + struct device *dev; +}; + +static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf, + enum iommu_page_response_code status) +{ + struct iommu_page_response resp = { + .version = IOMMU_PAGE_RESP_VERSION_1, + .pasid = iopf->fault.prm.pasid, + .grpid = iopf->fault.prm.grpid, + .code = status, + }; + + if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) && + (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID)) + resp.flags = IOMMU_PAGE_RESP_PASID_VALID; + + return iommu_page_response(dev, &resp); +} + +static enum iommu_page_response_code +iopf_handle_single(struct iopf_fault *iopf) +{ + vm_fault_t ret; + struct mm_struct *mm; + struct vm_area_struct *vma; + unsigned int access_flags = 0; + unsigned int fault_flags = FAULT_FLAG_REMOTE; + struct iommu_fault_page_request *prm = &iopf->fault.prm; + enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID; + + if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID)) + return status; + + mm = iommu_sva_find(prm->pasid); + if (IS_ERR_OR_NULL(mm)) + return status; + + mmap_read_lock(mm); + + vma = find_extend_vma(mm, prm->addr); + if (!vma) + /* Unmapped area */ + goto out_put_mm; + + if (prm->perm & IOMMU_FAULT_PERM_READ) + access_flags |= VM_READ; + + if (prm->perm & IOMMU_FAULT_PERM_WRITE) { + access_flags |= VM_WRITE; + fault_flags |= FAULT_FLAG_WRITE; + } + + if (prm->perm & IOMMU_FAULT_PERM_EXEC) { + access_flags |= VM_EXEC; + fault_flags |= FAULT_FLAG_INSTRUCTION; + } + + if (!(prm->perm & IOMMU_FAULT_PERM_PRIV)) + fault_flags |= FAULT_FLAG_USER; + + if (access_flags & ~vma->vm_flags) + /* Access fault */ + goto out_put_mm; + + ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL); + status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID : + IOMMU_PAGE_RESP_SUCCESS; + +out_put_mm: + mmap_read_unlock(mm); + mmput(mm); + + return status; +} + +static void iopf_handle_group(struct work_struct *work) +{ + struct iopf_group *group; + struct iopf_fault *iopf, *next; + enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS; + + group = container_of(work, struct iopf_group, work); + + list_for_each_entry_safe(iopf, next, &group->faults, list) { + /* + * For the moment, errors are sticky: don't handle subsequent + * faults in the group if there is an error. + */ + if (status == IOMMU_PAGE_RESP_SUCCESS) + status = iopf_handle_single(iopf); + + if (!(iopf->fault.prm.flags & + IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) + kfree(iopf); + } + + iopf_complete_group(group->dev, &group->last_fault, status); + kfree(group); +} + +/** + * iommu_queue_iopf - IO Page Fault handler + * @fault: fault event + * @cookie: struct device, passed to iommu_register_device_fault_handler. + * + * Add a fault to the device workqueue, to be handled by mm. + * + * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard + * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't + * expect a response. It may be generated when disabling a PASID (issuing a + * PASID stop request) by some PCI devices. + * + * The PASID stop request is issued by the device driver before unbind(). 
Once + * it completes, no page request is generated for this PASID anymore and + * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1 + * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait + * for all outstanding page requests to come back with a response before + * completing the PASID stop request. Others do not wait for page responses, and + * instead issue this Stop Marker that tells us when the PASID can be + * reallocated. + * + * It is safe to discard the Stop Marker because it is an optimization. + * a. Page requests, which are posted requests, have been flushed to the IOMMU + * when the stop request completes. + * b. The IOMMU driver flushes all fault queues on unbind() before freeing the + * PASID. + * + * So even though the Stop Marker might be issued by the device *after* the stop + * request completes, outstanding faults will have been dealt with by the time + * the PASID is freed. + * + * Return: 0 on success and <0 on error. + */ +int iommu_queue_iopf(struct iommu_fault *fault, void *cookie) +{ + int ret; + struct iopf_group *group; + struct iopf_fault *iopf, *next; + struct iopf_device_param *iopf_param; + + struct device *dev = cookie; + struct dev_iommu *param = dev->iommu; + + lockdep_assert_held(¶m->lock); + + if (fault->type != IOMMU_FAULT_PAGE_REQ) + /* Not a recoverable page fault */ + return -EOPNOTSUPP; + + /* + * As long as we're holding param->lock, the queue can't be unlinked + * from the device and therefore cannot disappear. + */ + iopf_param = param->iopf_param; + if (!iopf_param) + return -ENODEV; + + if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { + iopf = kzalloc(sizeof(*iopf), GFP_KERNEL); + if (!iopf) + return -ENOMEM; + + iopf->fault = *fault; + + /* Non-last request of a group. Postpone until the last one */ + list_add(&iopf->list, &iopf_param->partial); + + return 0; + } + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) { + /* + * The caller will send a response to the hardware. But we do + * need to clean up before leaving, otherwise partial faults + * will be stuck. + */ + ret = -ENOMEM; + goto cleanup_partial; + } + + group->dev = dev; + group->last_fault.fault = *fault; + INIT_LIST_HEAD(&group->faults); + list_add(&group->last_fault.list, &group->faults); + INIT_WORK(&group->work, iopf_handle_group); + + /* See if we have partial faults for this group */ + list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { + if (iopf->fault.prm.grpid == fault->prm.grpid) + /* Insert *before* the last fault */ + list_move(&iopf->list, &group->faults); + } + + queue_work(iopf_param->queue->wq, &group->work); + return 0; + +cleanup_partial: + list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { + if (iopf->fault.prm.grpid == fault->prm.grpid) { + list_del(&iopf->list); + kfree(iopf); + } + } + return ret; +} +EXPORT_SYMBOL_GPL(iommu_queue_iopf); + +/** + * iopf_queue_flush_dev - Ensure that all queued faults have been processed + * @dev: the endpoint whose faults need to be flushed. + * + * The IOMMU driver calls this before releasing a PASID, to ensure that all + * pending faults for this PASID have been handled, and won't hit the address + * space of the next process that uses this PASID. The driver must make sure + * that no new fault is added to the queue. In particular it must flush its + * low-level queue before calling this function. + * + * Return: 0 on success and <0 on error. 
+ */ +int iopf_queue_flush_dev(struct device *dev) +{ + int ret = 0; + struct iopf_device_param *iopf_param; + struct dev_iommu *param = dev->iommu; + + if (!param) + return -ENODEV; + + mutex_lock(¶m->lock); + iopf_param = param->iopf_param; + if (iopf_param) + flush_workqueue(iopf_param->queue->wq); + else + ret = -ENODEV; + mutex_unlock(¶m->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(iopf_queue_flush_dev); + +/** + * iopf_queue_discard_partial - Remove all pending partial fault + * @queue: the queue whose partial faults need to be discarded + * + * When the hardware queue overflows, last page faults in a group may have been + * lost and the IOMMU driver calls this to discard all partial faults. The + * driver shouldn't be adding new faults to this queue concurrently. + * + * Return: 0 on success and <0 on error. + */ +int iopf_queue_discard_partial(struct iopf_queue *queue) +{ + struct iopf_fault *iopf, *next; + struct iopf_device_param *iopf_param; + + if (!queue) + return -EINVAL; + + mutex_lock(&queue->lock); + list_for_each_entry(iopf_param, &queue->devices, queue_list) { + list_for_each_entry_safe(iopf, next, &iopf_param->partial, + list) { + list_del(&iopf->list); + kfree(iopf); + } + } + mutex_unlock(&queue->lock); + return 0; +} +EXPORT_SYMBOL_GPL(iopf_queue_discard_partial); + +/** + * iopf_queue_add_device - Add producer to the fault queue + * @queue: IOPF queue + * @dev: device to add + * + * Return: 0 on success and <0 on error. + */ +int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev) +{ + int ret = -EBUSY; + struct iopf_device_param *iopf_param; + struct dev_iommu *param = dev->iommu; + + if (!param) + return -ENODEV; + + iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL); + if (!iopf_param) + return -ENOMEM; + + INIT_LIST_HEAD(&iopf_param->partial); + iopf_param->queue = queue; + iopf_param->dev = dev; + + mutex_lock(&queue->lock); + mutex_lock(¶m->lock); + if (!param->iopf_param) { + list_add(&iopf_param->queue_list, &queue->devices); + param->iopf_param = iopf_param; + ret = 0; + } + mutex_unlock(¶m->lock); + mutex_unlock(&queue->lock); + + if (ret) + kfree(iopf_param); + + return ret; +} +EXPORT_SYMBOL_GPL(iopf_queue_add_device); + +/** + * iopf_queue_remove_device - Remove producer from fault queue + * @queue: IOPF queue + * @dev: device to remove + * + * Caller makes sure that no more faults are reported for this device. + * + * Return: 0 on success and <0 on error. + */ +int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) +{ + int ret = -EINVAL; + struct iopf_fault *iopf, *next; + struct iopf_device_param *iopf_param; + struct dev_iommu *param = dev->iommu; + + if (!param || !queue) + return -EINVAL; + + mutex_lock(&queue->lock); + mutex_lock(¶m->lock); + iopf_param = param->iopf_param; + if (iopf_param && iopf_param->queue == queue) { + list_del(&iopf_param->queue_list); + param->iopf_param = NULL; + ret = 0; + } + mutex_unlock(¶m->lock); + mutex_unlock(&queue->lock); + if (ret) + return ret; + + /* Just in case some faults are still stuck */ + list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) + kfree(iopf); + + kfree(iopf_param); + + return 0; +} +EXPORT_SYMBOL_GPL(iopf_queue_remove_device); + +/** + * iopf_queue_alloc - Allocate and initialize a fault queue + * @name: a unique string identifying the queue (for workqueue) + * + * Return: the queue on success and NULL on error. 
+ */ +struct iopf_queue *iopf_queue_alloc(const char *name) +{ + struct iopf_queue *queue; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + return NULL; + + /* + * The WQ is unordered because the low-level handler enqueues faults by + * group. PRI requests within a group have to be ordered, but once + * that's dealt with, the high-level function can handle groups out of + * order. + */ + queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name); + if (!queue->wq) { + kfree(queue); + return NULL; + } + + INIT_LIST_HEAD(&queue->devices); + mutex_init(&queue->lock); + + return queue; +} +EXPORT_SYMBOL_GPL(iopf_queue_alloc); + +/** + * iopf_queue_free - Free IOPF queue + * @queue: queue to free + * + * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or + * adding/removing devices on this queue anymore. + */ +void iopf_queue_free(struct iopf_queue *queue) +{ + struct iopf_device_param *iopf_param, *next; + + if (!queue) + return; + + list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list) + iopf_queue_remove_device(queue, iopf_param->dev); + + destroy_workqueue(queue->wq); + kfree(queue); +} +EXPORT_SYMBOL_GPL(iopf_queue_free); diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h index b40990aef3fd..031155010ca8 100644 --- a/drivers/iommu/iommu-sva-lib.h +++ b/drivers/iommu/iommu-sva-lib.h @@ -12,4 +12,57 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max); void iommu_sva_free_pasid(struct mm_struct *mm); struct mm_struct *iommu_sva_find(ioasid_t pasid); +/* I/O Page fault */ +struct device; +struct iommu_fault; +struct iopf_queue; + +#ifdef CONFIG_IOMMU_SVA_LIB +int iommu_queue_iopf(struct iommu_fault *fault, void *cookie); + +int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev); +int iopf_queue_remove_device(struct iopf_queue *queue, + struct device *dev); +int iopf_queue_flush_dev(struct device *dev); +struct iopf_queue *iopf_queue_alloc(const char *name); +void iopf_queue_free(struct iopf_queue *queue); +int iopf_queue_discard_partial(struct iopf_queue *queue); + +#else /* CONFIG_IOMMU_SVA_LIB */ +static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie) +{ + return -ENODEV; +} + +static inline int iopf_queue_add_device(struct iopf_queue *queue, + struct device *dev) +{ + return -ENODEV; +} + +static inline int iopf_queue_remove_device(struct iopf_queue *queue, + struct device *dev) +{ + return -ENODEV; +} + +static inline int iopf_queue_flush_dev(struct device *dev) +{ + return -ENODEV; +} + +static inline struct iopf_queue *iopf_queue_alloc(const char *name) +{ + return NULL; +} + +static inline void iopf_queue_free(struct iopf_queue *queue) +{ +} + +static inline int iopf_queue_discard_partial(struct iopf_queue *queue) +{ + return -ENODEV; +} +#endif /* CONFIG_IOMMU_SVA_LIB */ #endif /* _IOMMU_SVA_LIB_H */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 45c4eb372f56..86d688c4418f 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -367,6 +367,7 @@ struct iommu_fault_param { * struct dev_iommu - Collection of per-device IOMMU data * * @fault_param: IOMMU detected device fault reporting data + * @iopf_param: I/O Page Fault queue and data * @fwspec: IOMMU fwspec data * @iommu_dev: IOMMU device this device is linked to * @priv: IOMMU Driver private data @@ -377,6 +378,7 @@ struct iommu_fault_param { struct dev_iommu { struct mutex lock; struct iommu_fault_param *fault_param; + struct iopf_device_param 
*iopf_param; struct iommu_fwspec *fwspec; struct iommu_device *iommu_dev; void *priv; -- cgit v1.2.3-71-gd317 From 47685cb202d1aff6f70a2bb91e8271392fefea84 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:37 +0200 Subject: iommu: remove the unused domain_window_disable method domain_window_disable is wired up by fsl_pamu, but never actually called. Signed-off-by: Christoph Hellwig Acked-by: Will Deacon Acked-by: Li Yang Link: https://lore.kernel.org/r/20210401155256.298656-2-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/fsl_pamu_domain.c | 48 ----------------------------------------- include/linux/iommu.h | 2 -- 2 files changed, 50 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index b2110767caf4..53380cf1fa45 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -473,53 +473,6 @@ static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr) return ret; } -static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr) -{ - struct device_domain_info *info; - int ret = 0; - - list_for_each_entry(info, &dma_domain->devices, link) { - if (dma_domain->win_cnt == 1 && dma_domain->enabled) { - ret = pamu_disable_liodn(info->liodn); - if (!ret) - dma_domain->enabled = 0; - } else { - ret = pamu_disable_spaace(info->liodn, wnd_nr); - } - } - - return ret; -} - -static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) -{ - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - unsigned long flags; - int ret; - - spin_lock_irqsave(&dma_domain->domain_lock, flags); - if (!dma_domain->win_arr) { - pr_debug("Number of windows not configured\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return; - } - - if (wnd_nr >= dma_domain->win_cnt) { - pr_debug("Invalid window index\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return; - } - - if (dma_domain->win_arr[wnd_nr].valid) { - ret = disable_domain_win(dma_domain, wnd_nr); - if (!ret) { - dma_domain->win_arr[wnd_nr].valid = 0; - dma_domain->mapped--; - } - } - - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); -} static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot) @@ -1032,7 +985,6 @@ static const struct iommu_ops fsl_pamu_ops = { .attach_dev = fsl_pamu_attach_device, .detach_dev = fsl_pamu_detach_device, .domain_window_enable = fsl_pamu_window_enable, - .domain_window_disable = fsl_pamu_window_disable, .iova_to_phys = fsl_pamu_iova_to_phys, .domain_set_attr = fsl_pamu_set_domain_attr, .domain_get_attr = fsl_pamu_get_domain_attr, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 86d688c4418f..565b8810354d 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -223,7 +223,6 @@ struct iommu_iotlb_gather { * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges * @domain_window_enable: Configure and enable a particular window for a domain - * @domain_window_disable: Disable a particular window for a domain * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) @@ -284,7 +283,6 @@ struct iommu_ops { /* Window handling functions */ int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, 
int prot); - void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); -- cgit v1.2.3-71-gd317 From 392825e0c76cf9aca33e5a3bf981cde2a2c87251 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:38 +0200 Subject: iommu/fsl_pamu: remove fsl_pamu_get_domain_attr None of the values returned by this function are ever queried. Also remove the DOMAIN_ATTR_FSL_PAMUV1 enum value that is not otherwise used. Signed-off-by: Christoph Hellwig Acked-by: Will Deacon Acked-by: Li Yang Link: https://lore.kernel.org/r/20210401155256.298656-3-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/fsl_pamu_domain.c | 30 ------------------------------ include/linux/iommu.h | 4 ---- 2 files changed, 34 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 53380cf1fa45..e587ec43f7e7 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -832,35 +832,6 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, return ret; } -static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, - enum iommu_attr attr_type, void *data) -{ - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - int ret = 0; - - switch (attr_type) { - case DOMAIN_ATTR_FSL_PAMU_STASH: - memcpy(data, &dma_domain->dma_stash, - sizeof(struct pamu_stash_attribute)); - break; - case DOMAIN_ATTR_FSL_PAMU_ENABLE: - *(int *)data = dma_domain->enabled; - break; - case DOMAIN_ATTR_FSL_PAMUV1: - *(int *)data = DOMAIN_ATTR_FSL_PAMUV1; - break; - case DOMAIN_ATTR_WINDOWS: - *(u32 *)data = dma_domain->win_cnt; - break; - default: - pr_debug("Unsupported attribute type\n"); - ret = -EINVAL; - break; - } - - return ret; -} - static struct iommu_group *get_device_iommu_group(struct device *dev) { struct iommu_group *group; @@ -987,7 +958,6 @@ static const struct iommu_ops fsl_pamu_ops = { .domain_window_enable = fsl_pamu_window_enable, .iova_to_phys = fsl_pamu_iova_to_phys, .domain_set_attr = fsl_pamu_set_domain_attr, - .domain_get_attr = fsl_pamu_get_domain_attr, .probe_device = fsl_pamu_probe_device, .release_device = fsl_pamu_release_device, .device_group = fsl_pamu_device_group, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 565b8810354d..95ce205347a9 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -104,9 +104,6 @@ enum iommu_cap { * -the actual size of the mapped region of a window must be power * of 2 starting with 4KB and physical address must be naturally * aligned. - * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints. - * The caller can invoke iommu_domain_get_attr to check if the underlying - * iommu implementation supports these constraints. */ enum iommu_attr { @@ -115,7 +112,6 @@ enum iommu_attr { DOMAIN_ATTR_WINDOWS, DOMAIN_ATTR_FSL_PAMU_STASH, DOMAIN_ATTR_FSL_PAMU_ENABLE, - DOMAIN_ATTR_FSL_PAMUV1, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, -- cgit v1.2.3-71-gd317 From ba58d1216e2b2d2320b50591b767f50b13c623a8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:41 +0200 Subject: iommu/fsl_pamu: remove support for multiple windows The only domains allocated forces use of a single window. 
Remove all the code related to multiple window support, as well as the need for qman_portal to force a single window. Remove the now unused DOMAIN_ATTR_WINDOWS iommu_attr. Signed-off-by: Christoph Hellwig Acked-by: Will Deacon Acked-by: Li Yang Link: https://lore.kernel.org/r/20210401155256.298656-6-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/fsl_pamu.c | 264 ++-------------------------------- drivers/iommu/fsl_pamu.h | 10 +- drivers/iommu/fsl_pamu_domain.c | 275 +++++++----------------------------- drivers/iommu/fsl_pamu_domain.h | 16 +-- drivers/soc/fsl/qbman/qman_portal.c | 7 - include/linux/iommu.h | 1 - 6 files changed, 60 insertions(+), 513 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index b9a974d97831..3e1647cd5ad4 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -63,19 +63,6 @@ static const struct of_device_id l3_device_ids[] = { /* maximum subwindows permitted per liodn */ static u32 max_subwindow_count; -/* Pool for fspi allocation */ -static struct gen_pool *spaace_pool; - -/** - * pamu_get_max_subwin_cnt() - Return the maximum supported - * subwindow count per liodn. - * - */ -u32 pamu_get_max_subwin_cnt(void) -{ - return max_subwindow_count; -} - /** * pamu_get_ppaace() - Return the primary PACCE * @liodn: liodn PAACT index for desired PAACE @@ -155,13 +142,6 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) return fls64(addrspace_size) - 2; } -/* Derive the PAACE window count encoding for the subwindow count */ -static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt) -{ - /* window count is 2^(WCE+1) bytes */ - return __ffs(subwindow_cnt) - 1; -} - /* * Set the PAACE type as primary and set the coherency required domain * attribute @@ -174,89 +154,11 @@ static void pamu_init_ppaace(struct paace *ppaace) PAACE_M_COHERENCE_REQ); } -/* - * Set the PAACE type as secondary and set the coherency required domain - * attribute. - */ -static void pamu_init_spaace(struct paace *spaace) -{ - set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY); - set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR, - PAACE_M_COHERENCE_REQ); -} - -/* - * Return the spaace (corresponding to the secondary window index) - * for a particular ppaace. - */ -static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum) -{ - u32 subwin_cnt; - struct paace *spaace = NULL; - - subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1); - - if (wnum < subwin_cnt) - spaace = &spaact[paace->fspi + wnum]; - else - pr_debug("secondary paace out of bounds\n"); - - return spaace; -} - -/** - * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows - * required for primary PAACE in the secondary - * PAACE table. - * @subwin_cnt: Number of subwindows to be reserved. - * - * A PPAACE entry may have a number of associated subwindows. A subwindow - * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores - * the index (fspi) of the first SPAACE entry in the SPAACT table. This - * function returns the index of the first SPAACE entry. The remaining - * SPAACE entries are reserved contiguously from that index. - * - * Returns a valid fspi index in the range of 0 - SPAACE_NUMBER_ENTRIES on success. - * If no SPAACE entry is available or the allocator can not reserve the required - * number of contiguous entries function returns ULONG_MAX indicating a failure. 
- * - */ -static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt) -{ - unsigned long spaace_addr; - - spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace)); - if (!spaace_addr) - return ULONG_MAX; - - return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace)); -} - -/* Release the subwindows reserved for a particular LIODN */ -void pamu_free_subwins(int liodn) -{ - struct paace *ppaace; - u32 subwin_cnt, size; - - ppaace = pamu_get_ppaace(liodn); - if (!ppaace) { - pr_debug("Invalid liodn entry\n"); - return; - } - - if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) { - subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1); - size = (subwin_cnt - 1) * sizeof(struct paace); - gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size); - set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0); - } -} - /* * Function used for updating stash destination for the coressponding * LIODN. */ -int pamu_update_paace_stash(int liodn, u32 subwin, u32 value) +int pamu_update_paace_stash(int liodn, u32 value) { struct paace *paace; @@ -265,11 +167,6 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value) pr_debug("Invalid liodn entry\n"); return -ENOENT; } - if (subwin) { - paace = pamu_get_spaace(paace, subwin - 1); - if (!paace) - return -ENOENT; - } set_bf(paace->impl_attr, PAACE_IA_CID, value); mb(); @@ -277,31 +174,6 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value) return 0; } -/* Disable a subwindow corresponding to the LIODN */ -int pamu_disable_spaace(int liodn, u32 subwin) -{ - struct paace *paace; - - paace = pamu_get_ppaace(liodn); - if (!paace) { - pr_debug("Invalid liodn entry\n"); - return -ENOENT; - } - if (subwin) { - paace = pamu_get_spaace(paace, subwin - 1); - if (!paace) - return -ENOENT; - set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID); - } else { - set_bf(paace->addr_bitfields, PAACE_AF_AP, - PAACE_AP_PERMS_DENIED); - } - - mb(); - - return 0; -} - /** * pamu_config_paace() - Sets up PPAACE entry for specified liodn * @@ -314,17 +186,15 @@ int pamu_disable_spaace(int liodn, u32 subwin) * stashid not defined * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then * snoopid not defined - * @subwin_cnt: number of sub-windows * @prot: window permissions * * Returns 0 upon success else error code < 0 returned */ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size, u32 omi, unsigned long rpn, u32 snoopid, u32 stashid, - u32 subwin_cnt, int prot) + int prot) { struct paace *ppaace; - unsigned long fspi; if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) { pr_debug("window size too small or not a power of two %pa\n", @@ -368,116 +238,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size, if (~snoopid != 0) ppaace->domain_attr.to_host.snpid = snoopid; - if (subwin_cnt) { - /* The first entry is in the primary PAACE instead */ - fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1); - if (fspi == ULONG_MAX) { - pr_debug("spaace indexes exhausted\n"); - return -EINVAL; - } - - /* window count is 2^(WCE+1) bytes */ - set_bf(ppaace->impl_attr, PAACE_IA_WCE, - map_subwindow_cnt_to_wce(subwin_cnt)); - set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1); - ppaace->fspi = fspi; - } else { - set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE); - ppaace->twbah = rpn >> 20; - set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn); - set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot); - set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0); - 
set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0); - } - mb(); - - return 0; -} - -/** - * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow - * - * @liodn: Logical IO device number - * @subwin_cnt: number of sub-windows associated with dma-window - * @subwin: subwindow index - * @subwin_size: size of subwindow - * @omi: Operation mapping index - * @rpn: real (true physical) page number - * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then - * snoopid not defined - * @stashid: cache stash id for associated cpu - * @enable: enable/disable subwindow after reconfiguration - * @prot: sub window permissions - * - * Returns 0 upon success else error code < 0 returned - */ -int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin, - phys_addr_t subwin_size, u32 omi, unsigned long rpn, - u32 snoopid, u32 stashid, int enable, int prot) -{ - struct paace *paace; - - /* setup sub-windows */ - if (!subwin_cnt) { - pr_debug("Invalid subwindow count\n"); - return -EINVAL; - } - - paace = pamu_get_ppaace(liodn); - if (subwin > 0 && subwin < subwin_cnt && paace) { - paace = pamu_get_spaace(paace, subwin - 1); - - if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) { - pamu_init_spaace(paace); - set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn); - } - } - - if (!paace) { - pr_debug("Invalid liodn entry\n"); - return -ENOENT; - } - - if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) { - pr_debug("subwindow size out of range, or not a power of 2\n"); - return -EINVAL; - } - - if (rpn == ULONG_MAX) { - pr_debug("real page number out of range\n"); - return -EINVAL; - } - - /* window size is 2^(WSE+1) bytes */ - set_bf(paace->win_bitfields, PAACE_WIN_SWSE, - map_addrspace_size_to_wse(subwin_size)); - - set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE); - paace->twbah = rpn >> 20; - set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn); - set_bf(paace->addr_bitfields, PAACE_AF_AP, prot); - - /* configure snoop id */ - if (~snoopid != 0) - paace->domain_attr.to_host.snpid = snoopid; - - /* set up operation mapping if it's configured */ - if (omi < OME_NUMBER_ENTRIES) { - set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED); - paace->op_encode.index_ot.omi = omi; - } else if (~omi != 0) { - pr_debug("bad operation mapping index: %d\n", omi); - return -EINVAL; - } - - if (~stashid != 0) - set_bf(paace->impl_attr, PAACE_IA_CID, stashid); - - smp_wmb(); - - if (enable) - set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID); - + set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE); + ppaace->twbah = rpn >> 20; + set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn); + set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot); + set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0); + set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0); mb(); return 0; @@ -1129,17 +895,6 @@ static int fsl_pamu_probe(struct platform_device *pdev) spaact_phys = virt_to_phys(spaact); omt_phys = virt_to_phys(omt); - spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1); - if (!spaace_pool) { - ret = -ENOMEM; - dev_err(dev, "Failed to allocate spaace gen pool\n"); - goto error; - } - - ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1); - if (ret) - goto error_genpool; - pamubypenr = in_be32(&guts_regs->pamubypenr); for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size; @@ -1167,9 +922,6 @@ static int fsl_pamu_probe(struct platform_device *pdev) return 0; -error_genpool: - gen_pool_destroy(spaace_pool); - error: if (irq != 
NO_IRQ) free_irq(irq, data); diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h index e1496ba96160..04fd843d718d 100644 --- a/drivers/iommu/fsl_pamu.h +++ b/drivers/iommu/fsl_pamu.h @@ -383,18 +383,12 @@ struct ome { int pamu_domain_init(void); int pamu_enable_liodn(int liodn); int pamu_disable_liodn(int liodn); -void pamu_free_subwins(int liodn); int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size, u32 omi, unsigned long rpn, u32 snoopid, uint32_t stashid, - u32 subwin_cnt, int prot); -int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr, - phys_addr_t subwin_size, u32 omi, unsigned long rpn, - uint32_t snoopid, u32 stashid, int enable, int prot); + int prot); u32 get_stash_id(u32 stash_dest_hint, u32 vcpu); void get_ome_index(u32 *omi_index, struct device *dev); -int pamu_update_paace_stash(int liodn, u32 subwin, u32 value); -int pamu_disable_spaace(int liodn, u32 subwin); -u32 pamu_get_max_subwin_cnt(void); +int pamu_update_paace_stash(int liodn, u32 value); #endif /* __FSL_PAMU_H */ diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index a4da5597755d..e6bdd38fc184 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -56,65 +56,19 @@ static int __init iommu_init_mempool(void) static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) { - u32 win_cnt = dma_domain->win_cnt; struct dma_window *win_ptr = &dma_domain->win_arr[0]; struct iommu_domain_geometry *geom; geom = &dma_domain->iommu_domain.geometry; - if (!win_cnt) { - pr_debug("Number of windows/geometry not configured for the domain\n"); - return 0; - } - - if (win_cnt > 1) { - u64 subwin_size; - dma_addr_t subwin_iova; - u32 wnd; - - subwin_size = (geom->aperture_end + 1) >> ilog2(win_cnt); - subwin_iova = iova & ~(subwin_size - 1); - wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size); - win_ptr = &dma_domain->win_arr[wnd]; - } - if (win_ptr->valid) return win_ptr->paddr + (iova & (win_ptr->size - 1)); return 0; } -static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) -{ - struct dma_window *sub_win_ptr = &dma_domain->win_arr[0]; - int i, ret; - unsigned long rpn, flags; - - for (i = 0; i < dma_domain->win_cnt; i++) { - if (sub_win_ptr[i].valid) { - rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT; - spin_lock_irqsave(&iommu_lock, flags); - ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, - sub_win_ptr[i].size, - ~(u32)0, - rpn, - dma_domain->snoop_id, - dma_domain->stash_id, - (i > 0) ? 
1 : 0, - sub_win_ptr[i].prot); - spin_unlock_irqrestore(&iommu_lock, flags); - if (ret) { - pr_debug("SPAACE configuration failed for liodn %d\n", - liodn); - return ret; - } - } - } - - return ret; -} - -static int map_win(int liodn, struct fsl_dma_domain *dma_domain) +/* Map the DMA window corresponding to the LIODN */ +static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) { int ret; struct dma_window *wnd = &dma_domain->win_arr[0]; @@ -127,7 +81,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain) ~(u32)0, wnd->paddr >> PAMU_PAGE_SHIFT, dma_domain->snoop_id, dma_domain->stash_id, - 0, wnd->prot); + wnd->prot); spin_unlock_irqrestore(&iommu_lock, flags); if (ret) pr_debug("PAACE configuration failed for liodn %d\n", liodn); @@ -135,50 +89,27 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain) return ret; } -/* Map the DMA window corresponding to the LIODN */ -static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) -{ - if (dma_domain->win_cnt > 1) - return map_subwins(liodn, dma_domain); - else - return map_win(liodn, dma_domain); -} - /* Update window/subwindow mapping for the LIODN */ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr) { int ret; struct dma_window *wnd = &dma_domain->win_arr[wnd_nr]; + phys_addr_t wnd_addr; unsigned long flags; spin_lock_irqsave(&iommu_lock, flags); - if (dma_domain->win_cnt > 1) { - ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr, - wnd->size, - ~(u32)0, - wnd->paddr >> PAMU_PAGE_SHIFT, - dma_domain->snoop_id, - dma_domain->stash_id, - (wnd_nr > 0) ? 1 : 0, - wnd->prot); - if (ret) - pr_debug("Subwindow reconfiguration failed for liodn %d\n", - liodn); - } else { - phys_addr_t wnd_addr; - wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; + wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; - ret = pamu_config_ppaace(liodn, wnd_addr, - wnd->size, - ~(u32)0, - wnd->paddr >> PAMU_PAGE_SHIFT, - dma_domain->snoop_id, dma_domain->stash_id, - 0, wnd->prot); - if (ret) - pr_debug("Window reconfiguration failed for liodn %d\n", - liodn); - } + ret = pamu_config_ppaace(liodn, wnd_addr, + wnd->size, + ~(u32)0, + wnd->paddr >> PAMU_PAGE_SHIFT, + dma_domain->snoop_id, dma_domain->stash_id, + wnd->prot); + if (ret) + pr_debug("Window reconfiguration failed for liodn %d\n", + liodn); spin_unlock_irqrestore(&iommu_lock, flags); @@ -192,21 +123,12 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, unsigned long flags; spin_lock_irqsave(&iommu_lock, flags); - if (!dma_domain->win_arr) { - pr_debug("Windows not configured, stash destination update failed for liodn %d\n", - liodn); + ret = pamu_update_paace_stash(liodn, val); + if (ret) { + pr_debug("Failed to update SPAACE %d field for liodn %d\n ", + i, liodn); spin_unlock_irqrestore(&iommu_lock, flags); - return -EINVAL; - } - - for (i = 0; i < dma_domain->win_cnt; i++) { - ret = pamu_update_paace_stash(liodn, i, val); - if (ret) { - pr_debug("Failed to update SPAACE %d field for liodn %d\n ", - i, liodn); - spin_unlock_irqrestore(&iommu_lock, flags); - return ret; - } + return ret; } spin_unlock_irqrestore(&iommu_lock, flags); @@ -217,14 +139,12 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, /* Set the geometry parameters for a LIODN */ static int pamu_set_liodn(int liodn, struct device *dev, struct fsl_dma_domain *dma_domain, - struct iommu_domain_geometry *geom_attr, - u32 win_cnt) + struct iommu_domain_geometry *geom_attr) { phys_addr_t 
window_addr, window_size; - phys_addr_t subwin_size; - int ret = 0, i; u32 omi_index = ~(u32)0; unsigned long flags; + int ret; /* * Configure the omi_index at the geometry setup time. @@ -241,34 +161,14 @@ static int pamu_set_liodn(int liodn, struct device *dev, if (!ret) ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index, 0, dma_domain->snoop_id, - dma_domain->stash_id, win_cnt, 0); + dma_domain->stash_id, 0); spin_unlock_irqrestore(&iommu_lock, flags); if (ret) { - pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n", - liodn, win_cnt); + pr_debug("PAACE configuration failed for liodn %d\n", + liodn); return ret; } - if (win_cnt > 1) { - subwin_size = window_size >> ilog2(win_cnt); - for (i = 0; i < win_cnt; i++) { - spin_lock_irqsave(&iommu_lock, flags); - ret = pamu_disable_spaace(liodn, i); - if (!ret) - ret = pamu_config_spaace(liodn, win_cnt, i, - subwin_size, omi_index, - 0, dma_domain->snoop_id, - dma_domain->stash_id, - 0, 0); - spin_unlock_irqrestore(&iommu_lock, flags); - if (ret) { - pr_debug("SPAACE configuration failed for liodn %d\n", - liodn); - return ret; - } - } - } - return ret; } @@ -292,14 +192,12 @@ static int check_size(u64 size, dma_addr_t iova) return 0; } -static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) +static void remove_device_ref(struct device_domain_info *info) { unsigned long flags; list_del(&info->link); spin_lock_irqsave(&iommu_lock, flags); - if (win_cnt > 1) - pamu_free_subwins(info->liodn); pamu_disable_liodn(info->liodn); spin_unlock_irqrestore(&iommu_lock, flags); spin_lock_irqsave(&device_domain_lock, flags); @@ -317,7 +215,7 @@ static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain) /* Remove the device from the domain device list */ list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) { if (!dev || (info->dev == dev)) - remove_device_ref(info, dma_domain->win_cnt); + remove_device_ref(info); } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); } @@ -399,7 +297,6 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) dma_domain->stash_id = ~(u32)0; dma_domain->snoop_id = ~(u32)0; - dma_domain->win_cnt = pamu_get_max_subwin_cnt(); INIT_LIST_HEAD(&dma_domain->devices); spin_lock_init(&dma_domain->domain_lock); @@ -411,24 +308,6 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) return &dma_domain->iommu_domain; } -/* Configure geometry settings for all LIODNs associated with domain */ -static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain, - struct iommu_domain_geometry *geom_attr, - u32 win_cnt) -{ - struct device_domain_info *info; - int ret = 0; - - list_for_each_entry(info, &dma_domain->devices, link) { - ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, - geom_attr, win_cnt); - if (ret) - break; - } - - return ret; -} - /* Update stash destination for all LIODNs associated with the domain */ static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val) { @@ -475,39 +354,30 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, pamu_prot |= PAACE_AP_PERMS_UPDATE; spin_lock_irqsave(&dma_domain->domain_lock, flags); - if (!dma_domain->win_arr) { - pr_debug("Number of windows not configured\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -ENODEV; - } - - if (wnd_nr >= dma_domain->win_cnt) { + if (wnd_nr > 0) { pr_debug("Invalid window index\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } - win_size = 
(domain->geometry.aperture_end + 1) >> - ilog2(dma_domain->win_cnt); + win_size = (domain->geometry.aperture_end + 1) >> ilog2(1); if (size > win_size) { pr_debug("Invalid window size\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } - if (dma_domain->win_cnt == 1) { - if (dma_domain->enabled) { - pr_debug("Disable the window before updating the mapping\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EBUSY; - } + if (dma_domain->enabled) { + pr_debug("Disable the window before updating the mapping\n"); + spin_unlock_irqrestore(&dma_domain->domain_lock, flags); + return -EBUSY; + } - ret = check_size(size, domain->geometry.aperture_start); - if (ret) { - pr_debug("Aperture start not aligned to the size\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } + ret = check_size(size, domain->geometry.aperture_start); + if (ret) { + pr_debug("Aperture start not aligned to the size\n"); + spin_unlock_irqrestore(&dma_domain->domain_lock, flags); + return -EINVAL; } wnd = &dma_domain->win_arr[wnd_nr]; @@ -560,22 +430,18 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, * for the domain. If yes, set the geometry for * the LIODN. */ - if (dma_domain->win_arr) { - u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; - - ret = pamu_set_liodn(liodn[i], dev, dma_domain, - &domain->geometry, win_cnt); + ret = pamu_set_liodn(liodn[i], dev, dma_domain, + &domain->geometry); + if (ret) + break; + if (dma_domain->mapped) { + /* + * Create window/subwindow mapping for + * the LIODN. + */ + ret = map_liodn(liodn[i], dma_domain); if (ret) break; - if (dma_domain->mapped) { - /* - * Create window/subwindow mapping for - * the LIODN. - */ - ret = map_liodn(liodn[i], dma_domain); - if (ret) - break; - } } } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); @@ -706,48 +572,6 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en return 0; } -static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) -{ - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - unsigned long flags; - int ret; - - spin_lock_irqsave(&dma_domain->domain_lock, flags); - /* Ensure domain is inactive i.e. DMA should be disabled for the domain */ - if (dma_domain->enabled) { - pr_debug("Can't set geometry attributes as domain is active\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EBUSY; - } - - /* - * Ensure we have valid window count i.e. it should be less than - * maximum permissible limit and should be a power of two. - */ - if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) { - pr_debug("Invalid window count\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } - - ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, - w_count > 1 ? 
w_count : 0); - if (!ret) { - kfree(dma_domain->win_arr); - dma_domain->win_arr = kcalloc(w_count, - sizeof(*dma_domain->win_arr), - GFP_ATOMIC); - if (!dma_domain->win_arr) { - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -ENOMEM; - } - dma_domain->win_cnt = w_count; - } - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - - return ret; -} - static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, enum iommu_attr attr_type, void *data) { @@ -761,9 +585,6 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, case DOMAIN_ATTR_FSL_PAMU_ENABLE: ret = configure_domain_dma_state(dma_domain, *(int *)data); break; - case DOMAIN_ATTR_WINDOWS: - ret = fsl_pamu_set_windows(domain, *(u32 *)data); - break; default: pr_debug("Unsupported attribute type\n"); ret = -EINVAL; diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index 53d359d66fe5..d3523ee9999d 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -17,23 +17,11 @@ struct dma_window { }; struct fsl_dma_domain { - /* - * Number of windows assocaited with this domain. - * During domain initialization, it is set to the - * the maximum number of subwindows allowed for a LIODN. - * Minimum value for this is 1 indicating a single PAMU - * window, without any sub windows. Value can be set/ - * queried by set_attr/get_attr API for DOMAIN_ATTR_WINDOWS. - * Value can only be set once the geometry has been configured. - */ - u32 win_cnt; /* * win_arr contains information of the configured - * windows for a domain. This is allocated only - * when the number of windows for the domain are - * set. + * windows for a domain. */ - struct dma_window *win_arr; + struct dma_window win_arr[1]; /* list of devices associated with the domain */ struct list_head devices; /* dma_domain states: diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index c958e6310d30..3d56ec4b373b 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -55,13 +55,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); goto no_iommu; } - ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS, - &window_count); - if (ret < 0) { - dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, - ret); - goto out_domain_free; - } stash_attr.cpu = cpu; stash_attr.cache = PAMU_ATTR_CACHE_L1; ret = iommu_domain_set_attr(pcfg->iommu_domain, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 95ce205347a9..761aa8b69936 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -109,7 +109,6 @@ enum iommu_cap { enum iommu_attr { DOMAIN_ATTR_GEOMETRY, DOMAIN_ATTR_PAGING, - DOMAIN_ATTR_WINDOWS, DOMAIN_ATTR_FSL_PAMU_STASH, DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_NESTING, /* two stages of translation */ -- cgit v1.2.3-71-gd317 From 376dfd2a2ff41596a6efc8ea56f8b0de172b04a6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:42 +0200 Subject: iommu/fsl_pamu: remove ->domain_window_enable The only thing that fsl_pamu_window_enable does for the current caller is to fill in the prot value in the only dma_window structure, and to propagate a few values from the iommu_domain_geometry struture into the dma_window. Remove the dma_window entirely, hardcode the prot value and otherwise use the iommu_domain_geometry structure instead. Remove the now unused ->domain_window_enable iommu method. 
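As a minimal sketch of the resulting mapping path (not part of the patch; it condenses the map_liodn() hunk further below, with the failure pr_debug() elided), the PAMU window is now programmed straight from the domain geometry, with a 1:1 translation and fixed read/write permissions:

static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
{
	struct iommu_domain_geometry *geom = &dma_domain->iommu_domain.geometry;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu_lock, flags);
	/* one window covering the whole aperture, rpn 0, read + write */
	ret = pamu_config_ppaace(liodn, geom->aperture_start,
				 geom->aperture_end + 1, ~(u32)0, 0,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
	spin_unlock_irqrestore(&iommu_lock, flags);
	return ret;
}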
Signed-off-by: Christoph Hellwig Acked-by: Li Yang Link: https://lore.kernel.org/r/20210401155256.298656-7-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/fsl_pamu_domain.c | 182 +++--------------------------------- drivers/iommu/fsl_pamu_domain.h | 15 --- drivers/iommu/iommu.c | 11 --- drivers/soc/fsl/qbman/qman_portal.c | 7 -- include/linux/iommu.h | 17 ---- 5 files changed, 14 insertions(+), 218 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index e6bdd38fc184..689035e9d409 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -54,34 +54,18 @@ static int __init iommu_init_mempool(void) return 0; } -static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) -{ - struct dma_window *win_ptr = &dma_domain->win_arr[0]; - struct iommu_domain_geometry *geom; - - geom = &dma_domain->iommu_domain.geometry; - - if (win_ptr->valid) - return win_ptr->paddr + (iova & (win_ptr->size - 1)); - - return 0; -} - /* Map the DMA window corresponding to the LIODN */ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) { int ret; - struct dma_window *wnd = &dma_domain->win_arr[0]; - phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; + struct iommu_domain_geometry *geom = &dma_domain->iommu_domain.geometry; unsigned long flags; spin_lock_irqsave(&iommu_lock, flags); - ret = pamu_config_ppaace(liodn, wnd_addr, - wnd->size, - ~(u32)0, - wnd->paddr >> PAMU_PAGE_SHIFT, - dma_domain->snoop_id, dma_domain->stash_id, - wnd->prot); + ret = pamu_config_ppaace(liodn, geom->aperture_start, + geom->aperture_end + 1, ~(u32)0, + 0, dma_domain->snoop_id, dma_domain->stash_id, + PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE); spin_unlock_irqrestore(&iommu_lock, flags); if (ret) pr_debug("PAACE configuration failed for liodn %d\n", liodn); @@ -89,33 +73,6 @@ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) return ret; } -/* Update window/subwindow mapping for the LIODN */ -static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr) -{ - int ret; - struct dma_window *wnd = &dma_domain->win_arr[wnd_nr]; - phys_addr_t wnd_addr; - unsigned long flags; - - spin_lock_irqsave(&iommu_lock, flags); - - wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; - - ret = pamu_config_ppaace(liodn, wnd_addr, - wnd->size, - ~(u32)0, - wnd->paddr >> PAMU_PAGE_SHIFT, - dma_domain->snoop_id, dma_domain->stash_id, - wnd->prot); - if (ret) - pr_debug("Window reconfiguration failed for liodn %d\n", - liodn); - - spin_unlock_irqrestore(&iommu_lock, flags); - - return ret; -} - static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, u32 val) { @@ -172,26 +129,6 @@ static int pamu_set_liodn(int liodn, struct device *dev, return ret; } -static int check_size(u64 size, dma_addr_t iova) -{ - /* - * Size must be a power of two and at least be equal - * to PAMU page size. 
- */ - if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { - pr_debug("Size too small or not a power of two\n"); - return -EINVAL; - } - - /* iova must be page size aligned */ - if (iova & (size - 1)) { - pr_debug("Address is not aligned with window size\n"); - return -EINVAL; - } - - return 0; -} - static void remove_device_ref(struct device_domain_info *info) { unsigned long flags; @@ -257,13 +194,10 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - if (iova < domain->geometry.aperture_start || iova > domain->geometry.aperture_end) return 0; - - return get_phys_addr(dma_domain, iova); + return iova; } static bool fsl_pamu_capable(enum iommu_cap cap) @@ -279,7 +213,6 @@ static void fsl_pamu_domain_free(struct iommu_domain *domain) detach_device(NULL, dma_domain); dma_domain->enabled = 0; - dma_domain->mapped = 0; kmem_cache_free(fsl_pamu_domain_cache, dma_domain); } @@ -323,84 +256,6 @@ static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val) return ret; } -/* Update domain mappings for all LIODNs associated with the domain */ -static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr) -{ - struct device_domain_info *info; - int ret = 0; - - list_for_each_entry(info, &dma_domain->devices, link) { - ret = update_liodn(info->liodn, dma_domain, wnd_nr); - if (ret) - break; - } - return ret; -} - - -static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t paddr, u64 size, int prot) -{ - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - struct dma_window *wnd; - int pamu_prot = 0; - int ret; - unsigned long flags; - u64 win_size; - - if (prot & IOMMU_READ) - pamu_prot |= PAACE_AP_PERMS_QUERY; - if (prot & IOMMU_WRITE) - pamu_prot |= PAACE_AP_PERMS_UPDATE; - - spin_lock_irqsave(&dma_domain->domain_lock, flags); - if (wnd_nr > 0) { - pr_debug("Invalid window index\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } - - win_size = (domain->geometry.aperture_end + 1) >> ilog2(1); - if (size > win_size) { - pr_debug("Invalid window size\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } - - if (dma_domain->enabled) { - pr_debug("Disable the window before updating the mapping\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EBUSY; - } - - ret = check_size(size, domain->geometry.aperture_start); - if (ret) { - pr_debug("Aperture start not aligned to the size\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -EINVAL; - } - - wnd = &dma_domain->win_arr[wnd_nr]; - if (!wnd->valid) { - wnd->paddr = paddr; - wnd->size = size; - wnd->prot = pamu_prot; - - ret = update_domain_mapping(dma_domain, wnd_nr); - if (!ret) { - wnd->valid = 1; - dma_domain->mapped++; - } - } else { - pr_debug("Disable the window before updating the mapping\n"); - ret = -EBUSY; - } - - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - - return ret; -} - /* * Attach the LIODN to the DMA domain and configure the geometry * and window mappings. @@ -434,15 +289,14 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, &domain->geometry); if (ret) break; - if (dma_domain->mapped) { - /* - * Create window/subwindow mapping for - * the LIODN. 
- */ - ret = map_liodn(liodn[i], dma_domain); - if (ret) - break; - } + + /* + * Create window/subwindow mapping for + * the LIODN. + */ + ret = map_liodn(liodn[i], dma_domain); + if (ret) + break; } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); @@ -552,13 +406,6 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en int ret; spin_lock_irqsave(&dma_domain->domain_lock, flags); - - if (enable && !dma_domain->mapped) { - pr_debug("Can't enable DMA domain without valid mapping\n"); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - return -ENODEV; - } - dma_domain->enabled = enable; list_for_each_entry(info, &dma_domain->devices, link) { ret = (enable) ? pamu_enable_liodn(info->liodn) : @@ -717,7 +564,6 @@ static const struct iommu_ops fsl_pamu_ops = { .domain_free = fsl_pamu_domain_free, .attach_dev = fsl_pamu_attach_device, .detach_dev = fsl_pamu_detach_device, - .domain_window_enable = fsl_pamu_window_enable, .iova_to_phys = fsl_pamu_iova_to_phys, .domain_set_attr = fsl_pamu_set_domain_attr, .probe_device = fsl_pamu_probe_device, diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index d3523ee9999d..13ee06e0ef01 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -9,24 +9,10 @@ #include "fsl_pamu.h" -struct dma_window { - phys_addr_t paddr; - u64 size; - int valid; - int prot; -}; - struct fsl_dma_domain { - /* - * win_arr contains information of the configured - * windows for a domain. - */ - struct dma_window win_arr[1]; /* list of devices associated with the domain */ struct list_head devices; /* dma_domain states: - * mapped - A particular mapping has been created - * within the configured geometry. * enabled - DMA has been enabled for the given * domain. This translates to setting of the * valid bit for the primary PAACE in the PAMU @@ -35,7 +21,6 @@ struct fsl_dma_domain { * enabled for it. 
* */ - int mapped; int enabled; /* stash_id obtained from the stash attribute details */ u32 stash_id; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index e10cfa99057c..33bc0f8f0a74 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2610,17 +2610,6 @@ size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); } -int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t paddr, u64 size, int prot) -{ - if (unlikely(domain->ops->domain_window_enable == NULL)) - return -ENODEV; - - return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size, - prot); -} -EXPORT_SYMBOL_GPL(iommu_domain_window_enable); - /** * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework * @domain: the iommu domain where the fault has happened diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 3d56ec4b373b..9ee1663f422c 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -65,13 +65,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) __func__, ret); goto out_domain_free; } - ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36, - IOMMU_READ | IOMMU_WRITE); - if (ret < 0) { - dev_err(dev, "%s(): iommu_domain_window_enable() = %d", - __func__, ret); - goto out_domain_free; - } ret = iommu_attach_device(pcfg->iommu_domain, dev); if (ret < 0) { dev_err(dev, "%s(): iommu_device_attach() = %d", __func__, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 761aa8b69936..69537e83aae4 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -217,7 +217,6 @@ struct iommu_iotlb_gather { * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges - * @domain_window_enable: Configure and enable a particular window for a domain * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) @@ -275,10 +274,6 @@ struct iommu_ops { struct iommu_domain *domain, struct iommu_resv_region *region); - /* Window handling functions */ - int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t paddr, u64 size, int prot); - int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); @@ -521,11 +516,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); -/* Window handling function prototypes */ -extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t offset, u64 size, - int prot); - extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); @@ -749,13 +739,6 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain, { } -static inline int iommu_domain_window_enable(struct iommu_domain *domain, - u32 wnd_nr, phys_addr_t paddr, - u64 size, int prot) -{ - return -ENODEV; -} - static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { return 0; -- cgit v1.2.3-71-gd317 From 4eeb96f6efac10e66fd10e718d2adeece3879121 Mon Sep 17 00:00:00 2001 
From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:43 +0200 Subject: iommu/fsl_pamu: replace DOMAIN_ATTR_FSL_PAMU_STASH with a direct call Add a fsl_pamu_configure_l1_stash API that qman_portal can call directly instead of indirecting through the iommu attr API. Signed-off-by: Christoph Hellwig Acked-by: Will Deacon Acked-by: Li Yang Link: https://lore.kernel.org/r/20210401155256.298656-8-hch@lst.de Signed-off-by: Joerg Roedel --- arch/powerpc/include/asm/fsl_pamu_stash.h | 12 +++--------- drivers/iommu/fsl_pamu_domain.c | 16 +++------------- drivers/iommu/fsl_pamu_domain.h | 2 -- drivers/soc/fsl/qbman/qman_portal.c | 18 +++--------------- include/linux/iommu.h | 1 - 5 files changed, 9 insertions(+), 40 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h index 30a31ad2123d..c0fbadb70b5d 100644 --- a/arch/powerpc/include/asm/fsl_pamu_stash.h +++ b/arch/powerpc/include/asm/fsl_pamu_stash.h @@ -7,6 +7,8 @@ #ifndef __FSL_PAMU_STASH_H #define __FSL_PAMU_STASH_H +struct iommu_domain; + /* cache stash targets */ enum pamu_stash_target { PAMU_ATTR_CACHE_L1 = 1, @@ -14,14 +16,6 @@ enum pamu_stash_target { PAMU_ATTR_CACHE_L3, }; -/* - * This attribute allows configuring stashig specific parameters - * in the PAMU hardware. - */ - -struct pamu_stash_attribute { - u32 cpu; /* cpu number */ - u32 cache; /* cache to stash to: L1,L2,L3 */ -}; +int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu); #endif /* __FSL_PAMU_STASH_H */ diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 689035e9d409..557a152c1d2c 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -372,27 +372,20 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain, } /* Set the domain stash attribute */ -static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) +int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu) { - struct pamu_stash_attribute *stash_attr = data; + struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); unsigned long flags; int ret; spin_lock_irqsave(&dma_domain->domain_lock, flags); - - memcpy(&dma_domain->dma_stash, stash_attr, - sizeof(struct pamu_stash_attribute)); - - dma_domain->stash_id = get_stash_id(stash_attr->cache, - stash_attr->cpu); + dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu); if (dma_domain->stash_id == ~(u32)0) { pr_debug("Invalid stash attributes\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } - ret = update_domain_stash(dma_domain, dma_domain->stash_id); - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return ret; @@ -426,9 +419,6 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, int ret = 0; switch (attr_type) { - case DOMAIN_ATTR_FSL_PAMU_STASH: - ret = configure_domain_stash(dma_domain, data); - break; case DOMAIN_ATTR_FSL_PAMU_ENABLE: ret = configure_domain_dma_state(dma_domain, *(int *)data); break; diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index 13ee06e0ef01..cd488004acd1 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -22,9 +22,7 @@ struct fsl_dma_domain { * */ int enabled; - /* stash_id obtained from the stash attribute details */ u32 stash_id; - struct pamu_stash_attribute dma_stash; u32 snoop_id; struct iommu_domain iommu_domain; spinlock_t domain_lock; diff --git a/drivers/soc/fsl/qbman/qman_portal.c 
b/drivers/soc/fsl/qbman/qman_portal.c index 9ee1663f422c..798b3a1ffd0b 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -47,7 +47,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) #ifdef CONFIG_FSL_PAMU struct device *dev = pcfg->dev; int window_count = 1; - struct pamu_stash_attribute stash_attr; int ret; pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); @@ -55,13 +54,9 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); goto no_iommu; } - stash_attr.cpu = cpu; - stash_attr.cache = PAMU_ATTR_CACHE_L1; - ret = iommu_domain_set_attr(pcfg->iommu_domain, - DOMAIN_ATTR_FSL_PAMU_STASH, - &stash_attr); + ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu); if (ret < 0) { - dev_err(dev, "%s(): iommu_domain_set_attr() = %d", + dev_err(dev, "%s(): fsl_pamu_configure_l1_stash() = %d", __func__, ret); goto out_domain_free; } @@ -143,15 +138,8 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg, unsigned int cpu) { #ifdef CONFIG_FSL_PAMU /* TODO */ - struct pamu_stash_attribute stash_attr; - int ret; - if (pcfg->iommu_domain) { - stash_attr.cpu = cpu; - stash_attr.cache = PAMU_ATTR_CACHE_L1; - ret = iommu_domain_set_attr(pcfg->iommu_domain, - DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr); - if (ret < 0) { + if (fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu) < 0) { dev_err(pcfg->dev, "Failed to update pamu stash setting\n"); return; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 69537e83aae4..a3968122aa69 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -109,7 +109,6 @@ enum iommu_cap { enum iommu_attr { DOMAIN_ATTR_GEOMETRY, DOMAIN_ATTR_PAGING, - DOMAIN_ATTR_FSL_PAMU_STASH, DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, -- cgit v1.2.3-71-gd317 From 7d61cb6ff0122a017ae907aed62478a4db9c5991 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:46 +0200 Subject: iommu/fsl_pamu: enable the liodn when attaching a device Instead of a separate call to enable all devices from the list, just enable the liodn once the device is attached to the iommu domain. This also remove the DOMAIN_ATTR_FSL_PAMU_ENABLE iommu_attr. 
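From the caller's side, a short sketch (not part of the patch; condensed from the qman_portal hunk below, error handling elided), showing that enabling DMA now collapses into the attach call itself:

	/* before: attach, then flip DOMAIN_ATTR_FSL_PAMU_ENABLE explicitly */
	iommu_attach_device(pcfg->iommu_domain, dev);
	iommu_domain_set_attr(pcfg->iommu_domain,
			      DOMAIN_ATTR_FSL_PAMU_ENABLE, &window_count);

	/* after: attaching the device programs and enables its LIODNs */
	iommu_attach_device(pcfg->iommu_domain, dev);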
Signed-off-by: Christoph Hellwig Acked-by: Will Deacon Acked-by: Li Yang Link: https://lore.kernel.org/r/20210401155256.298656-11-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/fsl_pamu_domain.c | 47 +++---------------------------------- drivers/iommu/fsl_pamu_domain.h | 10 -------- drivers/soc/fsl/qbman/qman_portal.c | 11 --------- include/linux/iommu.h | 1 - 4 files changed, 3 insertions(+), 66 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 41927c3c4177..c2e7e17570e7 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -195,9 +195,6 @@ static void fsl_pamu_domain_free(struct iommu_domain *domain) /* remove all the devices from the device list */ detach_device(NULL, dma_domain); - - dma_domain->enabled = 0; - kmem_cache_free(fsl_pamu_domain_cache, dma_domain); } @@ -285,6 +282,9 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain, ret = pamu_set_liodn(dma_domain, dev, liodn[i]); if (ret) break; + ret = pamu_enable_liodn(liodn[i]); + if (ret) + break; } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return ret; @@ -341,46 +341,6 @@ int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu) return ret; } -/* Configure domain dma state i.e. enable/disable DMA */ -static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) -{ - struct device_domain_info *info; - unsigned long flags; - int ret; - - spin_lock_irqsave(&dma_domain->domain_lock, flags); - dma_domain->enabled = enable; - list_for_each_entry(info, &dma_domain->devices, link) { - ret = (enable) ? pamu_enable_liodn(info->liodn) : - pamu_disable_liodn(info->liodn); - if (ret) - pr_debug("Unable to set dma state for liodn %d", - info->liodn); - } - spin_unlock_irqrestore(&dma_domain->domain_lock, flags); - - return 0; -} - -static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, - enum iommu_attr attr_type, void *data) -{ - struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); - int ret = 0; - - switch (attr_type) { - case DOMAIN_ATTR_FSL_PAMU_ENABLE: - ret = configure_domain_dma_state(dma_domain, *(int *)data); - break; - default: - pr_debug("Unsupported attribute type\n"); - ret = -EINVAL; - break; - } - - return ret; -} - static struct iommu_group *get_device_iommu_group(struct device *dev) { struct iommu_group *group; @@ -505,7 +465,6 @@ static const struct iommu_ops fsl_pamu_ops = { .attach_dev = fsl_pamu_attach_device, .detach_dev = fsl_pamu_detach_device, .iova_to_phys = fsl_pamu_iova_to_phys, - .domain_set_attr = fsl_pamu_set_domain_attr, .probe_device = fsl_pamu_probe_device, .release_device = fsl_pamu_release_device, .device_group = fsl_pamu_device_group, diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index cd488004acd1..5f4ed253f61b 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -12,16 +12,6 @@ struct fsl_dma_domain { /* list of devices associated with the domain */ struct list_head devices; - /* dma_domain states: - * enabled - DMA has been enabled for the given - * domain. This translates to setting of the - * valid bit for the primary PAACE in the PAMU - * PAACT table. Domain geometry should be set and - * it must have a valid mapping before DMA can be - * enabled for it. 
- * - */ - int enabled; u32 stash_id; u32 snoop_id; struct iommu_domain iommu_domain; diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 798b3a1ffd0b..bf38eb0042ed 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c @@ -46,7 +46,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) { #ifdef CONFIG_FSL_PAMU struct device *dev = pcfg->dev; - int window_count = 1; int ret; pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); @@ -66,14 +65,6 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) ret); goto out_domain_free; } - ret = iommu_domain_set_attr(pcfg->iommu_domain, - DOMAIN_ATTR_FSL_PAMU_ENABLE, - &window_count); - if (ret < 0) { - dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__, - ret); - goto out_detach_device; - } no_iommu: #endif @@ -82,8 +73,6 @@ no_iommu: return; #ifdef CONFIG_FSL_PAMU -out_detach_device: - iommu_detach_device(pcfg->iommu_domain, NULL); out_domain_free: iommu_domain_free(pcfg->iommu_domain); pcfg->iommu_domain = NULL; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index a3968122aa69..f5caaa8d39be 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -109,7 +109,6 @@ enum iommu_cap { enum iommu_attr { DOMAIN_ATTR_GEOMETRY, DOMAIN_ATTR_PAGING, - DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, -- cgit v1.2.3-71-gd317 From 9fb5fad562fa0a41c84691714d99c23f54168a9e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:50 +0200 Subject: iommu: remove DOMAIN_ATTR_PAGING DOMAIN_ATTR_PAGING is never used. Signed-off-by: Christoph Hellwig Acked-by: Will Deacon Acked-by: Li Yang Link: https://lore.kernel.org/r/20210401155256.298656-15-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 5 ----- include/linux/iommu.h | 1 - 2 files changed, 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 33bc0f8f0a74..93b3c2048c21 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2668,7 +2668,6 @@ int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { struct iommu_domain_geometry *geometry; - bool *paging; int ret = 0; switch (attr) { @@ -2676,10 +2675,6 @@ int iommu_domain_get_attr(struct iommu_domain *domain, geometry = data; *geometry = domain->geometry; - break; - case DOMAIN_ATTR_PAGING: - paging = data; - *paging = (domain->pgsize_bitmap != 0UL); break; default: if (!domain->ops->domain_get_attr) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index f5caaa8d39be..85b51084b820 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -108,7 +108,6 @@ enum iommu_cap { enum iommu_attr { DOMAIN_ATTR_GEOMETRY, - DOMAIN_ATTR_PAGING, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, -- cgit v1.2.3-71-gd317 From bc9a05eef113e75cfa792fdf24dae011bc3d5294 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:51 +0200 Subject: iommu: remove DOMAIN_ATTR_GEOMETRY The geometry information can be trivially queried from the iommu_domain struture. 
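
As a hedged illustration (not part of the patch), a caller that used to round-trip through iommu_domain_get_attr(..., DOMAIN_ATTR_GEOMETRY, ...) can now dereference the embedded geometry directly, as the vfio and vhost-vdpa hunks below do; the helper name here is made up:

  /* Hypothetical helper: read the aperture straight from the domain. */
  static void example_print_aperture(struct iommu_domain *domain)
  {
          struct iommu_domain_geometry *geo = &domain->geometry;

          pr_info("aperture: %pad - %pad, force_aperture=%d\n",
                  &geo->aperture_start, &geo->aperture_end,
                  geo->force_aperture);
  }
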
Signed-off-by: Christoph Hellwig Acked-by: Will Deacon Acked-by: Li Yang Link: https://lore.kernel.org/r/20210401155256.298656-16-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 20 +++----------------- drivers/vfio/vfio_iommu_type1.c | 26 ++++++++++++-------------- drivers/vhost/vdpa.c | 10 +++------- include/linux/iommu.h | 1 - 4 files changed, 18 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 93b3c2048c21..703e67d8cb04 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2667,23 +2667,9 @@ core_initcall(iommu_init); int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { - struct iommu_domain_geometry *geometry; - int ret = 0; - - switch (attr) { - case DOMAIN_ATTR_GEOMETRY: - geometry = data; - *geometry = domain->geometry; - - break; - default: - if (!domain->ops->domain_get_attr) - return -EINVAL; - - ret = domain->ops->domain_get_attr(domain, attr, data); - } - - return ret; + if (!domain->ops->domain_get_attr) + return -EINVAL; + return domain->ops->domain_get_attr(domain, attr, data); } EXPORT_SYMBOL_GPL(iommu_domain_get_attr); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 4bb162c1d649..c8e57f22f421 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -2252,7 +2252,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, int ret; bool resv_msi, msi_remap; phys_addr_t resv_msi_base = 0; - struct iommu_domain_geometry geo; + struct iommu_domain_geometry *geo; LIST_HEAD(iova_copy); LIST_HEAD(group_resv_regions); @@ -2333,10 +2333,9 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, goto out_domain; /* Get aperture info */ - iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, &geo); - - if (vfio_iommu_aper_conflict(iommu, geo.aperture_start, - geo.aperture_end)) { + geo = &domain->domain->geometry; + if (vfio_iommu_aper_conflict(iommu, geo->aperture_start, + geo->aperture_end)) { ret = -EINVAL; goto out_detach; } @@ -2359,8 +2358,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (ret) goto out_detach; - ret = vfio_iommu_aper_resize(&iova_copy, geo.aperture_start, - geo.aperture_end); + ret = vfio_iommu_aper_resize(&iova_copy, geo->aperture_start, + geo->aperture_end); if (ret) goto out_detach; @@ -2493,7 +2492,6 @@ static void vfio_iommu_aper_expand(struct vfio_iommu *iommu, struct list_head *iova_copy) { struct vfio_domain *domain; - struct iommu_domain_geometry geo; struct vfio_iova *node; dma_addr_t start = 0; dma_addr_t end = (dma_addr_t)~0; @@ -2502,12 +2500,12 @@ static void vfio_iommu_aper_expand(struct vfio_iommu *iommu, return; list_for_each_entry(domain, &iommu->domain_list, next) { - iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, - &geo); - if (geo.aperture_start > start) - start = geo.aperture_start; - if (geo.aperture_end < end) - end = geo.aperture_end; + struct iommu_domain_geometry *geo = &domain->domain->geometry; + + if (geo->aperture_start > start) + start = geo->aperture_start; + if (geo->aperture_end < end) + end = geo->aperture_end; } /* Modify aperture limits. 
The new aper is either same or bigger */ diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index ef688c8c0e0e..25824fab433d 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -826,18 +826,14 @@ static void vhost_vdpa_free_domain(struct vhost_vdpa *v) static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v) { struct vdpa_iova_range *range = &v->range; - struct iommu_domain_geometry geo; struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; if (ops->get_iova_range) { *range = ops->get_iova_range(vdpa); - } else if (v->domain && - !iommu_domain_get_attr(v->domain, - DOMAIN_ATTR_GEOMETRY, &geo) && - geo.force_aperture) { - range->first = geo.aperture_start; - range->last = geo.aperture_end; + } else if (v->domain && v->domain->geometry.force_aperture) { + range->first = v->domain->geometry.aperture_start; + range->last = v->domain->geometry.aperture_end; } else { range->first = 0; range->last = ULLONG_MAX; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 85b51084b820..207446cc6d3f 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -107,7 +107,6 @@ enum iommu_cap { */ enum iommu_attr { - DOMAIN_ATTR_GEOMETRY, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, -- cgit v1.2.3-71-gd317 From 7e147547783a9035df816864b6a45ffbb254d700 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:52 +0200 Subject: iommu: remove DOMAIN_ATTR_NESTING Use an explicit enable_nesting method instead. Signed-off-by: Christoph Hellwig Acked-by: Will Deacon Acked-by: Li Yang Link: https://lore.kernel.org/r/20210401155256.298656-17-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 43 +++++++++++------------------ drivers/iommu/arm/arm-smmu/arm-smmu.c | 30 ++++++++++---------- drivers/iommu/intel/iommu.c | 31 ++++++--------------- drivers/iommu/iommu.c | 10 +++++++ drivers/vfio/vfio_iommu_type1.c | 5 +--- include/linux/iommu.h | 4 ++- 6 files changed, 55 insertions(+), 68 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 1c7721b89fec..7f3a122e6b5f 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2555,15 +2555,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); switch (domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - switch (attr) { - case DOMAIN_ATTR_NESTING: - *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); - return 0; - default: - return -ENODEV; - } - break; case IOMMU_DOMAIN_DMA: switch (attr) { case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: @@ -2587,23 +2578,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, mutex_lock(&smmu_domain->init_mutex); switch (domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - switch (attr) { - case DOMAIN_ATTR_NESTING: - if (smmu_domain->smmu) { - ret = -EPERM; - goto out_unlock; - } - - if (*(int *)data) - smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; - else - smmu_domain->stage = ARM_SMMU_DOMAIN_S1; - break; - default: - ret = -ENODEV; - } - break; case IOMMU_DOMAIN_DMA: switch(attr) { case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: @@ -2617,11 +2591,25 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, ret = -EINVAL; } -out_unlock: mutex_unlock(&smmu_domain->init_mutex); return ret; } +static int 
arm_smmu_enable_nesting(struct iommu_domain *domain) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + int ret = 0; + + mutex_lock(&smmu_domain->init_mutex); + if (smmu_domain->smmu) + ret = -EPERM; + else + smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; + mutex_unlock(&smmu_domain->init_mutex); + + return ret; +} + static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) { return iommu_fwspec_add_ids(dev, args->args, 1); @@ -2721,6 +2709,7 @@ static struct iommu_ops arm_smmu_ops = { .device_group = arm_smmu_device_group, .domain_get_attr = arm_smmu_domain_get_attr, .domain_set_attr = arm_smmu_domain_set_attr, + .enable_nesting = arm_smmu_enable_nesting, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index d8c6bfde6a61..0aa6d6672749 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1489,9 +1489,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, switch(domain->type) { case IOMMU_DOMAIN_UNMANAGED: switch (attr) { - case DOMAIN_ATTR_NESTING: - *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); - return 0; case DOMAIN_ATTR_IO_PGTABLE_CFG: { struct io_pgtable_domain_attr *pgtbl_cfg = data; *pgtbl_cfg = smmu_domain->pgtbl_cfg; @@ -1519,6 +1516,21 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, } } +static int arm_smmu_enable_nesting(struct iommu_domain *domain) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + int ret = 0; + + mutex_lock(&smmu_domain->init_mutex); + if (smmu_domain->smmu) + ret = -EPERM; + else + smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; + mutex_unlock(&smmu_domain->init_mutex); + + return ret; +} + static int arm_smmu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { @@ -1530,17 +1542,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, switch(domain->type) { case IOMMU_DOMAIN_UNMANAGED: switch (attr) { - case DOMAIN_ATTR_NESTING: - if (smmu_domain->smmu) { - ret = -EPERM; - goto out_unlock; - } - - if (*(int *)data) - smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; - else - smmu_domain->stage = ARM_SMMU_DOMAIN_S1; - break; case DOMAIN_ATTR_IO_PGTABLE_CFG: { struct io_pgtable_domain_attr *pgtbl_cfg = data; @@ -1633,6 +1634,7 @@ static struct iommu_ops arm_smmu_ops = { .device_group = arm_smmu_device_group, .domain_get_attr = arm_smmu_domain_get_attr, .domain_set_attr = arm_smmu_domain_set_attr, + .enable_nesting = arm_smmu_enable_nesting, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 1ea0f0bd6973..761feac91767 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5396,32 +5396,19 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, } static int -intel_iommu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +intel_iommu_enable_nesting(struct iommu_domain *domain) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); unsigned long flags; - int ret = 0; - - if (domain->type != IOMMU_DOMAIN_UNMANAGED) - return -EINVAL; + int ret = -ENODEV; - switch (attr) { - case DOMAIN_ATTR_NESTING: - spin_lock_irqsave(&device_domain_lock, flags); - if (nested_mode_support() && - 
list_empty(&dmar_domain->devices)) { - dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE; - dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL; - } else { - ret = -ENODEV; - } - spin_unlock_irqrestore(&device_domain_lock, flags); - break; - default: - ret = -EINVAL; - break; + spin_lock_irqsave(&device_domain_lock, flags); + if (nested_mode_support() && list_empty(&dmar_domain->devices)) { + dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE; + dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL; + ret = 0; } + spin_unlock_irqrestore(&device_domain_lock, flags); return ret; } @@ -5550,7 +5537,7 @@ const struct iommu_ops intel_iommu_ops = { .domain_alloc = intel_iommu_domain_alloc, .domain_free = intel_iommu_domain_free, .domain_get_attr = intel_iommu_domain_get_attr, - .domain_set_attr = intel_iommu_domain_set_attr, + .enable_nesting = intel_iommu_enable_nesting, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, .aux_attach_dev = intel_iommu_aux_attach_device, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 703e67d8cb04..4738bc024240 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2690,6 +2690,16 @@ int iommu_domain_set_attr(struct iommu_domain *domain, } EXPORT_SYMBOL_GPL(iommu_domain_set_attr); +int iommu_enable_nesting(struct iommu_domain *domain) +{ + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; + if (!domain->ops->enable_nesting) + return -EINVAL; + return domain->ops->enable_nesting(domain); +} +EXPORT_SYMBOL_GPL(iommu_enable_nesting); + void iommu_get_resv_regions(struct device *dev, struct list_head *list) { const struct iommu_ops *ops = dev->bus->iommu_ops; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index c8e57f22f421..1201482e6eec 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -2320,10 +2320,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, } if (iommu->nesting) { - int attr = 1; - - ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING, - &attr); + ret = iommu_enable_nesting(domain->domain); if (ret) goto out_domain; } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 207446cc6d3f..d7d76b5e1192 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -107,7 +107,6 @@ enum iommu_cap { */ enum iommu_attr { - DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, DOMAIN_ATTR_MAX, @@ -210,6 +209,7 @@ struct iommu_iotlb_gather { * @device_group: find iommu group for a particular device * @domain_get_attr: Query domain attributes * @domain_set_attr: Change domain attributes + * @enable_nesting: Enable nesting * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges @@ -262,6 +262,7 @@ struct iommu_ops { enum iommu_attr attr, void *data); int (*domain_set_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); + int (*enable_nesting)(struct iommu_domain *domain); /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); @@ -511,6 +512,7 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, void *data); extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); +int iommu_enable_nesting(struct iommu_domain *domain); extern int 
report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); -- cgit v1.2.3-71-gd317 From a250c23f15c21c556becd4986f453255e545807c Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 1 Apr 2021 17:52:54 +0200 Subject: iommu: remove DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE Instead make the global iommu_dma_strict paramete in iommu.c canonical by exporting helpers to get and set it and use those directly in the drivers. This make sure that the iommu.strict parameter also works for the AMD and Intel IOMMU drivers on x86. As those default to lazy flushing a new IOMMU_CMD_LINE_STRICT is used to turn the value into a tristate to represent the default if not overriden by an explicit parameter. [ported on top of the other iommu_attr changes and added a few small missing bits] Signed-off-by: Robin Murphy . Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210401155256.298656-19-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu.c | 23 +---------- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 50 +--------------------- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 1 - drivers/iommu/arm/arm-smmu/arm-smmu.c | 27 ++---------- drivers/iommu/dma-iommu.c | 9 +--- drivers/iommu/intel/iommu.c | 64 ++++++----------------------- drivers/iommu/iommu.c | 30 ++++++++++---- include/linux/iommu.h | 4 +- 8 files changed, 43 insertions(+), 165 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index a69a8b573e40..ce6393d2224d 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -1771,26 +1771,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev) return acpihid_device_group(dev); } -static int amd_iommu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - switch (domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - return -ENODEV; - case IOMMU_DOMAIN_DMA: - switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - *(int *)data = !amd_iommu_unmap_flush; - return 0; - default: - return -ENODEV; - } - break; - default: - return -EINVAL; - } -} - /***************************************************************************** * * The next functions belong to the dma_ops mapping/unmapping code. 
@@ -1855,7 +1835,7 @@ int __init amd_iommu_init_dma_ops(void) pr_info("IO/TLB flush on unmap enabled\n"); else pr_info("Lazy IO/TLB flushing enabled\n"); - + iommu_set_dma_strict(amd_iommu_unmap_flush); return 0; } @@ -2257,7 +2237,6 @@ const struct iommu_ops amd_iommu_ops = { .release_device = amd_iommu_release_device, .probe_finalize = amd_iommu_probe_finalize, .device_group = amd_iommu_device_group, - .domain_get_attr = amd_iommu_domain_get_attr, .get_resv_regions = amd_iommu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 7f3a122e6b5f..7fe45838298c 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2040,7 +2040,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain, .iommu_dev = smmu->dev, }; - if (smmu_domain->non_strict) + if (!iommu_get_dma_strict(domain)) pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); @@ -2549,52 +2549,6 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) return group; } -static int arm_smmu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - - switch (domain->type) { - case IOMMU_DOMAIN_DMA: - switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - *(int *)data = smmu_domain->non_strict; - return 0; - default: - return -ENODEV; - } - break; - default: - return -EINVAL; - } -} - -static int arm_smmu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - int ret = 0; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - - mutex_lock(&smmu_domain->init_mutex); - - switch (domain->type) { - case IOMMU_DOMAIN_DMA: - switch(attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - smmu_domain->non_strict = *(int *)data; - break; - default: - ret = -ENODEV; - } - break; - default: - ret = -EINVAL; - } - - mutex_unlock(&smmu_domain->init_mutex); - return ret; -} - static int arm_smmu_enable_nesting(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); @@ -2707,8 +2661,6 @@ static struct iommu_ops arm_smmu_ops = { .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .device_group = arm_smmu_device_group, - .domain_get_attr = arm_smmu_domain_get_attr, - .domain_set_attr = arm_smmu_domain_set_attr, .enable_nesting = arm_smmu_enable_nesting, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 7b15b7580c6e..05cf2bf42d33 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -677,7 +677,6 @@ struct arm_smmu_domain { struct mutex init_mutex; /* Protects smmu pointer */ struct io_pgtable_ops *pgtbl_ops; - bool non_strict; atomic_t nr_ats_masters; enum arm_smmu_domain_stage stage; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 0aa6d6672749..f17c54a76ef6 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -761,6 +761,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, .iommu_dev = smmu->dev, }; + if (!iommu_get_dma_strict(domain)) + 
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; + if (smmu->impl && smmu->impl->init_context) { ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev); if (ret) @@ -1499,18 +1502,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, return -ENODEV; } break; - case IOMMU_DOMAIN_DMA: - switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: { - bool non_strict = smmu_domain->pgtbl_cfg.quirks & - IO_PGTABLE_QUIRK_NON_STRICT; - *(int *)data = non_strict; - return 0; - } - default: - return -ENODEV; - } - break; default: return -EINVAL; } @@ -1557,18 +1548,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, ret = -ENODEV; } break; - case IOMMU_DOMAIN_DMA: - switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - if (*(int *)data) - smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; - else - smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT; - break; - default: - ret = -ENODEV; - } - break; default: ret = -EINVAL; } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 0af4048f9b3f..efaf5eab40a1 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -306,10 +306,7 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad) cookie = container_of(iovad, struct iommu_dma_cookie, iovad); domain = cookie->fq_domain; - /* - * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE - * implies that ops->flush_iotlb_all must be non-NULL. - */ + domain->ops->flush_iotlb_all(domain); } @@ -336,7 +333,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, struct iommu_dma_cookie *cookie = domain->iova_cookie; unsigned long order, base_pfn; struct iova_domain *iovad; - int attr; if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; @@ -373,8 +369,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, init_iova_domain(iovad, 1UL << order, base_pfn); if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) && - !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && - attr) { + domain->ops->flush_iotlb_all && !iommu_get_dma_strict(domain)) { if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, iommu_dma_entry_dtor)) pr_warn("iova flush queue initialization failed\n"); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 761feac91767..1db4d8934526 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4347,6 +4347,17 @@ int __init intel_iommu_init(void) down_read(&dmar_global_lock); for_each_active_iommu(iommu, drhd) { + /* + * The flush queue implementation does not perform + * page-selective invalidations that are required for efficient + * TLB flushes in virtual environments. The benefit of batching + * is likely to be much lower than the overhead of synchronizing + * the virtual and physical IOMMU page-tables. 
+ */ + if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) { + pr_warn("IOMMU batching is disabled due to virtualization"); + intel_iommu_strict = 1; + } iommu_device_sysfs_add(&iommu->iommu, NULL, intel_iommu_groups, "%s", iommu->name); @@ -4355,6 +4366,7 @@ int __init intel_iommu_init(void) } up_read(&dmar_global_lock); + iommu_set_dma_strict(intel_iommu_strict); bus_set_iommu(&pci_bus_type, &intel_iommu_ops); if (si_domain && !hw_pass_through) register_memory_notifier(&intel_iommu_memory_nb); @@ -5413,57 +5425,6 @@ intel_iommu_enable_nesting(struct iommu_domain *domain) return ret; } -static bool domain_use_flush_queue(void) -{ - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu; - bool r = true; - - if (intel_iommu_strict) - return false; - - /* - * The flush queue implementation does not perform page-selective - * invalidations that are required for efficient TLB flushes in virtual - * environments. The benefit of batching is likely to be much lower than - * the overhead of synchronizing the virtual and physical IOMMU - * page-tables. - */ - rcu_read_lock(); - for_each_active_iommu(iommu, drhd) { - if (!cap_caching_mode(iommu->cap)) - continue; - - pr_warn_once("IOMMU batching is disabled due to virtualization"); - r = false; - break; - } - rcu_read_unlock(); - - return r; -} - -static int -intel_iommu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - switch (domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - return -ENODEV; - case IOMMU_DOMAIN_DMA: - switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - *(int *)data = domain_use_flush_queue(); - return 0; - default: - return -ENODEV; - } - break; - default: - return -EINVAL; - } -} - /* * Check that the device does not live on an external facing PCI port that is * marked as untrusted. 
Such devices should not be able to apply quirks and @@ -5536,7 +5497,6 @@ const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, .domain_free = intel_iommu_domain_free, - .domain_get_attr = intel_iommu_domain_get_attr, .enable_nesting = intel_iommu_enable_nesting, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 7d1f50ac3709..b09d3f07f3df 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -69,6 +69,7 @@ static const char * const iommu_group_resv_type_string[] = { }; #define IOMMU_CMD_LINE_DMA_API BIT(0) +#define IOMMU_CMD_LINE_STRICT BIT(1) static int iommu_alloc_default_domain(struct iommu_group *group, struct device *dev); @@ -318,10 +319,29 @@ early_param("iommu.passthrough", iommu_set_def_domain_type); static int __init iommu_dma_setup(char *str) { - return kstrtobool(str, &iommu_dma_strict); + int ret = kstrtobool(str, &iommu_dma_strict); + + if (!ret) + iommu_cmd_line |= IOMMU_CMD_LINE_STRICT; + return ret; } early_param("iommu.strict", iommu_dma_setup); +void iommu_set_dma_strict(bool strict) +{ + if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT)) + iommu_dma_strict = strict; +} + +bool iommu_get_dma_strict(struct iommu_domain *domain) +{ + /* only allow lazy flushing for DMA domains */ + if (domain->type == IOMMU_DOMAIN_DMA) + return iommu_dma_strict; + return true; +} +EXPORT_SYMBOL_GPL(iommu_get_dma_strict); + static ssize_t iommu_group_attr_show(struct kobject *kobj, struct attribute *__attr, char *buf) { @@ -1500,14 +1520,6 @@ static int iommu_group_alloc_default_domain(struct bus_type *bus, group->default_domain = dom; if (!group->domain) group->domain = dom; - - if (!iommu_dma_strict) { - int attr = 1; - iommu_domain_set_attr(dom, - DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, - &attr); - } - return 0; } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index d7d76b5e1192..9349bdd62e91 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -107,7 +107,6 @@ enum iommu_cap { */ enum iommu_attr { - DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_IO_PGTABLE_CFG, DOMAIN_ATTR_MAX, }; @@ -514,6 +513,9 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); int iommu_enable_nesting(struct iommu_domain *domain); +void iommu_set_dma_strict(bool val); +bool iommu_get_dma_strict(struct iommu_domain *domain); + extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); -- cgit v1.2.3-71-gd317 From 4fc52b81e87be583efb834df5b58245cb9ddd3e7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:55 +0200 Subject: iommu: remove DOMAIN_ATTR_IO_PGTABLE_CFG Use an explicit set_pgtable_quirks method instead that just passes the actual quirk bitmask instead. 
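
To illustrate the new interface, a hedged caller sketch; it mirrors the adreno_gpu.c hunk below, with only the wrapper name being an assumption. iommu_set_pgtable_quirks() applies to unmanaged domains only and, in the arm-smmu implementation, must be called before the domain is finalised (otherwise -EPERM is returned):

  /* Hypothetical wrapper: request outer-WBWA walks via the quirk bitmask. */
  static int example_set_llc_attributes(struct iommu_domain *domain)
  {
          return iommu_set_pgtable_quirks(domain,
                                          IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
  }
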
Signed-off-by: Christoph Hellwig Acked-by: Will Deacon Acked-by: Li Yang Link: https://lore.kernel.org/r/20210401155256.298656-20-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 5 +-- drivers/iommu/arm/arm-smmu/arm-smmu.c | 64 ++++++--------------------------- drivers/iommu/arm/arm-smmu/arm-smmu.h | 2 +- drivers/iommu/iommu.c | 11 ++++++ include/linux/io-pgtable.h | 4 --- include/linux/iommu.h | 12 ++++++- 6 files changed, 35 insertions(+), 63 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 0f184c3dd9d9..4a0b14dad93e 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -188,10 +188,7 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) void adreno_set_llc_attributes(struct iommu_domain *iommu) { - struct io_pgtable_domain_attr pgtbl_cfg; - - pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA; - iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg); + iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA); } struct msm_gem_address_space * diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index f17c54a76ef6..3c6adcdb201b 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -770,8 +770,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, goto out_clear_smmu; } - if (smmu_domain->pgtbl_cfg.quirks) - pgtbl_cfg.quirks |= smmu_domain->pgtbl_cfg.quirks; + if (smmu_domain->pgtbl_quirks) + pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks; pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) { @@ -1484,29 +1484,6 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) return group; } -static int arm_smmu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - - switch(domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - switch (attr) { - case DOMAIN_ATTR_IO_PGTABLE_CFG: { - struct io_pgtable_domain_attr *pgtbl_cfg = data; - *pgtbl_cfg = smmu_domain->pgtbl_cfg; - - return 0; - } - default: - return -ENODEV; - } - break; - default: - return -EINVAL; - } -} - static int arm_smmu_enable_nesting(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); @@ -1522,37 +1499,19 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain) return ret; } -static int arm_smmu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain, + unsigned long quirks) { - int ret = 0; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + int ret = 0; mutex_lock(&smmu_domain->init_mutex); - - switch(domain->type) { - case IOMMU_DOMAIN_UNMANAGED: - switch (attr) { - case DOMAIN_ATTR_IO_PGTABLE_CFG: { - struct io_pgtable_domain_attr *pgtbl_cfg = data; - - if (smmu_domain->smmu) { - ret = -EPERM; - goto out_unlock; - } - - smmu_domain->pgtbl_cfg = *pgtbl_cfg; - break; - } - default: - ret = -ENODEV; - } - break; - default: - ret = -EINVAL; - } -out_unlock: + if (smmu_domain->smmu) + ret = -EPERM; + else + smmu_domain->pgtbl_quirks = quirks; mutex_unlock(&smmu_domain->init_mutex); + return ret; } @@ -1611,9 +1570,8 @@ static struct iommu_ops arm_smmu_ops = { .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, 
.device_group = arm_smmu_device_group, - .domain_get_attr = arm_smmu_domain_get_attr, - .domain_set_attr = arm_smmu_domain_set_attr, .enable_nesting = arm_smmu_enable_nesting, + .set_pgtable_quirks = arm_smmu_set_pgtable_quirks, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h index d2a2d1bc58ba..c31a59d35c64 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h @@ -364,7 +364,7 @@ enum arm_smmu_domain_stage { struct arm_smmu_domain { struct arm_smmu_device *smmu; struct io_pgtable_ops *pgtbl_ops; - struct io_pgtable_domain_attr pgtbl_cfg; + unsigned long pgtbl_quirks; const struct iommu_flush_ops *flush_ops; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index b09d3f07f3df..7ea5135b1431 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2701,6 +2701,17 @@ int iommu_enable_nesting(struct iommu_domain *domain) } EXPORT_SYMBOL_GPL(iommu_enable_nesting); +int iommu_set_pgtable_quirks(struct iommu_domain *domain, + unsigned long quirk) +{ + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; + if (!domain->ops->set_pgtable_quirks) + return -EINVAL; + return domain->ops->set_pgtable_quirks(domain, quirk); +} +EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); + void iommu_get_resv_regions(struct device *dev, struct list_head *list) { const struct iommu_ops *ops = dev->bus->iommu_ops; diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index a4c9ca2c31f1..4d40dfa75b55 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -204,10 +204,6 @@ struct io_pgtable { #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops) -struct io_pgtable_domain_attr { - unsigned long quirks; -}; - static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) { if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 9349bdd62e91..fbac49fe0880 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -107,7 +107,6 @@ enum iommu_cap { */ enum iommu_attr { - DOMAIN_ATTR_IO_PGTABLE_CFG, DOMAIN_ATTR_MAX, }; @@ -209,6 +208,7 @@ struct iommu_iotlb_gather { * @domain_get_attr: Query domain attributes * @domain_set_attr: Change domain attributes * @enable_nesting: Enable nesting + * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges @@ -262,6 +262,8 @@ struct iommu_ops { int (*domain_set_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); int (*enable_nesting)(struct iommu_domain *domain); + int (*set_pgtable_quirks)(struct iommu_domain *domain, + unsigned long quirks); /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); @@ -512,6 +514,8 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); int iommu_enable_nesting(struct iommu_domain *domain); +int iommu_set_pgtable_quirks(struct iommu_domain *domain, + unsigned long quirks); void iommu_set_dma_strict(bool val); bool 
iommu_get_dma_strict(struct iommu_domain *domain); @@ -891,6 +895,12 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain, return -EINVAL; } +static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain, + unsigned long quirks) +{ + return 0; +} + static inline int iommu_device_register(struct iommu_device *iommu) { return -ENODEV; -- cgit v1.2.3-71-gd317 From 7876a83ffe8c23c7049a63c747a7b96cafaf10a4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Apr 2021 17:52:56 +0200 Subject: iommu: remove iommu_domain_{get,set}_attr Remove the now unused iommu attr infrastructure. Signed-off-by: Christoph Hellwig Acked-by: Will Deacon Link: https://lore.kernel.org/r/20210401155256.298656-21-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 26 -------------------------- include/linux/iommu.h | 36 ------------------------------------ 2 files changed, 62 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 7ea5135b1431..c4ad9c644802 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2665,32 +2665,6 @@ static int __init iommu_init(void) } core_initcall(iommu_init); -int iommu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - if (!domain->ops->domain_get_attr) - return -EINVAL; - return domain->ops->domain_get_attr(domain, attr, data); -} -EXPORT_SYMBOL_GPL(iommu_domain_get_attr); - -int iommu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - int ret = 0; - - switch (attr) { - default: - if (domain->ops->domain_set_attr == NULL) - return -EINVAL; - - ret = domain->ops->domain_set_attr(domain, attr, data); - } - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_domain_set_attr); - int iommu_enable_nesting(struct iommu_domain *domain) { if (domain->type != IOMMU_DOMAIN_UNMANAGED) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index fbac49fe0880..a5b3af54fbb8 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -96,20 +96,6 @@ enum iommu_cap { IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ }; -/* - * Following constraints are specifc to FSL_PAMUV1: - * -aperture must be power of 2, and naturally aligned - * -number of windows must be power of 2, and address space size - * of each window is determined by aperture size / # of windows - * -the actual size of the mapped region of a window must be power - * of 2 starting with 4KB and physical address must be naturally - * aligned. 
- */ - -enum iommu_attr { - DOMAIN_ATTR_MAX, -}; - /* These are the possible reserved region types */ enum iommu_resv_type { /* Memory regions which must be mapped 1:1 at all times */ @@ -205,8 +191,6 @@ struct iommu_iotlb_gather { * @probe_finalize: Do final setup work after the device is added to an IOMMU * group and attached to the groups domain * @device_group: find iommu group for a particular device - * @domain_get_attr: Query domain attributes - * @domain_set_attr: Change domain attributes * @enable_nesting: Enable nesting * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @get_resv_regions: Request list of reserved regions for a device @@ -257,10 +241,6 @@ struct iommu_ops { void (*release_device)(struct device *dev); void (*probe_finalize)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); - int (*domain_get_attr)(struct iommu_domain *domain, - enum iommu_attr attr, void *data); - int (*domain_set_attr)(struct iommu_domain *domain, - enum iommu_attr attr, void *data); int (*enable_nesting)(struct iommu_domain *domain); int (*set_pgtable_quirks)(struct iommu_domain *domain, unsigned long quirks); @@ -509,10 +489,6 @@ extern int iommu_page_response(struct device *dev, extern int iommu_group_id(struct iommu_group *group); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); -extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, - void *data); -extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, - void *data); int iommu_enable_nesting(struct iommu_domain *domain); int iommu_set_pgtable_quirks(struct iommu_domain *domain, unsigned long quirks); @@ -883,18 +859,6 @@ static inline int iommu_group_id(struct iommu_group *group) return -ENODEV; } -static inline int iommu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - return -EINVAL; -} - -static inline int iommu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - return -EINVAL; -} - static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain, unsigned long quirks) { -- cgit v1.2.3-71-gd317 From d151c85c52a314c6ecb91ab35b3f696a6778b509 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 2 Apr 2021 16:33:09 +0200 Subject: iommu/amd: Remove the unused device errata code The device errata mechism is entirely unused, so remove it. 
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210402143312.372386-2-hch@lst.de Signed-off-by: Joerg Roedel --- drivers/iommu/amd/amd_iommu_types.h | 1 - drivers/iommu/amd/iommu.c | 56 ++----------------------------------- include/linux/amd-iommu.h | 18 ------------ 3 files changed, 3 insertions(+), 72 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 6937e3674a16..94c1a7a9876d 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -693,7 +693,6 @@ struct iommu_dev_data { } ats; /* ATS state */ bool pri_tlp; /* PASID TLB required for PPR completions */ - u32 errata; /* Bitmap for errata to apply */ bool use_vapic; /* Enable device to use vapic mode */ bool defer_attach; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 70814ab0b374..9b1a01602bb3 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -290,15 +290,6 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev) return true; } -static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum) -{ - struct iommu_dev_data *dev_data; - - dev_data = dev_iommu_priv_get(&pdev->dev); - - return dev_data->errata & (1 << erratum) ? true : false; -} - /* * This function checks if the driver got a valid device from the caller to * avoid dereferencing invalid pointers. @@ -1531,33 +1522,9 @@ static void pdev_iommuv2_disable(struct pci_dev *pdev) pci_disable_pasid(pdev); } -/* FIXME: Change generic reset-function to do the same */ -static int pri_reset_while_enabled(struct pci_dev *pdev) -{ - u16 control; - int pos; - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); - if (!pos) - return -EINVAL; - - pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); - control |= PCI_PRI_CTRL_RESET; - pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); - - return 0; -} - static int pdev_iommuv2_enable(struct pci_dev *pdev) { - bool reset_enable; - int reqs, ret; - - /* FIXME: Hardcode number of outstanding requests for now */ - reqs = 32; - if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE)) - reqs = 1; - reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET); + int ret; /* Only allow access to user-accessible pages */ ret = pci_enable_pasid(pdev, 0); @@ -1570,16 +1537,11 @@ static int pdev_iommuv2_enable(struct pci_dev *pdev) goto out_err; /* Enable PRI */ - ret = pci_enable_pri(pdev, reqs); + /* FIXME: Hardcode number of outstanding requests for now */ + ret = pci_enable_pri(pdev, 32); if (ret) goto out_err; - if (reset_enable) { - ret = pri_reset_while_enabled(pdev); - if (ret) - goto out_err; - } - ret = pci_enable_ats(pdev, PAGE_SHIFT); if (ret) goto out_err; @@ -2594,18 +2556,6 @@ struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev) } EXPORT_SYMBOL(amd_iommu_get_v2_domain); -void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum) -{ - struct iommu_dev_data *dev_data; - - if (!amd_iommu_v2_supported()) - return; - - dev_data = dev_iommu_priv_get(&pdev->dev); - dev_data->errata |= (1 << erratum); -} -EXPORT_SYMBOL(amd_iommu_enable_device_erratum); - int amd_iommu_device_info(struct pci_dev *pdev, struct amd_iommu_device_info *info) { diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 450717299928..474065ed88a4 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -32,24 +32,6 @@ struct pci_dev; extern int amd_iommu_detect(void); extern int 
amd_iommu_init_hardware(void); -/** - * amd_iommu_enable_device_erratum() - Enable erratum workaround for device - * in the IOMMUv2 driver - * @pdev: The PCI device the workaround is necessary for - * @erratum: The erratum workaround to enable - * - * The function needs to be called before amd_iommu_init_device(). - * Possible values for the erratum number are for now: - * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI - * is enabled - * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI - * requests to one - */ -#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0 -#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1 - -extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum); - /** * amd_iommu_init_device() - Init device for use with IOMMUv2 driver * @pdev: The PCI device to initialize -- cgit v1.2.3-71-gd317 From fc1b6620501f1a4b88f583549c63666180bea177 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 2 Apr 2021 16:33:12 +0200 Subject: iommu/amd: Move a few prototypes to include/linux/amd-iommu.h A few functions that were intentended for the perf events support are currently declared in arch/x86/events/amd/iommu.h, which mens they are not in scope for the actual function definition. Also amdkfd has started using a few of them using externs in a .c file. End that misery by moving the prototypes to the proper header. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210402143312.372386-5-hch@lst.de Signed-off-by: Joerg Roedel --- arch/x86/events/amd/iommu.c | 1 + arch/x86/events/amd/iommu.h | 19 ------------------- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 4 ---- drivers/iommu/amd/amd_iommu.h | 1 - include/linux/amd-iommu.h | 12 ++++++++++++ 5 files changed, 13 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index be50ef8572cc..a573fac1ece8 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "../perf_event.h" #include "iommu.h" diff --git a/arch/x86/events/amd/iommu.h b/arch/x86/events/amd/iommu.h index 0e5c036fd7be..095505c6213b 100644 --- a/arch/x86/events/amd/iommu.h +++ b/arch/x86/events/amd/iommu.h @@ -21,23 +21,4 @@ #define PC_MAX_SPEC_BNKS 64 #define PC_MAX_SPEC_CNTRS 16 -struct amd_iommu; - -/* amd_iommu_init.c external support functions */ -extern int amd_iommu_get_num_iommus(void); - -extern bool amd_iommu_pc_supported(void); - -extern u8 amd_iommu_pc_get_max_banks(unsigned int idx); - -extern u8 amd_iommu_pc_get_max_counters(unsigned int idx); - -extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, - u8 fxn, u64 *value); - -extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, - u8 fxn, u64 *value); - -extern struct amd_iommu *get_amd_iommu(int idx); - #endif /*_PERF_EVENT_AMD_IOMMU_H_*/ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 66bbca61e3ef..3c56333bbb9c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -329,10 +329,6 @@ int kfd_iommu_resume(struct kfd_dev *kfd) return 0; } -extern bool amd_iommu_pc_supported(void); -extern u8 amd_iommu_pc_get_max_banks(u16 devid); -extern u8 amd_iommu_pc_get_max_counters(u16 devid); - /** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology */ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev) diff --git 
a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index ed46dc754a2a..55dd38d814d9 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -11,7 +11,6 @@ #include "amd_iommu_types.h" -extern int amd_iommu_get_num_iommus(void); extern int amd_iommu_init_dma_ops(void); extern int amd_iommu_init_passthrough(void); extern irqreturn_t amd_iommu_int_thread(int irq, void *data); diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 474065ed88a4..58e6c3806c09 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -10,6 +10,8 @@ #include +struct amd_iommu; + /* * This is mainly used to communicate information back-and-forth * between SVM and IOMMU for setting up and tearing down posted @@ -194,4 +196,14 @@ static inline int amd_iommu_deactivate_guest_mode(void *data) } #endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ +int amd_iommu_get_num_iommus(void); +bool amd_iommu_pc_supported(void); +u8 amd_iommu_pc_get_max_banks(unsigned int idx); +u8 amd_iommu_pc_get_max_counters(unsigned int idx); +int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, + u64 *value); +int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, + u64 *value); +struct amd_iommu *get_amd_iommu(unsigned int idx); + #endif /* _ASM_X86_AMD_IOMMU_H */ -- cgit v1.2.3-71-gd317 From c0474a606ecb9326227b4d68059942f9db88a897 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Sat, 20 Mar 2021 10:54:13 +0800 Subject: iommu/vt-d: Invalidate PASID cache when root/context entry changed When the Intel IOMMU is operating in the scalable mode, some information from the root and context table may be used to tag entries in the PASID cache. Software should invalidate the PASID-cache when changing root or context table entries. 
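
For readability, a sketch of the invalidation ordering this patch enforces whenever the root (or a context) entry is reprogrammed; it simply restates the iommu_set_root_entry() hunk below and adds nothing new:

  static void example_flush_after_root_change(struct intel_iommu *iommu)
  {
          /* context-cache first ... */
          iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
          /* ... then the PASID cache when scalable mode is in use ... */
          if (sm_supported(iommu))
                  qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
          /* ... and finally the IOTLB */
          iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
  }
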
Suggested-by: Ashok Raj Fixes: 7373a8cc38197 ("iommu/vt-d: Setup context and enable RID2PASID support") Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20210320025415.641201-4-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 18 +++++++++--------- include/linux/intel-iommu.h | 1 + 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index b0f901ee6358..c981e69bc107 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1347,6 +1347,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu) readl, (sts & DMA_GSTS_RTPS), sts); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); + + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); + if (sm_supported(iommu)) + qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); } void iommu_flush_write_buffer(struct intel_iommu *iommu) @@ -2430,6 +2435,10 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); + + if (sm_supported(iommu)) + qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0); + iommu->flush.flush_iotlb(iommu, did_old, 0, @@ -3278,8 +3287,6 @@ static int __init init_dmars(void) register_pasid_allocator(iommu); #endif iommu_set_root_entry(iommu); - iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); } #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA @@ -3469,12 +3476,7 @@ static int init_iommu_hw(void) } iommu_flush_write_buffer(iommu); - iommu_set_root_entry(iommu); - - iommu->flush.flush_context(iommu, 0, 0, 0, - DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); iommu_enable_translation(iommu); iommu_disable_protect_mem_regions(iommu); } @@ -3857,8 +3859,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru) goto disable_iommu; iommu_set_root_entry(iommu); - iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); iommu_enable_translation(iommu); iommu_disable_protect_mem_regions(iommu); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index e0f8c2ade3e8..03faf20a6817 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -378,6 +378,7 @@ enum { /* PASID cache invalidation granu */ #define QI_PC_ALL_PASIDS 0 #define QI_PC_PASID_SEL 1 +#define QI_PC_GLOBAL 3 #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) -- cgit v1.2.3-71-gd317 From a8cf291bdac5d415eadb55e79df1fca8c3f0dfef Mon Sep 17 00:00:00 2001 From: Jianyong Wu Date: Wed, 9 Dec 2020 14:09:26 +0800 Subject: ptp: Reorganize ptp_kvm.c to make it arch-independent Currently, the ptp_kvm module contains a lot of x86-specific code. Let's move this code into a new arch-specific file in the same directory, and rename the arch-independent file to ptp_kvm_common.c. 
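
For orientation, the arch interface this split relies on, as used by the new ptp_kvm_common.c below; the authoritative prototypes live in the newly added include/linux/ptp_kvm.h, so treat this as a sketch inferred from the callers:

  /* Hooks an architecture provides for the common KVM PTP clock. */
  int kvm_arch_ptp_init(void);
  int kvm_arch_ptp_get_clock(struct timespec64 *ts);
  int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
                                   struct clocksource **cs);
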
Acked-by: Richard Cochran Reviewed-by: Andre Przywara Signed-off-by: Jianyong Wu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20201209060932.212364-4-jianyong.wu@arm.com --- drivers/ptp/Makefile | 1 + drivers/ptp/ptp_kvm.c | 197 ------------------------------------------- drivers/ptp/ptp_kvm_common.c | 157 ++++++++++++++++++++++++++++++++++ drivers/ptp/ptp_kvm_x86.c | 97 +++++++++++++++++++++ include/linux/ptp_kvm.h | 19 +++++ 5 files changed, 274 insertions(+), 197 deletions(-) delete mode 100644 drivers/ptp/ptp_kvm.c create mode 100644 drivers/ptp/ptp_kvm_common.c create mode 100644 drivers/ptp/ptp_kvm_x86.c create mode 100644 include/linux/ptp_kvm.h (limited to 'include/linux') diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile index db5aef3bddc6..d11eeb5811d1 100644 --- a/drivers/ptp/Makefile +++ b/drivers/ptp/Makefile @@ -4,6 +4,7 @@ # ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o +ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c deleted file mode 100644 index 658d33fc3195..000000000000 --- a/drivers/ptp/ptp_kvm.c +++ /dev/null @@ -1,197 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Virtual PTP 1588 clock for use with KVM guests - * - * Copyright (C) 2017 Red Hat Inc. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -struct kvm_ptp_clock { - struct ptp_clock *ptp_clock; - struct ptp_clock_info caps; -}; - -static DEFINE_SPINLOCK(kvm_ptp_lock); - -static struct pvclock_vsyscall_time_info *hv_clock; - -static struct kvm_clock_pairing clock_pair; -static phys_addr_t clock_pair_gpa; - -static int ptp_kvm_get_time_fn(ktime_t *device_time, - struct system_counterval_t *system_counter, - void *ctx) -{ - unsigned long ret; - struct timespec64 tspec; - unsigned version; - int cpu; - struct pvclock_vcpu_time_info *src; - - spin_lock(&kvm_ptp_lock); - - preempt_disable_notrace(); - cpu = smp_processor_id(); - src = &hv_clock[cpu].pvti; - - do { - /* - * We are using a TSC value read in the hosts - * kvm_hc_clock_pairing handling. - * So any changes to tsc_to_system_mul - * and tsc_shift or any other pvclock - * data invalidate that measurement. 
- */ - version = pvclock_read_begin(src); - - ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, - clock_pair_gpa, - KVM_CLOCK_PAIRING_WALLCLOCK); - if (ret != 0) { - pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret); - spin_unlock(&kvm_ptp_lock); - preempt_enable_notrace(); - return -EOPNOTSUPP; - } - - tspec.tv_sec = clock_pair.sec; - tspec.tv_nsec = clock_pair.nsec; - ret = __pvclock_read_cycles(src, clock_pair.tsc); - } while (pvclock_read_retry(src, version)); - - preempt_enable_notrace(); - - system_counter->cycles = ret; - system_counter->cs = &kvm_clock; - - *device_time = timespec64_to_ktime(tspec); - - spin_unlock(&kvm_ptp_lock); - - return 0; -} - -static int ptp_kvm_getcrosststamp(struct ptp_clock_info *ptp, - struct system_device_crosststamp *xtstamp) -{ - return get_device_system_crosststamp(ptp_kvm_get_time_fn, NULL, - NULL, xtstamp); -} - -/* - * PTP clock operations - */ - -static int ptp_kvm_adjfreq(struct ptp_clock_info *ptp, s32 ppb) -{ - return -EOPNOTSUPP; -} - -static int ptp_kvm_adjtime(struct ptp_clock_info *ptp, s64 delta) -{ - return -EOPNOTSUPP; -} - -static int ptp_kvm_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) -{ - return -EOPNOTSUPP; -} - -static int ptp_kvm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) -{ - unsigned long ret; - struct timespec64 tspec; - - spin_lock(&kvm_ptp_lock); - - ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, - clock_pair_gpa, - KVM_CLOCK_PAIRING_WALLCLOCK); - if (ret != 0) { - pr_err_ratelimited("clock offset hypercall ret %lu\n", ret); - spin_unlock(&kvm_ptp_lock); - return -EOPNOTSUPP; - } - - tspec.tv_sec = clock_pair.sec; - tspec.tv_nsec = clock_pair.nsec; - spin_unlock(&kvm_ptp_lock); - - memcpy(ts, &tspec, sizeof(struct timespec64)); - - return 0; -} - -static int ptp_kvm_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) -{ - return -EOPNOTSUPP; -} - -static const struct ptp_clock_info ptp_kvm_caps = { - .owner = THIS_MODULE, - .name = "KVM virtual PTP", - .max_adj = 0, - .n_ext_ts = 0, - .n_pins = 0, - .pps = 0, - .adjfreq = ptp_kvm_adjfreq, - .adjtime = ptp_kvm_adjtime, - .gettime64 = ptp_kvm_gettime, - .settime64 = ptp_kvm_settime, - .enable = ptp_kvm_enable, - .getcrosststamp = ptp_kvm_getcrosststamp, -}; - -/* module operations */ - -static struct kvm_ptp_clock kvm_ptp_clock; - -static void __exit ptp_kvm_exit(void) -{ - ptp_clock_unregister(kvm_ptp_clock.ptp_clock); -} - -static int __init ptp_kvm_init(void) -{ - long ret; - - if (!kvm_para_available()) - return -ENODEV; - - clock_pair_gpa = slow_virt_to_phys(&clock_pair); - hv_clock = pvclock_get_pvti_cpu0_va(); - - if (!hv_clock) - return -ENODEV; - - ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa, - KVM_CLOCK_PAIRING_WALLCLOCK); - if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP) - return -ENODEV; - - kvm_ptp_clock.caps = ptp_kvm_caps; - - kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); - - return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock); -} - -module_init(ptp_kvm_init); -module_exit(ptp_kvm_exit); - -MODULE_AUTHOR("Marcelo Tosatti "); -MODULE_DESCRIPTION("PTP clock using KVMCLOCK"); -MODULE_LICENSE("GPL"); diff --git a/drivers/ptp/ptp_kvm_common.c b/drivers/ptp/ptp_kvm_common.c new file mode 100644 index 000000000000..721ddcede5e1 --- /dev/null +++ b/drivers/ptp/ptp_kvm_common.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Virtual PTP 1588 clock for use with KVM guests + * + * Copyright (C) 2017 Red Hat Inc. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct kvm_ptp_clock { + struct ptp_clock *ptp_clock; + struct ptp_clock_info caps; +}; + +static DEFINE_SPINLOCK(kvm_ptp_lock); + +static int ptp_kvm_get_time_fn(ktime_t *device_time, + struct system_counterval_t *system_counter, + void *ctx) +{ + long ret; + u64 cycle; + struct timespec64 tspec; + struct clocksource *cs; + + spin_lock(&kvm_ptp_lock); + + preempt_disable_notrace(); + ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs); + if (ret) { + spin_unlock(&kvm_ptp_lock); + preempt_enable_notrace(); + return ret; + } + + preempt_enable_notrace(); + + system_counter->cycles = cycle; + system_counter->cs = cs; + + *device_time = timespec64_to_ktime(tspec); + + spin_unlock(&kvm_ptp_lock); + + return 0; +} + +static int ptp_kvm_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + return get_device_system_crosststamp(ptp_kvm_get_time_fn, NULL, + NULL, xtstamp); +} + +/* + * PTP clock operations + */ + +static int ptp_kvm_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + return -EOPNOTSUPP; +} + +static int ptp_kvm_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + return -EOPNOTSUPP; +} + +static int ptp_kvm_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + return -EOPNOTSUPP; +} + +static int ptp_kvm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + long ret; + struct timespec64 tspec; + + spin_lock(&kvm_ptp_lock); + + ret = kvm_arch_ptp_get_clock(&tspec); + if (ret) { + spin_unlock(&kvm_ptp_lock); + return ret; + } + + spin_unlock(&kvm_ptp_lock); + + memcpy(ts, &tspec, sizeof(struct timespec64)); + + return 0; +} + +static int ptp_kvm_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static const struct ptp_clock_info ptp_kvm_caps = { + .owner = THIS_MODULE, + .name = "KVM virtual PTP", + .max_adj = 0, + .n_ext_ts = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = ptp_kvm_adjfreq, + .adjtime = ptp_kvm_adjtime, + .gettime64 = ptp_kvm_gettime, + .settime64 = ptp_kvm_settime, + .enable = ptp_kvm_enable, + .getcrosststamp = ptp_kvm_getcrosststamp, +}; + +/* module operations */ + +static struct kvm_ptp_clock kvm_ptp_clock; + +static void __exit ptp_kvm_exit(void) +{ + ptp_clock_unregister(kvm_ptp_clock.ptp_clock); +} + +static int __init ptp_kvm_init(void) +{ + long ret; + + ret = kvm_arch_ptp_init(); + if (ret) { + pr_err("fail to initialize ptp_kvm"); + return ret; + } + + kvm_ptp_clock.caps = ptp_kvm_caps; + + kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); + + return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock); +} + +module_init(ptp_kvm_init); +module_exit(ptp_kvm_exit); + +MODULE_AUTHOR("Marcelo Tosatti "); +MODULE_DESCRIPTION("PTP clock using KVMCLOCK"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c new file mode 100644 index 000000000000..3dd519dfc473 --- /dev/null +++ b/drivers/ptp/ptp_kvm_x86.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Virtual PTP 1588 clock for use with KVM guests + * + * Copyright (C) 2017 Red Hat Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct pvclock_vsyscall_time_info *hv_clock; + +static phys_addr_t clock_pair_gpa; +static struct kvm_clock_pairing clock_pair; + +int kvm_arch_ptp_init(void) +{ + long ret; + + if (!kvm_para_available()) + return -ENODEV; + + clock_pair_gpa = slow_virt_to_phys(&clock_pair); + hv_clock = pvclock_get_pvti_cpu0_va(); + if (!hv_clock) + return -ENODEV; + + ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa, + KVM_CLOCK_PAIRING_WALLCLOCK); + if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP) + return -ENODEV; + + return 0; +} + +int kvm_arch_ptp_get_clock(struct timespec64 *ts) +{ + long ret; + + ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, + clock_pair_gpa, + KVM_CLOCK_PAIRING_WALLCLOCK); + if (ret != 0) { + pr_err_ratelimited("clock offset hypercall ret %lu\n", ret); + return -EOPNOTSUPP; + } + + ts->tv_sec = clock_pair.sec; + ts->tv_nsec = clock_pair.nsec; + + return 0; +} + +int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec, + struct clocksource **cs) +{ + struct pvclock_vcpu_time_info *src; + unsigned int version; + long ret; + int cpu; + + cpu = smp_processor_id(); + src = &hv_clock[cpu].pvti; + + do { + /* + * We are using a TSC value read in the hosts + * kvm_hc_clock_pairing handling. + * So any changes to tsc_to_system_mul + * and tsc_shift or any other pvclock + * data invalidate that measurement. + */ + version = pvclock_read_begin(src); + + ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, + clock_pair_gpa, + KVM_CLOCK_PAIRING_WALLCLOCK); + if (ret != 0) { + pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret); + return -EOPNOTSUPP; + } + tspec->tv_sec = clock_pair.sec; + tspec->tv_nsec = clock_pair.nsec; + *cycle = __pvclock_read_cycles(src, clock_pair.tsc); + } while (pvclock_read_retry(src, version)); + + *cs = &kvm_clock; + + return 0; +} diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h new file mode 100644 index 000000000000..f960a719f0d5 --- /dev/null +++ b/include/linux/ptp_kvm.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Virtual PTP 1588 clock for use with KVM guests + * + * Copyright (C) 2017 Red Hat Inc. + */ + +#ifndef _PTP_KVM_H_ +#define _PTP_KVM_H_ + +struct timespec64; +struct clocksource; + +int kvm_arch_ptp_init(void); +int kvm_arch_ptp_get_clock(struct timespec64 *ts); +int kvm_arch_ptp_get_crosststamp(u64 *cycle, + struct timespec64 *tspec, struct clocksource **cs); + +#endif /* _PTP_KVM_H_ */ -- cgit v1.2.3-71-gd317 From b2c67cbe9f447312f5cdd7c6641b463f2349aec0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Dec 2020 14:09:27 +0800 Subject: time: Add mechanism to recognize clocksource in time_get_snapshot System time snapshots are not conveying information about the current clocksource which was used, but callers like the PTP KVM guest implementation have the requirement to evaluate the clocksource type to select the appropriate mechanism. Introduce a clocksource id field in struct clocksource which is by default set to CSID_GENERIC (0). Clocksource implementations can set that field to a value which allows to identify the clocksource. Store the clocksource id of the current clocksource in the system_time_snapshot so callers can evaluate which clocksource was used to take the snapshot and act accordingly. 
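A consumer is expected to check the id before trusting the raw cycle count. A minimal sketch of that pattern follows (the wrapper function is hypothetical; CSID_ARM_ARCH_COUNTER is only introduced by a later patch in this series, whose KVM hypercall handler performs exactly this check):

  static void example_snapshot_user(void)
  {
          struct system_time_snapshot snap;

          ktime_get_snapshot(&snap);

          /* Only interpret the raw cycle count if it came from a
           * clocksource we recognize. */
          if (snap.cs_id != CSID_ARM_ARCH_COUNTER)
                  return;

          /* snap.cycles can now be treated as an arch counter reading */
  }
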
Signed-off-by: Thomas Gleixner Signed-off-by: Jianyong Wu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20201209060932.212364-5-jianyong.wu@arm.com --- include/linux/clocksource.h | 6 ++++++ include/linux/clocksource_ids.h | 11 +++++++++++ include/linux/timekeeping.h | 12 +++++++----- kernel/time/clocksource.c | 2 ++ kernel/time/timekeeping.c | 1 + 5 files changed, 27 insertions(+), 5 deletions(-) create mode 100644 include/linux/clocksource_ids.h (limited to 'include/linux') diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 86d143db6523..1290d0dce840 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -62,6 +63,10 @@ struct module; * 400-499: Perfect * The ideal clocksource. A must-use where * available. + * @id: Defaults to CSID_GENERIC. The id value is captured + * in certain snapshot functions to allow callers to + * validate the clocksource from which the snapshot was + * taken. * @flags: Flags describing special properties * @enable: Optional function to enable the clocksource * @disable: Optional function to disable the clocksource @@ -100,6 +105,7 @@ struct clocksource { const char *name; struct list_head list; int rating; + enum clocksource_ids id; enum vdso_clock_mode vdso_clock_mode; unsigned long flags; diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h new file mode 100644 index 000000000000..4d8e19e05328 --- /dev/null +++ b/include/linux/clocksource_ids.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CLOCKSOURCE_IDS_H +#define _LINUX_CLOCKSOURCE_IDS_H + +/* Enum to give clocksources a unique identifier */ +enum clocksource_ids { + CSID_GENERIC = 0, + CSID_MAX, +}; + +#endif diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index c6792cf01bc7..78a98bdff76d 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -3,6 +3,7 @@ #define _LINUX_TIMEKEEPING_H #include +#include /* Included from linux/ktime.h */ @@ -243,11 +244,12 @@ struct ktime_timestamps { * @cs_was_changed_seq: The sequence number of clocksource change events */ struct system_time_snapshot { - u64 cycles; - ktime_t real; - ktime_t raw; - unsigned int clock_was_set_seq; - u8 cs_was_changed_seq; + u64 cycles; + ktime_t real; + ktime_t raw; + enum clocksource_ids cs_id; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; }; /** diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index cce484a2cc7c..4fe1df894ee5 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -920,6 +920,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) clocksource_arch_init(cs); + if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX)) + cs->id = CSID_GENERIC; if (cs->vdso_clock_mode < 0 || cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) { pr_warn("clocksource %s registered with invalid VDSO mode %d. 
Disabling VDSO support.\n", diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 6aee5768c86f..06f55f9258bf 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1048,6 +1048,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot) do { seq = read_seqcount_begin(&tk_core.seq); now = tk_clock_read(&tk->tkr_mono); + systime_snapshot->cs_id = tk->tkr_mono.clock->id; systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; base_real = ktime_add(tk->tkr_mono.base, -- cgit v1.2.3-71-gd317 From 100148d0fc7dcf8672fe0ac83f44dc5749b4da5c Mon Sep 17 00:00:00 2001 From: Jianyong Wu Date: Wed, 9 Dec 2020 14:09:28 +0800 Subject: clocksource: Add clocksource id for arm arch counter Add clocksource id to the ARM generic counter so that it can be easily identified from callers such as ptp_kvm. Cc: Mark Rutland Reviewed-by: Andre Przywara Signed-off-by: Jianyong Wu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20201209060932.212364-6-jianyong.wu@arm.com --- drivers/clocksource/arm_arch_timer.c | 2 ++ include/linux/clocksource_ids.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include/linux') diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index d0177824c518..8f12e223703f 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -191,6 +192,7 @@ static u64 arch_counter_read_cc(const struct cyclecounter *cc) static struct clocksource clocksource_counter = { .name = "arch_sys_counter", + .id = CSID_ARM_ARCH_COUNTER, .rating = 400, .read = arch_counter_read, .mask = CLOCKSOURCE_MASK(56), diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h index 4d8e19e05328..16775d7d8f8d 100644 --- a/include/linux/clocksource_ids.h +++ b/include/linux/clocksource_ids.h @@ -5,6 +5,7 @@ /* Enum to give clocksources a unique identifier */ enum clocksource_ids { CSID_GENERIC = 0, + CSID_ARM_ARCH_COUNTER, CSID_MAX, }; -- cgit v1.2.3-71-gd317 From 3bf725699bf62494b3e179f1795f08c7d749f061 Mon Sep 17 00:00:00 2001 From: Jianyong Wu Date: Wed, 9 Dec 2020 14:09:29 +0800 Subject: KVM: arm64: Add support for the KVM PTP service Implement the hypervisor side of the KVM PTP interface. The service offers wall time and cycle count from host to guest. The caller must specify whether they want the host's view of either the virtual or physical counter. Signed-off-by: Jianyong Wu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20201209060932.212364-7-jianyong.wu@arm.com --- Documentation/virt/kvm/api.rst | 10 +++++++ Documentation/virt/kvm/arm/index.rst | 1 + Documentation/virt/kvm/arm/ptp_kvm.rst | 25 ++++++++++++++++ arch/arm64/kvm/arm.c | 1 + arch/arm64/kvm/hypercalls.c | 53 ++++++++++++++++++++++++++++++++++ include/linux/arm-smccc.h | 16 ++++++++++ include/uapi/linux/kvm.h | 1 + 7 files changed, 107 insertions(+) create mode 100644 Documentation/virt/kvm/arm/ptp_kvm.rst (limited to 'include/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 38e327d4b479..987d99e39887 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6724,3 +6724,13 @@ vcpu_info is set. 
The KVM_XEN_HVM_CONFIG_RUNSTATE flag indicates that the runstate-related features KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR/_CURRENT/_DATA/_ADJUST are supported by the KVM_XEN_VCPU_SET_ATTR/KVM_XEN_VCPU_GET_ATTR ioctls. + +8.31 KVM_CAP_PTP_KVM +-------------------- + +:Architectures: arm64 + +This capability indicates that the KVM virtual PTP service is +supported in the host. A VMM can check whether the service is +available to the guest on migration. + diff --git a/Documentation/virt/kvm/arm/index.rst b/Documentation/virt/kvm/arm/index.rst index 3e2b2aba90fc..78a9b670aafe 100644 --- a/Documentation/virt/kvm/arm/index.rst +++ b/Documentation/virt/kvm/arm/index.rst @@ -10,3 +10,4 @@ ARM hyp-abi psci pvtime + ptp_kvm diff --git a/Documentation/virt/kvm/arm/ptp_kvm.rst b/Documentation/virt/kvm/arm/ptp_kvm.rst new file mode 100644 index 000000000000..68cffb50d8bf --- /dev/null +++ b/Documentation/virt/kvm/arm/ptp_kvm.rst @@ -0,0 +1,25 @@ +.. SPDX-License-Identifier: GPL-2.0 + +PTP_KVM support for arm/arm64 +============================= + +PTP_KVM is used for high precision time sync between host and guests. +It relies on transferring the wall clock and counter value from the +host to the guest using a KVM-specific hypercall. + +* ARM_SMCCC_HYP_KVM_PTP_FUNC_ID: 0x86000001 + +This hypercall uses the SMC32/HVC32 calling convention: + +ARM_SMCCC_HYP_KVM_PTP_FUNC_ID + ============= ========== ========== + Function ID: (uint32) 0x86000001 + Arguments: (uint32) KVM_PTP_VIRT_COUNTER(0) + KVM_PTP_PHYS_COUNTER(1) + Return Values: (int32) NOT_SUPPORTED(-1) on error, or + (uint32) Upper 32 bits of wall clock time (r0) + (uint32) Lower 32 bits of wall clock time (r1) + (uint32) Upper 32 bits of counter (r2) + (uint32) Lower 32 bits of counter (r3) + Endianness: No Restrictions. + ============= ========== ========== diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 7f06ba76698d..46401798c644 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -206,6 +206,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ARM_INJECT_EXT_DABT: case KVM_CAP_SET_GUEST_DEBUG: case KVM_CAP_VCPU_ATTRIBUTES: + case KVM_CAP_PTP_KVM: r = 1; break; case KVM_CAP_ARM_SET_DEVICE_ADDR: diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 78d32c34d49c..30da78f72b3b 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -9,6 +9,55 @@ #include #include +static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val) +{ + struct system_time_snapshot systime_snapshot; + u64 cycles = ~0UL; + u32 feature; + + /* + * system time and counter value must captured at the same + * time to keep consistency and precision. + */ + ktime_get_snapshot(&systime_snapshot); + + /* + * This is only valid if the current clocksource is the + * architected counter, as this is the only one the guest + * can see. + */ + if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER) + return; + + /* + * The guest selects one of the two reference counters + * (virtual or physical) with the first argument of the SMCCC + * call. In case the identifier is not supported, error out. 
+ */ + feature = smccc_get_arg1(vcpu); + switch (feature) { + case KVM_PTP_VIRT_COUNTER: + cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2); + break; + case KVM_PTP_PHYS_COUNTER: + cycles = systime_snapshot.cycles; + break; + default: + return; + } + + /* + * This relies on the top bit of val[0] never being set for + * valid values of system time, because that is *really* far + * in the future (about 292 years from 1970, and at that stage + * nobody will give a damn about it). + */ + val[0] = upper_32_bits(systime_snapshot.real); + val[1] = lower_32_bits(systime_snapshot.real); + val[2] = upper_32_bits(cycles); + val[3] = lower_32_bits(cycles); +} + int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) { u32 func_id = smccc_get_function(vcpu); @@ -79,6 +128,10 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) break; case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID: val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES); + val[0] |= BIT(ARM_SMCCC_KVM_FUNC_PTP); + break; + case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID: + kvm_ptp_get_time(vcpu, val); break; case ARM_SMCCC_TRNG_VERSION: case ARM_SMCCC_TRNG_FEATURES: diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 1a27bd9493fe..6861489a1890 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -103,6 +103,7 @@ /* KVM "vendor specific" services */ #define ARM_SMCCC_KVM_FUNC_FEATURES 0 +#define ARM_SMCCC_KVM_FUNC_PTP 1 #define ARM_SMCCC_KVM_FUNC_FEATURES_2 127 #define ARM_SMCCC_KVM_NUM_FUNCS 128 @@ -114,6 +115,21 @@ #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 +/* + * ptp_kvm is a feature used for time sync between vm and host. + * ptp_kvm module in guest kernel will get service from host using + * this hypercall ID. + */ +#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_PTP) + +/* ptp_kvm counter type ID */ +#define KVM_PTP_VIRT_COUNTER 0 +#define KVM_PTP_PHYS_COUNTER 1 + /* Paravirtualised time calls (defined by ARM DEN0057A) */ #define ARM_SMCCC_HV_PV_TIME_FEATURES \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index f6afee209620..0e0f70c0d0dc 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1078,6 +1078,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_DIRTY_LOG_RING 192 #define KVM_CAP_X86_BUS_LOCK_EXIT 193 #define KVM_CAP_PPC_DAWR1 194 +#define KVM_CAP_PTP_KVM 195 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-71-gd317 From 714638e02d94fa28c9e030d13d03e663fe24925e Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Mon, 29 Mar 2021 13:50:36 +0300 Subject: i2c: Add support for software nodes This makes it possible for the drivers to assign complete software fwnodes to the devices instead of only the device properties in those nodes. 
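As a rough illustration (the "foo" device and its property are hypothetical), instantiating a client with a complete software node instead of a bare property array now looks like this:

  static const struct property_entry foo_properties[] = {
          PROPERTY_ENTRY_U32("clock-frequency", 400000),
          { }
  };

  static const struct software_node foo_swnode = {
          .properties = foo_properties,
  };

  static const struct i2c_board_info foo_board_info = {
          I2C_BOARD_INFO("foo", 0x50),
          .swnode = &foo_swnode,
  };
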
Signed-off-by: Heikki Krogerus Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 15 ++++++++++++++- include/linux/i2c.h | 4 +++- 2 files changed, 17 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 63ebf722a424..266b2013b1f1 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -919,15 +919,27 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf } } + if (info->swnode) { + status = device_add_software_node(&client->dev, info->swnode); + if (status) { + dev_err(&adap->dev, + "Failed to add software node to client %s: %d\n", + client->name, status); + goto out_free_props; + } + } + status = device_register(&client->dev); if (status) - goto out_free_props; + goto out_remove_swnode; dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n", client->name, dev_name(&client->dev)); return client; +out_remove_swnode: + device_remove_software_node(&client->dev); out_free_props: if (info->properties) device_remove_properties(&client->dev); @@ -960,6 +972,7 @@ void i2c_unregister_device(struct i2c_client *client) if (ACPI_COMPANION(&client->dev)) acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev)); + device_remove_software_node(&client->dev); device_unregister(&client->dev); } EXPORT_SYMBOL_GPL(i2c_unregister_device); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 56622658b215..cb1f882a3e88 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -391,7 +391,8 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } * @platform_data: stored in i2c_client.dev.platform_data * @of_node: pointer to OpenFirmware device node * @fwnode: device node supplied by the platform firmware - * @properties: additional device properties for the device + * @properties: Deprecated - use swnode instead + * @swnode: software node for the device * @resources: resources associated with the device * @num_resources: number of resources in the @resources array * @irq: stored in i2c_client.irq @@ -416,6 +417,7 @@ struct i2c_board_info { struct device_node *of_node; struct fwnode_handle *fwnode; const struct property_entry *properties; + const struct software_node *swnode; const struct resource *resources; unsigned int num_resources; int irq; -- cgit v1.2.3-71-gd317 From 1a7a6e8072ea0e4582de2da63a9088841fde798e Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Tue, 6 Apr 2021 09:30:36 +0200 Subject: pwm: Clarify which state pwm_get_state() returns MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Given that lowlevel drivers usually cannot implement exactly what a consumer requests with pwm_apply_state() there is some rounding involved. pwm_get_state() returns the setting that was requested most recently by the consumer (opposed to what was actually implemented in hardware in reply to the last request). Clarify this in the function kerneldoc. Signed-off-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- Documentation/driver-api/pwm.rst | 6 +++++- include/linux/pwm.h | 5 +++++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/Documentation/driver-api/pwm.rst b/Documentation/driver-api/pwm.rst index ab62f1bb0366..381f3c46cdac 100644 --- a/Documentation/driver-api/pwm.rst +++ b/Documentation/driver-api/pwm.rst @@ -55,7 +55,11 @@ several parameter at once. 
For example, if you see pwm_config() and pwm_{enable,disable}() calls in the same function, this probably means you should switch to pwm_apply_state(). -The PWM user API also allows one to query the PWM state with pwm_get_state(). +The PWM user API also allows one to query the last applied PWM state with +pwm_get_last_applied_state(). Note this is different to what the driver has +actually implemented if the request cannot be implemented exactly with the +hardware in use. There is currently no way for consumers to get the actually +implemented settings. In addition to the PWM state, the PWM API also exposes PWM arguments, which are the reference PWM config one should use on this PWM. diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 8f4eefd129aa..5bb90af4997e 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -91,6 +91,11 @@ struct pwm_device { * pwm_get_state() - retrieve the current PWM state * @pwm: PWM device * @state: state to fill with the current PWM state + * + * The returned PWM state represents the state that was applied by a previous call to + * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to + * hardware. If pwm_apply_state() was never called, this returns either the current hardware + * state (if supported) or the default settings. */ static inline void pwm_get_state(const struct pwm_device *pwm, struct pwm_state *state) -- cgit v1.2.3-71-gd317 From 4b2b4cc50ba6d607d1611ea6b2046a58d16e45eb Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Mon, 29 Mar 2021 13:50:47 +0300 Subject: i2c: Remove support for dangling device properties From now on only accepting complete software nodes. Signed-off-by: Heikki Krogerus Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-boardinfo.c | 11 ----------- drivers/i2c/i2c-core-base.c | 15 +-------------- include/linux/i2c.h | 2 -- 3 files changed, 1 insertion(+), 27 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c index 8bc51d4e69df..4df8ad092df3 100644 --- a/drivers/i2c/i2c-boardinfo.c +++ b/drivers/i2c/i2c-boardinfo.c @@ -47,7 +47,6 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num); * * The board info passed can safely be __initdata, but be careful of embedded * pointers (for platform_data, functions, etc) since that won't be copied. - * Device properties are deep-copied though. 
*/ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len) { @@ -72,16 +71,6 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig devinfo->busnum = busnum; devinfo->board_info = *info; - if (info->properties) { - devinfo->board_info.properties = - property_entries_dup(info->properties); - if (IS_ERR(devinfo->board_info.properties)) { - status = PTR_ERR(devinfo->board_info.properties); - kfree(devinfo); - break; - } - } - if (info->resources) { devinfo->board_info.resources = kmemdup(info->resources, diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 266b2013b1f1..d296b5000a1b 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -909,23 +909,13 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf i2c_dev_set_name(adap, client, info); - if (info->properties) { - status = device_add_properties(&client->dev, info->properties); - if (status) { - dev_err(&adap->dev, - "Failed to add properties to client %s: %d\n", - client->name, status); - goto out_err_put_of_node; - } - } - if (info->swnode) { status = device_add_software_node(&client->dev, info->swnode); if (status) { dev_err(&adap->dev, "Failed to add software node to client %s: %d\n", client->name, status); - goto out_free_props; + goto out_err_put_of_node; } } @@ -940,9 +930,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf out_remove_swnode: device_remove_software_node(&client->dev); -out_free_props: - if (info->properties) - device_remove_properties(&client->dev); out_err_put_of_node: of_node_put(info->of_node); out_err: diff --git a/include/linux/i2c.h b/include/linux/i2c.h index cb1f882a3e88..54b3ccc71e37 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -391,7 +391,6 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } * @platform_data: stored in i2c_client.dev.platform_data * @of_node: pointer to OpenFirmware device node * @fwnode: device node supplied by the platform firmware - * @properties: Deprecated - use swnode instead * @swnode: software node for the device * @resources: resources associated with the device * @num_resources: number of resources in the @resources array @@ -416,7 +415,6 @@ struct i2c_board_info { void *platform_data; struct device_node *of_node; struct fwnode_handle *fwnode; - const struct property_entry *properties; const struct software_node *swnode; const struct resource *resources; unsigned int num_resources; -- cgit v1.2.3-71-gd317 From 07740c92ae57ca21204f1e0c6f59272cdf3190cc Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 8 Apr 2021 19:17:17 +0800 Subject: i2c: core: add managed function for adding i2c adapters Some I2C controller drivers will only unregister the I2C adapter in their .remove() callback, which can be done by simply using a managed variant to add the I2C adapter. So add the managed functions for adding the I2C adapter. 
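A sketch of the intended use in a controller driver's probe path (all foo_* names are made up):

  static int foo_i2c_probe(struct platform_device *pdev)
  {
          struct i2c_adapter *adap;

          adap = devm_kzalloc(&pdev->dev, sizeof(*adap), GFP_KERNEL);
          if (!adap)
                  return -ENOMEM;

          adap->owner = THIS_MODULE;
          adap->algo = &foo_i2c_algo;     /* hypothetical algorithm ops */
          adap->dev.parent = &pdev->dev;
          strscpy(adap->name, "foo-i2c", sizeof(adap->name));

          /* no i2c_del_adapter() needed in .remove() any more */
          return devm_i2c_add_adapter(&pdev->dev, adap);
  }
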
Reviewed-by: Andy Shevchenko Reviewed-by: Dmitry Osipenko Signed-off-by: Yicong Yang Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 26 ++++++++++++++++++++++++++ include/linux/i2c.h | 1 + 2 files changed, 27 insertions(+) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index c81cc9a09877..c4c28b585b50 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -1693,6 +1693,32 @@ void i2c_del_adapter(struct i2c_adapter *adap) } EXPORT_SYMBOL(i2c_del_adapter); +static void devm_i2c_del_adapter(void *adapter) +{ + i2c_del_adapter(adapter); +} + +/** + * devm_i2c_add_adapter - device-managed variant of i2c_add_adapter() + * @dev: managing device for adding this I2C adapter + * @adapter: the adapter to add + * Context: can sleep + * + * Add adapter with dynamic bus number, same with i2c_add_adapter() + * but the adapter will be auto deleted on driver detach. + */ +int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter) +{ + int ret; + + ret = i2c_add_adapter(adapter); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, devm_i2c_del_adapter, adapter); +} +EXPORT_SYMBOL_GPL(devm_i2c_add_adapter); + static void i2c_parse_timing(struct device *dev, char *prop_name, u32 *cur_val_p, u32 def_val, bool use_def) { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 59b892c5bb05..ce473ddc0378 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -846,6 +846,7 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) */ #if IS_ENABLED(CONFIG_I2C) int i2c_add_adapter(struct i2c_adapter *adap); +int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter); void i2c_del_adapter(struct i2c_adapter *adap); int i2c_add_numbered_adapter(struct i2c_adapter *adap); -- cgit v1.2.3-71-gd317 From 3b4c747cd32078172dd238929e38a43cfed83580 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 8 Apr 2021 19:17:18 +0800 Subject: i2c: core: add api to provide frequency mode strings Some I2C drivers like Designware and HiSilicon will print the bus frequency mode information, so add a public one that everyone can make use of. 
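Typical use is a one-liner at the end of probe; a sketch, where bus_freq_hz stands for whatever timing value the driver derived:

  dev_info(dev, "bus frequency %u Hz, %s\n",
           bus_freq_hz, i2c_freq_mode_string(bus_freq_hz));
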
Tested-by: Jarkko Nikula Reviewed-by: Jarkko Nikula Reviewed-by: Andy Shevchenko Signed-off-by: Yicong Yang Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 21 +++++++++++++++++++++ include/linux/i2c.h | 3 +++ 2 files changed, 24 insertions(+) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index c4c28b585b50..5c16083694bf 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -76,6 +76,27 @@ void i2c_transfer_trace_unreg(void) static_branch_dec(&i2c_trace_msg_key); } +const char *i2c_freq_mode_string(u32 bus_freq_hz) +{ + switch (bus_freq_hz) { + case I2C_MAX_STANDARD_MODE_FREQ: + return "Standard Mode (100 kHz)"; + case I2C_MAX_FAST_MODE_FREQ: + return "Fast Mode (400 kHz)"; + case I2C_MAX_FAST_MODE_PLUS_FREQ: + return "Fast Mode Plus (1.0 MHz)"; + case I2C_MAX_TURBO_MODE_FREQ: + return "Turbo Mode (1.4 MHz)"; + case I2C_MAX_HIGH_SPEED_MODE_FREQ: + return "High Speed Mode (3.4 MHz)"; + case I2C_MAX_ULTRA_FAST_MODE_FREQ: + return "Ultra Fast Mode (5.0 MHz)"; + default: + return "Unknown Mode"; + } +} +EXPORT_SYMBOL_GPL(i2c_freq_mode_string); + const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, const struct i2c_client *client) { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index ce473ddc0378..e5d8b9dad6bf 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -51,6 +51,9 @@ struct module; struct property_entry; #if IS_ENABLED(CONFIG_I2C) +/* Return the Frequency mode string based on the bus frequency */ +const char *i2c_freq_mode_string(u32 bus_freq_hz); + /* * The master routines are the ones normally used to transmit data to devices * on a bus (or read from them). Apart from two basic transfer functions to -- cgit v1.2.3-71-gd317 From 5b5475826c5265cead7ce4ca6d34ec0c566c70aa Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sun, 21 Mar 2021 18:38:32 -0700 Subject: i2c: ensure timely release of driver-allocated resources More and more drivers rely on devres to manage their resources, however if bus' probe() and release() methods are not trivial and control some of resources as well (for example enable or disable clocks, or attach device to a power domain), we need to make sure that driver-allocated resources are released immediately after driver's remove() method returns, and not postponed until driver core gets around to releasing resources. To fix that we open a new devres group before calling driver's probe() and explicitly release it when we return from driver's remove(). Tested-by: Jeff LaBundy Signed-off-by: Dmitry Torokhov Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 21 ++++++++++++++++++++- include/linux/i2c.h | 3 +++ 2 files changed, 23 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 5c16083694bf..4351bf529d0f 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -539,6 +539,13 @@ static int i2c_device_probe(struct device *dev) if (status) goto err_clear_wakeup_irq; + client->devres_group_id = devres_open_group(&client->dev, NULL, + GFP_KERNEL); + if (!client->devres_group_id) { + status = -ENOMEM; + goto err_detach_pm_domain; + } + /* * When there are no more users of probe(), * rename probe_new to probe. 
@@ -551,11 +558,21 @@ static int i2c_device_probe(struct device *dev) else status = -EINVAL; + /* + * Note that we are not closing the devres group opened above so + * even resources that were attached to the device after probe is + * run are released when i2c_device_remove() is executed. This is + * needed as some drivers would allocate additional resources, + * for example when updating firmware. + */ + if (status) - goto err_detach_pm_domain; + goto err_release_driver_resources; return 0; +err_release_driver_resources: + devres_release_group(&client->dev, client->devres_group_id); err_detach_pm_domain: dev_pm_domain_detach(&client->dev, true); err_clear_wakeup_irq: @@ -584,6 +601,8 @@ static int i2c_device_remove(struct device *dev) dev_warn(dev, "remove failed (%pe), will be ignored\n", ERR_PTR(status)); } + devres_release_group(&client->dev, client->devres_group_id); + dev_pm_domain_detach(&client->dev, true); dev_pm_clear_wake_irq(&client->dev); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index e5d8b9dad6bf..e8f2ac8c9c3d 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -309,6 +309,8 @@ struct i2c_driver { * userspace_devices list * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter * calls it to pass on slave events to the slave driver. + * @devres_group_id: id of the devres group that will be created for resources + * acquired when probing this device. * * An i2c_client identifies a single device (i.e. chip) connected to an * i2c bus. The behaviour exposed to Linux is defined by the driver @@ -337,6 +339,7 @@ struct i2c_client { #if IS_ENABLED(CONFIG_I2C_SLAVE) i2c_slave_cb_t slave_cb; /* callback for slave mode */ #endif + void *devres_group_id; /* ID of probe devres group */ }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) -- cgit v1.2.3-71-gd317 From 332d1a0373be32a3a3c152756bca45ff4f4e11b5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 25 Mar 2021 18:15:36 -0400 Subject: NFS: nfs4_bitmask_adjust() must not change the server global bitmasks As currently set, the calls to nfs4_bitmask_adjust() will end up overwriting the contents of the nfs_server cache_consistency_bitmask field. The intention here should be to modify a private copy of that mask in the close/delegreturn/write arguments. 
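The shape of the fix, repeated at every call site below, is to seed a per-call copy from the server-global mask and hand that copy to the RPC arguments; excerpted from the close path in this patch:

  nfs4_bitmask_set(calldata->arg.bitmask_store,
                   server->cache_consistency_bitmask,
                   inode, server, NULL);
  calldata->arg.bitmask = calldata->arg.bitmask_store;
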
Fixes: 76bd5c016ef4 ("NFSv4: make cache consistency bitmask dynamic") Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 56 ++++++++++++++++++++++++++++++------------------- include/linux/nfs_xdr.h | 11 +++++++--- 2 files changed, 42 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index bca1726bcd55..1cf98c40e3b2 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -108,9 +108,10 @@ static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *, static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *, const struct cred *, bool); #endif -static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode, - struct nfs_server *server, - struct nfs4_label *label); +static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], + const __u32 *src, struct inode *inode, + struct nfs_server *server, + struct nfs4_label *label); #ifdef CONFIG_NFS_V4_SECURITY_LABEL static inline struct nfs4_label * @@ -3591,6 +3592,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) struct nfs4_closedata *calldata = data; struct nfs4_state *state = calldata->state; struct inode *inode = calldata->inode; + struct nfs_server *server = NFS_SERVER(inode); struct pnfs_layout_hdr *lo; bool is_rdonly, is_wronly, is_rdwr; int call_close = 0; @@ -3647,8 +3649,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) { /* Close-to-open cache consistency revalidation */ if (!nfs4_have_delegation(inode, FMODE_READ)) { - calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask; - nfs4_bitmask_adjust(calldata->arg.bitmask, inode, NFS_SERVER(inode), NULL); + nfs4_bitmask_set(calldata->arg.bitmask_store, + server->cache_consistency_bitmask, + inode, server, NULL); + calldata->arg.bitmask = calldata->arg.bitmask_store; } else calldata->arg.bitmask = NULL; } @@ -5416,19 +5420,17 @@ bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; } -static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode, - struct nfs_server *server, - struct nfs4_label *label) +static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src, + struct inode *inode, struct nfs_server *server, + struct nfs4_label *label) { - unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity); + unsigned int i; - if ((cache_validity & NFS_INO_INVALID_DATA) || - (cache_validity & NFS_INO_REVAL_PAGECACHE) || - (cache_validity & NFS_INO_REVAL_FORCED) || - (cache_validity & NFS_INO_INVALID_OTHER)) - nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode); + memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); + if (cache_validity & (NFS_INO_INVALID_CHANGE | NFS_INO_REVAL_PAGECACHE)) + bitmask[0] |= FATTR4_WORD0_CHANGE; if (cache_validity & NFS_INO_INVALID_ATIME) bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; if (cache_validity & NFS_INO_INVALID_OTHER) @@ -5437,16 +5439,22 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode, FATTR4_WORD1_NUMLINKS; if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL) bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL; - if (cache_validity & NFS_INO_INVALID_CHANGE) - bitmask[0] |= FATTR4_WORD0_CHANGE; if (cache_validity & NFS_INO_INVALID_CTIME) bitmask[1] |= FATTR4_WORD1_TIME_METADATA; if (cache_validity & NFS_INO_INVALID_MTIME) bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; - if (cache_validity & 
NFS_INO_INVALID_SIZE) - bitmask[0] |= FATTR4_WORD0_SIZE; if (cache_validity & NFS_INO_INVALID_BLOCKS) bitmask[1] |= FATTR4_WORD1_SPACE_USED; + + if (nfs4_have_delegation(inode, FMODE_READ) && + !(cache_validity & NFS_INO_REVAL_FORCED)) + bitmask[0] &= ~FATTR4_WORD0_SIZE; + else if (cache_validity & + (NFS_INO_INVALID_SIZE | NFS_INO_REVAL_PAGECACHE)) + bitmask[0] |= FATTR4_WORD0_SIZE; + + for (i = 0; i < NFS4_BITMASK_SZ; i++) + bitmask[i] &= server->attr_bitmask[i]; } static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, @@ -5459,8 +5467,10 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, hdr->args.bitmask = NULL; hdr->res.fattr = NULL; } else { - hdr->args.bitmask = server->cache_consistency_bitmask; - nfs4_bitmask_adjust(hdr->args.bitmask, hdr->inode, server, NULL); + nfs4_bitmask_set(hdr->args.bitmask_store, + server->cache_consistency_bitmask, + hdr->inode, server, NULL); + hdr->args.bitmask = hdr->args.bitmask_store; } if (!hdr->pgio_done_cb) @@ -6502,8 +6512,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, data->args.fhandle = &data->fh; data->args.stateid = &data->stateid; - data->args.bitmask = server->cache_consistency_bitmask; - nfs4_bitmask_adjust(data->args.bitmask, inode, server, NULL); + nfs4_bitmask_set(data->args.bitmask_store, + server->cache_consistency_bitmask, inode, server, + NULL); + data->args.bitmask = data->args.bitmask_store; nfs_copy_fh(&data->fh, NFS_FH(inode)); nfs4_stateid_copy(&data->stateid, stateid); data->res.fattr = &data->fattr; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 3327239fa2f9..cc29dee508f7 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -15,6 +15,8 @@ #define NFS_DEF_FILE_IO_SIZE (4096U) #define NFS_MIN_FILE_IO_SIZE (1024U) +#define NFS_BITMASK_SZ 3 + struct nfs4_string { unsigned int len; char *data; @@ -525,7 +527,8 @@ struct nfs_closeargs { struct nfs_seqid * seqid; fmode_t fmode; u32 share_access; - u32 * bitmask; + const u32 * bitmask; + u32 bitmask_store[NFS_BITMASK_SZ]; struct nfs4_layoutreturn_args *lr_args; }; @@ -608,7 +611,8 @@ struct nfs4_delegreturnargs { struct nfs4_sequence_args seq_args; const struct nfs_fh *fhandle; const nfs4_stateid *stateid; - u32 * bitmask; + const u32 *bitmask; + u32 bitmask_store[NFS_BITMASK_SZ]; struct nfs4_layoutreturn_args *lr_args; }; @@ -648,7 +652,8 @@ struct nfs_pgio_args { union { unsigned int replen; /* used by read */ struct { - u32 * bitmask; /* used by write */ + const u32 * bitmask; /* used by write */ + u32 bitmask_store[NFS_BITMASK_SZ]; /* used by write */ enum nfs3_stable_how stable; /* used by write */ }; }; -- cgit v1.2.3-71-gd317 From 1f3208b2d6975f31b9c7c6bf174b84fe9c97492f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 25 Mar 2021 11:04:34 -0400 Subject: NFS: Add a cache validity flag argument to nfs_revalidate_inode() Add an argument to nfs_revalidate_inode() to allow callers to specify which attributes they need to check for validity. 
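Callers now pass the validity bits they care about, and nfs_revalidate_inode() returns immediately unless one of those bits is marked invalid on the inode (or the inode is stale). A sketch of the new calling convention:

  /* revalidate only if the change attribute or size is known to be stale */
  error = nfs_revalidate_inode(inode,
                               NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE);
  if (error)
          return error;
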
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 2 +- fs/nfs/export.c | 6 +----- fs/nfs/inode.c | 25 +++++++------------------ fs/nfs/nfs3acl.c | 2 +- fs/nfs/nfs4proc.c | 6 +++--- include/linux/nfs_fs.h | 2 +- 6 files changed, 14 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 0cd7c59a6601..e924d65c125e 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -3006,7 +3006,7 @@ out_notsup: if (mask & MAY_NOT_BLOCK) return -ECHILD; - res = nfs_revalidate_inode(NFS_SERVER(inode), inode); + res = nfs_revalidate_inode(inode, NFS_INO_INVALID_OTHER); if (res == 0) res = generic_permission(&init_user_ns, inode, mask); goto out; diff --git a/fs/nfs/export.c b/fs/nfs/export.c index b347e3ce0cc8..37a1a88df771 100644 --- a/fs/nfs/export.c +++ b/fs/nfs/export.c @@ -169,11 +169,7 @@ out: static u64 nfs_fetch_iversion(struct inode *inode) { - struct nfs_server *server = NFS_SERVER(inode); - - if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_CHANGE | - NFS_INO_REVAL_PAGECACHE)) - __nfs_revalidate_inode(server, inode); + nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); return inode_peek_iversion_raw(inode); } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index d34da63202cc..b9aac408f03a 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -802,16 +802,6 @@ static void nfs_readdirplus_parent_cache_hit(struct dentry *dentry) dput(parent); } -static bool nfs_need_revalidate_inode(struct inode *inode) -{ - if (NFS_I(inode)->cache_validity & - (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL)) - return true; - if (nfs_attribute_cache_expired(inode)) - return true; - return false; -} - static u32 nfs_get_valid_attrmask(struct inode *inode) { unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity); @@ -1004,7 +994,6 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync) { struct nfs_inode *nfsi; struct inode *inode; - struct nfs_server *server; if (!(ctx->mode & FMODE_WRITE)) return; @@ -1020,10 +1009,10 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync) return; if (!list_empty(&nfsi->open_files)) return; - server = NFS_SERVER(inode); - if (server->flags & NFS_MOUNT_NOCTO) + if (NFS_SERVER(inode)->flags & NFS_MOUNT_NOCTO) return; - nfs_revalidate_inode(server, inode); + nfs_revalidate_inode(inode, + NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE); } EXPORT_SYMBOL_GPL(nfs_close_context); @@ -1278,16 +1267,16 @@ int nfs_attribute_cache_expired(struct inode *inode) /** * nfs_revalidate_inode - Revalidate the inode attributes - * @server: pointer to nfs_server struct * @inode: pointer to inode struct + * @flags: cache flags to check * * Updates inode attribute information by retrieving the data from the server. */ -int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) +int nfs_revalidate_inode(struct inode *inode, unsigned long flags) { - if (!nfs_need_revalidate_inode(inode)) + if (!nfs_check_cache_invalid(inode, flags)) return NFS_STALE(inode) ? 
-ESTALE : 0; - return __nfs_revalidate_inode(server, inode); + return __nfs_revalidate_inode(NFS_SERVER(inode), inode); } EXPORT_SYMBOL_GPL(nfs_revalidate_inode); diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index bb386a691e69..9ec560aa4a50 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -65,7 +65,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type) if (!nfs_server_capable(inode, NFS_CAP_ACLS)) return ERR_PTR(-EOPNOTSUPP); - status = nfs_revalidate_inode(server, inode); + status = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); if (status < 0) return ERR_PTR(status); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1cf98c40e3b2..6b990fe5bc1f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5868,7 +5868,7 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) if (!nfs4_server_supports_acls(server)) return -EOPNOTSUPP; - ret = nfs_revalidate_inode(server, inode); + ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); if (ret < 0) return ret; if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) @@ -7619,7 +7619,7 @@ static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, return -EACCES; } - ret = nfs_revalidate_inode(NFS_SERVER(inode), inode); + ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); if (ret) return ret; @@ -7650,7 +7650,7 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) return 0; } - ret = nfs_revalidate_inode(NFS_SERVER(inode), inode); + ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); if (ret) return ret; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index eadaabd18dc7..624ffd47a9d4 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -386,7 +386,7 @@ extern void nfs_access_set_mask(struct nfs_access_entry *, u32); extern int nfs_permission(struct user_namespace *, struct inode *, int); extern int nfs_open(struct inode *, struct file *); extern int nfs_attribute_cache_expired(struct inode *inode); -extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); +extern int nfs_revalidate_inode(struct inode *inode, unsigned long flags); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_clear_invalid_mapping(struct address_space *mapping); extern bool nfs_mapping_need_revalidate_inode(struct inode *inode); -- cgit v1.2.3-71-gd317 From fabf2b341502e894001d70f91309dd6f3785e2dc Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 25 Mar 2021 13:14:42 -0400 Subject: NFS: Separate tracking of file nlinks cache validity from the mode/uid/gid Rename can cause us to revalidate the access cache, so lets track the nlinks separately from the mode/uid/gid. 
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 2 +- fs/nfs/inode.c | 15 ++++++++++----- fs/nfs/nfs4proc.c | 13 +++++++------ fs/nfs/nfstrace.h | 4 +++- include/linux/nfs_fs.h | 2 ++ 5 files changed, 23 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index e924d65c125e..f748d2294261 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1711,7 +1711,7 @@ static void nfs_drop_nlink(struct inode *inode) NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter(); nfs_set_cache_invalid( inode, NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME | - NFS_INO_INVALID_OTHER | NFS_INO_REVAL_FORCED); + NFS_INO_INVALID_NLINK | NFS_INO_REVAL_FORCED); spin_unlock(&inode->i_lock); } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 3d18e66a4b8f..7bf9138330f2 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -538,7 +538,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st if (fattr->valid & NFS_ATTR_FATTR_NLINK) set_nlink(inode, fattr->nlink); else if (nfs_server_capable(inode, NFS_CAP_NLINK)) - nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); + nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK); if (fattr->valid & NFS_ATTR_FATTR_OWNER) inode->i_uid = fattr->uid; else if (nfs_server_capable(inode, NFS_CAP_OWNER)) @@ -801,8 +801,10 @@ static u32 nfs_get_valid_attrmask(struct inode *inode) reply_mask |= STATX_MTIME; if (!(cache_validity & NFS_INO_INVALID_SIZE)) reply_mask |= STATX_SIZE; + if (!(cache_validity & NFS_INO_INVALID_NLINK)) + reply_mask |= STATX_NLINK; if (!(cache_validity & NFS_INO_INVALID_OTHER)) - reply_mask |= STATX_UID | STATX_GID | STATX_MODE | STATX_NLINK; + reply_mask |= STATX_UID | STATX_GID | STATX_MODE; if (!(cache_validity & NFS_INO_INVALID_BLOCKS)) reply_mask |= STATX_BLOCKS; return reply_mask; @@ -868,7 +870,9 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path, do_update |= cache_validity & NFS_INO_INVALID_MTIME; if (request_mask & STATX_SIZE) do_update |= cache_validity & NFS_INO_INVALID_SIZE; - if (request_mask & (STATX_UID | STATX_GID | STATX_MODE | STATX_NLINK)) + if (request_mask & STATX_NLINK) + do_update |= cache_validity & NFS_INO_INVALID_NLINK; + if (request_mask & (STATX_UID | STATX_GID | STATX_MODE)) do_update |= cache_validity & NFS_INO_INVALID_OTHER; if (request_mask & STATX_BLOCKS) do_update |= cache_validity & NFS_INO_INVALID_BLOCKS; @@ -1518,7 +1522,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat /* Has the link count changed? 
*/ if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink) - invalid |= NFS_INO_INVALID_OTHER; + invalid |= NFS_INO_INVALID_NLINK; ts = inode->i_atime; if ((fattr->valid & NFS_ATTR_FATTR_ATIME) && !timespec64_equal(&ts, &fattr->atime)) @@ -1942,6 +1946,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE | NFS_INO_INVALID_BLOCKS + | NFS_INO_INVALID_NLINK | NFS_INO_INVALID_OTHER; if (S_ISDIR(inode->i_mode)) nfs_force_lookup_revalidate(inode); @@ -2074,7 +2079,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) } } else if (server->caps & NFS_CAP_NLINK) { nfsi->cache_validity |= save_cache_validity & - (NFS_INO_INVALID_OTHER + (NFS_INO_INVALID_NLINK | NFS_INO_REVAL_FORCED); cache_revalidated = false; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8dc595ce40ca..a74c1c3c4192 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1167,14 +1167,14 @@ int nfs4_call_sync(struct rpc_clnt *clnt, static void nfs4_inc_nlink_locked(struct inode *inode) { - nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); + nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK); inc_nlink(inode); } static void nfs4_dec_nlink_locked(struct inode *inode) { - nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); + nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK); drop_nlink(inode); } @@ -4717,11 +4717,11 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, /* Note: If we moved a directory, nlink will change */ nfs4_update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start, - NFS_INO_INVALID_OTHER | + NFS_INO_INVALID_NLINK | NFS_INO_INVALID_DATA); nfs4_update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start, - NFS_INO_INVALID_OTHER | + NFS_INO_INVALID_NLINK | NFS_INO_INVALID_DATA); } else nfs4_update_changeattr(old_dir, &res->old_cinfo, @@ -5433,8 +5433,9 @@ static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src, bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; if (cache_validity & NFS_INO_INVALID_OTHER) bitmask[1] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | - FATTR4_WORD1_OWNER_GROUP | - FATTR4_WORD1_NUMLINKS; + FATTR4_WORD1_OWNER_GROUP; + if (cache_validity & NFS_INO_INVALID_NLINK) + bitmask[1] |= FATTR4_WORD1_NUMLINKS; if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL) bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL; if (cache_validity & NFS_INO_INVALID_CTIME) diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index cdba6eebe3cb..a0ebc53160dd 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -48,6 +48,7 @@ TRACE_DEFINE_ENUM(NFS_INO_INVALID_OTHER); TRACE_DEFINE_ENUM(NFS_INO_DATA_INVAL_DEFER); TRACE_DEFINE_ENUM(NFS_INO_INVALID_BLOCKS); TRACE_DEFINE_ENUM(NFS_INO_INVALID_XATTR); +TRACE_DEFINE_ENUM(NFS_INO_INVALID_NLINK); #define nfs_show_cache_validity(v) \ __print_flags(v, "|", \ @@ -65,7 +66,8 @@ TRACE_DEFINE_ENUM(NFS_INO_INVALID_XATTR); { NFS_INO_INVALID_OTHER, "INVALID_OTHER" }, \ { NFS_INO_DATA_INVAL_DEFER, "DATA_INVAL_DEFER" }, \ { NFS_INO_INVALID_BLOCKS, "INVALID_BLOCKS" }, \ - { NFS_INO_INVALID_XATTR, "INVALID_XATTR" }) + { NFS_INO_INVALID_XATTR, "INVALID_XATTR" }, \ + { NFS_INO_INVALID_NLINK, "INVALID_NLINK" }) TRACE_DEFINE_ENUM(NFS_INO_ADVISE_RDPLUS); TRACE_DEFINE_ENUM(NFS_INO_STALE); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 624ffd47a9d4..41165b988dfb 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -246,11 +246,13 @@ struct nfs4_copy_state { 
BIT(13) /* Deferred cache invalidation */ #define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ #define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */ +#define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ | NFS_INO_INVALID_MTIME \ | NFS_INO_INVALID_SIZE \ + | NFS_INO_INVALID_NLINK \ | NFS_INO_INVALID_OTHER) /* inode metadata is invalid */ /* -- cgit v1.2.3-71-gd317 From 720869eb19f3161980d6d4631d3df7e8c5355993 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 13 Apr 2021 09:41:16 -0400 Subject: NFS: Separate tracking of file mode cache validity from the uid/gid chown()/chgrp() and chmod() are separate operations, and in addition, there are mode operations that are performed automatically by the server. So let's track mode validity separately from the file ownership validity. Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 5 +++-- fs/nfs/inode.c | 18 ++++++++++++------ fs/nfs/nfs4proc.c | 14 +++++++++----- fs/nfs/nfstrace.h | 4 +++- fs/nfs/write.c | 2 +- include/linux/nfs_fs.h | 2 ++ 6 files changed, 30 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index f748d2294261..d2835d211a73 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2948,7 +2948,7 @@ static int nfs_execute_ok(struct inode *inode, int mask) if (S_ISDIR(inode->i_mode)) return 0; - if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_OTHER)) { + if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_MODE)) { if (mask & MAY_NOT_BLOCK) return -ECHILD; ret = __nfs_revalidate_inode(server, inode); @@ -3006,7 +3006,8 @@ out_notsup: if (mask & MAY_NOT_BLOCK) return -ECHILD; - res = nfs_revalidate_inode(inode, NFS_INO_INVALID_OTHER); + res = nfs_revalidate_inode(inode, NFS_INO_INVALID_MODE | + NFS_INO_INVALID_OTHER); if (res == 0) res = generic_permission(&init_user_ns, inode, mask); goto out; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 7bf9138330f2..81e3e140e923 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -199,7 +199,8 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) if (have_delegation) { if (!(flags & NFS_INO_REVAL_FORCED)) - flags &= ~NFS_INO_INVALID_OTHER; + flags &= ~(NFS_INO_INVALID_MODE | + NFS_INO_INVALID_OTHER); flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE | NFS_INO_INVALID_XATTR); @@ -472,7 +473,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st nfsi->cache_validity = 0; if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0 && nfs_server_capable(inode, NFS_CAP_MODE)) - nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); + nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE); /* Why so? Because we want revalidate for devices/FIFOs, and * that's precisely what we have in nfs_file_inode_operations. 
*/ @@ -803,8 +804,10 @@ static u32 nfs_get_valid_attrmask(struct inode *inode) reply_mask |= STATX_SIZE; if (!(cache_validity & NFS_INO_INVALID_NLINK)) reply_mask |= STATX_NLINK; + if (!(cache_validity & NFS_INO_INVALID_MODE)) + reply_mask |= STATX_MODE; if (!(cache_validity & NFS_INO_INVALID_OTHER)) - reply_mask |= STATX_UID | STATX_GID | STATX_MODE; + reply_mask |= STATX_UID | STATX_GID; if (!(cache_validity & NFS_INO_INVALID_BLOCKS)) reply_mask |= STATX_BLOCKS; return reply_mask; @@ -872,7 +875,9 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path, do_update |= cache_validity & NFS_INO_INVALID_SIZE; if (request_mask & STATX_NLINK) do_update |= cache_validity & NFS_INO_INVALID_NLINK; - if (request_mask & (STATX_UID | STATX_GID | STATX_MODE)) + if (request_mask & STATX_MODE) + do_update |= cache_validity & NFS_INO_INVALID_MODE; + if (request_mask & (STATX_UID | STATX_GID)) do_update |= cache_validity & NFS_INO_INVALID_OTHER; if (request_mask & STATX_BLOCKS) do_update |= cache_validity & NFS_INO_INVALID_BLOCKS; @@ -1510,7 +1515,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) invalid |= NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL - | NFS_INO_INVALID_OTHER; + | NFS_INO_INVALID_MODE; if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && !uid_eq(inode->i_uid, fattr->uid)) invalid |= NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL @@ -1947,6 +1952,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | NFS_INO_INVALID_SIZE | NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK + | NFS_INO_INVALID_MODE | NFS_INO_INVALID_OTHER; if (S_ISDIR(inode->i_mode)) nfs_force_lookup_revalidate(inode); @@ -2037,7 +2043,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) } } else if (server->caps & NFS_CAP_MODE) { nfsi->cache_validity |= save_cache_validity & - (NFS_INO_INVALID_OTHER + (NFS_INO_INVALID_MODE | NFS_INO_REVAL_FORCED); cache_revalidated = false; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a74c1c3c4192..bc90f2a12d5d 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -302,9 +302,10 @@ static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src, if (!(cache_validity & NFS_INO_INVALID_CHANGE)) dst[0] &= ~FATTR4_WORD0_CHANGE; + if (!(cache_validity & NFS_INO_INVALID_MODE)) + dst[1] &= ~FATTR4_WORD1_MODE; if (!(cache_validity & NFS_INO_INVALID_OTHER)) - dst[1] &= ~(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | - FATTR4_WORD1_OWNER_GROUP); + dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP); } static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry, @@ -3344,7 +3345,9 @@ static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, unsigned long adjust_flags = NFS_INO_INVALID_CHANGE; int err; - if (sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) + if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) + adjust_flags |= NFS_INO_INVALID_MODE; + if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) adjust_flags |= NFS_INO_INVALID_OTHER; do { @@ -5431,9 +5434,10 @@ static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src, bitmask[0] |= FATTR4_WORD0_CHANGE; if (cache_validity & NFS_INO_INVALID_ATIME) bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; + if (cache_validity & NFS_INO_INVALID_MODE) + bitmask[1] |= FATTR4_WORD1_MODE; if (cache_validity & NFS_INO_INVALID_OTHER) - bitmask[1] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | 
- FATTR4_WORD1_OWNER_GROUP; + bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; if (cache_validity & NFS_INO_INVALID_NLINK) bitmask[1] |= FATTR4_WORD1_NUMLINKS; if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL) diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index a0ebc53160dd..41a161cd31f6 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -49,6 +49,7 @@ TRACE_DEFINE_ENUM(NFS_INO_DATA_INVAL_DEFER); TRACE_DEFINE_ENUM(NFS_INO_INVALID_BLOCKS); TRACE_DEFINE_ENUM(NFS_INO_INVALID_XATTR); TRACE_DEFINE_ENUM(NFS_INO_INVALID_NLINK); +TRACE_DEFINE_ENUM(NFS_INO_INVALID_MODE); #define nfs_show_cache_validity(v) \ __print_flags(v, "|", \ @@ -67,7 +68,8 @@ TRACE_DEFINE_ENUM(NFS_INO_INVALID_NLINK); { NFS_INO_DATA_INVAL_DEFER, "DATA_INVAL_DEFER" }, \ { NFS_INO_INVALID_BLOCKS, "INVALID_BLOCKS" }, \ { NFS_INO_INVALID_XATTR, "INVALID_XATTR" }, \ - { NFS_INO_INVALID_NLINK, "INVALID_NLINK" }) + { NFS_INO_INVALID_NLINK, "INVALID_NLINK" }, \ + { NFS_INO_INVALID_MODE, "INVALID_MODE" }) TRACE_DEFINE_ENUM(NFS_INO_ADVISE_RDPLUS); TRACE_DEFINE_ENUM(NFS_INO_STALE); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 7a39b3d424da..61d1174935b6 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1604,7 +1604,7 @@ static int nfs_writeback_done(struct rpc_task *task, /* Deal with the suid/sgid bit corner case */ if (nfs_should_remove_suid(inode)) { spin_lock(&inode->i_lock); - nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); + nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE); spin_unlock(&inode->i_lock); } return 0; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 41165b988dfb..ffba254d2098 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -247,12 +247,14 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ #define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */ #define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */ +#define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ | NFS_INO_INVALID_MTIME \ | NFS_INO_INVALID_SIZE \ | NFS_INO_INVALID_NLINK \ + | NFS_INO_INVALID_MODE \ | NFS_INO_INVALID_OTHER) /* inode metadata is invalid */ /* -- cgit v1.2.3-71-gd317 From 7f08a3359a3c1e39c2a118fbbe583d8c8db14ace Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 26 Mar 2021 09:50:19 -0400 Subject: NFSv4: Add support for the NFSv4.2 "change_attr_type" attribute The change_attr_type allows the server to provide a description of how the change attribute will behave. This again will allow the client to optimise its behaviour w.r.t. attribute revalidation. 
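As an illustration of the optimisation this enables: a client can order two change attribute values whenever the server advertises a monotonic or version-counter type, and fall back to plain inequality otherwise. The helper below is a hypothetical sketch (only the nfs4_change_attr_type enum and the server->change_attr_type field are introduced by this patch); with NFS4_CHANGE_TYPE_IS_UNDEFINED as the default set in nfs_alloc_server(), older servers keep the conservative behaviour.

#include <linux/nfs_fs_sb.h>
#include <linux/nfs4.h>

/*
 * Hypothetical helper: decide whether a freshly received change attribute
 * supersedes the cached one, using the server-advertised semantics.
 */
static bool nfs_change_attr_is_newer(const struct nfs_server *server,
				     u64 cached, u64 received)
{
	switch (server->change_attr_type) {
	case NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR:
	case NFS4_CHANGE_TYPE_IS_VERSION_COUNTER:
	case NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS:
		/* Values are ordered, so the larger one is the newer one. */
		return (s64)(received - cached) > 0;
	case NFS4_CHANGE_TYPE_IS_TIME_METADATA:
	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
	default:
		/* No ordering guarantee: any difference forces revalidation. */
		return received != cached;
	}
}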
Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 3 +++ fs/nfs/nfs3xdr.c | 1 + fs/nfs/nfs4proc.c | 1 + fs/nfs/nfs4xdr.c | 32 ++++++++++++++++++++++++++++++++ fs/nfs/proc.c | 1 + include/linux/nfs4.h | 9 +++++++++ include/linux/nfs_fs_sb.h | 3 +++ include/linux/nfs_xdr.h | 2 ++ 8 files changed, 52 insertions(+) (limited to 'include/linux') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 399a8eb15397..2aeb4e52a4f1 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -792,6 +792,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, server->maxfilesize = fsinfo->maxfilesize; server->time_delta = fsinfo->time_delta; + server->change_attr_type = fsinfo->change_attr_type; server->clone_blksize = fsinfo->clone_blksize; /* We're airborne Set socket buffersize */ @@ -933,6 +934,8 @@ struct nfs_server *nfs_alloc_server(void) return NULL; } + server->change_attr_type = NFS4_CHANGE_TYPE_IS_UNDEFINED; + ida_init(&server->openowner_id); ida_init(&server->lockowner_id); pnfs_init_server(server); diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index ed1c83738c30..83ad62c81fc7 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -2227,6 +2227,7 @@ static int decode_fsinfo3resok(struct xdr_stream *xdr, /* ignore properties */ result->lease_time = 0; + result->change_attr_type = NFS4_CHANGE_TYPE_IS_TIME_METADATA; return 0; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index bc90f2a12d5d..6992c88a25e7 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -264,6 +264,7 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD1_FS_LAYOUT_TYPES, FATTR4_WORD2_LAYOUT_BLKSIZE | FATTR4_WORD2_CLONE_BLKSIZE + | FATTR4_WORD2_CHANGE_ATTR_TYPE | FATTR4_WORD2_XATTR_SUPPORT }; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index d8a1911dd39e..edac4718dec1 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -153,6 +153,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, 5 /* fs layout types */ + \ 1 /* layout blksize */ + \ 1 /* clone blksize */ + \ + 1 /* change attr type */ + \ 1 /* xattr support */) #define encode_renew_maxsz (op_encode_hdr_maxsz + 3) #define decode_renew_maxsz (op_decode_hdr_maxsz) @@ -4846,6 +4847,32 @@ static int decode_attr_clone_blksize(struct xdr_stream *xdr, uint32_t *bitmap, return 0; } +static int decode_attr_change_attr_type(struct xdr_stream *xdr, + uint32_t *bitmap, + enum nfs4_change_attr_type *res) +{ + u32 tmp = NFS4_CHANGE_TYPE_IS_UNDEFINED; + + dprintk("%s: bitmap is %x\n", __func__, bitmap[2]); + if (bitmap[2] & FATTR4_WORD2_CHANGE_ATTR_TYPE) { + if (xdr_stream_decode_u32(xdr, &tmp)) + return -EIO; + bitmap[2] &= ~FATTR4_WORD2_CHANGE_ATTR_TYPE; + } + + switch(tmp) { + case NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR: + case NFS4_CHANGE_TYPE_IS_VERSION_COUNTER: + case NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS: + case NFS4_CHANGE_TYPE_IS_TIME_METADATA: + *res = tmp; + break; + default: + *res = NFS4_CHANGE_TYPE_IS_UNDEFINED; + } + return 0; +} + static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo) { unsigned int savep; @@ -4894,6 +4921,11 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo) if (status) goto xdr_error; + status = decode_attr_change_attr_type(xdr, bitmap, + &fsinfo->change_attr_type); + if (status) + goto xdr_error; + status = decode_attr_xattrsupport(xdr, bitmap, &fsinfo->xattr_support); if (status) diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 73ab7c59d3a7..ea19dbf12301 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -91,6 +91,7 @@ 
nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, info->dtpref = fsinfo.tsize; info->maxfilesize = 0x7FFFFFFF; info->lease_time = 0; + info->change_attr_type = NFS4_CHANGE_TYPE_IS_TIME_METADATA; return 0; } diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 5b4c67c91f56..15004c469807 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -452,6 +452,7 @@ enum lock_type4 { #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) #define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) +#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) #define FATTR4_WORD2_MODE_UMASK (1UL << 17) #define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18) @@ -709,6 +710,14 @@ struct nl4_server { } u; }; +enum nfs4_change_attr_type { + NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0, + NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1, + NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2, + NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3, + NFS4_CHANGE_TYPE_IS_UNDEFINED = 4, +}; + /* * Options for setxattr. These match the flags for setxattr(2). */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 6f76b32a0238..fbcdfd9f7a7f 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -180,6 +180,9 @@ struct nfs_server { #define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ #define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */ + enum nfs4_change_attr_type + change_attr_type;/* Description of change attribute */ + struct nfs_fsid fsid; __u64 maxfilesize; /* maximum file size */ struct timespec64 time_delta; /* smallest time granularity */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index cc29dee508f7..717ecc87c9e7 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -152,6 +152,8 @@ struct nfs_fsinfo { __u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */ __u32 blksize; /* preferred pnfs io block size */ __u32 clone_blksize; /* granularity of a CLONE operation */ + enum nfs4_change_attr_type + change_attr_type; /* Info about change attr */ __u32 xattr_support; /* User xattrs supported */ }; -- cgit v1.2.3-71-gd317 From 63f9c44bca5e10fb1fd86aee7e38039ed98f95cc Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Sun, 11 Apr 2021 15:29:18 +0300 Subject: net/mlx5: Add MEMIC operations related bits Add the MEMIC operations bits and structures to the mlx5_ifc file. 
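A hedged sketch of how a follow-up consumer might drive the new command with the standard mlx5_ifc accessors; the function name and calling convention are assumptions here, only the opcode, op_mod values and field names come from the hunks below.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Hypothetical MODIFY_MEMIC user; names are illustrative only. */
static int modify_memic_op(struct mlx5_core_dev *dev, u64 memic_addr,
			   u8 operation, bool alloc, u64 *op_addr)
{
	u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
	int err;

	MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
	MLX5_SET(modify_memic_in, in, op_mod,
		 alloc ? MLX5_MODIFY_MEMIC_OP_MOD_ALLOC :
			 MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
	MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
	MLX5_SET64(modify_memic_in, in, memic_start_addr, memic_addr);

	err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
	if (err)
		return err;

	/* On allocation the device reports where the operation landed. */
	if (alloc && op_addr)
		*op_addr = MLX5_GET64(modify_memic_out, out,
				      memic_operation_addr);
	return 0;
}

The new memic_operations field in mlx5_ifc_device_mem_cap_bits is the capability mask a caller would consult before issuing the command for a given operation type.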
Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky --- include/linux/mlx5/mlx5_ifc.h | 42 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 432290b58a0b..47241ebfcf7d 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -133,6 +133,7 @@ enum { MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, MLX5_CMD_OP_ALLOC_MEMIC = 0x205, MLX5_CMD_OP_DEALLOC_MEMIC = 0x206, + MLX5_CMD_OP_MODIFY_MEMIC = 0x207, MLX5_CMD_OP_CREATE_EQ = 0x301, MLX5_CMD_OP_DESTROY_EQ = 0x302, MLX5_CMD_OP_QUERY_EQ = 0x303, @@ -1017,7 +1018,11 @@ struct mlx5_ifc_device_mem_cap_bits { u8 header_modify_sw_icm_start_address[0x40]; - u8 reserved_at_180[0x680]; + u8 reserved_at_180[0x80]; + + u8 memic_operations[0x20]; + + u8 reserved_at_220[0x5e0]; }; struct mlx5_ifc_device_event_cap_bits { @@ -10417,6 +10422,41 @@ struct mlx5_ifc_destroy_vport_lag_in_bits { u8 reserved_at_40[0x40]; }; +enum { + MLX5_MODIFY_MEMIC_OP_MOD_ALLOC, + MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC, +}; + +struct mlx5_ifc_modify_memic_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x18]; + u8 memic_operation_type[0x8]; + + u8 memic_start_addr[0x40]; + + u8 reserved_at_c0[0x140]; +}; + +struct mlx5_ifc_modify_memic_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 memic_operation_addr[0x40]; + + u8 reserved_at_c0[0x140]; +}; + struct mlx5_ifc_alloc_memic_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; -- cgit v1.2.3-71-gd317 From ce62b114bbad9346641d16853c528ba01513e1b0 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 5 Mar 2018 15:01:18 -0500 Subject: NFS: Split attribute support out from the server capabilities There are lots of attributes, and they are crowding out the bit space. 
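Concretely, the per-attribute NFS_CAP_* bits in server->caps give way to the NFS_ATTR_FATTR_* masks already used for decoded attributes, now cached in a new server->fattr_valid word. A minimal sketch of the resulting test (the helper name is hypothetical; the field and masks are taken from the hunks below):

#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

/* Hypothetical helper: can the server ever return this attribute? */
static inline bool nfs_server_supplies(const struct inode *inode, u64 fattr_mask)
{
	return (NFS_SERVER(inode)->fattr_valid & fattr_mask) == fattr_mask;
}

/*
 * Usage then mirrors the nfs_fhget() pattern in the diff below, e.g.
 *
 *	if (!(fattr->valid & NFS_ATTR_FATTR_NLINK) &&
 *	    nfs_server_supplies(inode, NFS_ATTR_FATTR_NLINK))
 *		nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK);
 */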
Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 15 +++++++++++--- fs/nfs/inode.c | 51 ++++++++++++++++++++++++++++------------------- fs/nfs/nfs4proc.c | 49 +++++++++++++++++++++++---------------------- include/linux/nfs_fs_sb.h | 11 ++-------- 4 files changed, 70 insertions(+), 56 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 2aeb4e52a4f1..cfeaadf56bf0 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -696,9 +696,18 @@ static int nfs_init_server(struct nfs_server *server, /* Initialise the client representation from the mount data */ server->flags = ctx->flags; server->options = ctx->options; - server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID| - NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP| - NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME; + server->caps |= NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS; + + switch (clp->rpc_ops->version) { + case 2: + server->fattr_valid = NFS_ATTR_FATTR_V2; + break; + case 3: + server->fattr_valid = NFS_ATTR_FATTR_V3; + break; + default: + server->fattr_valid = NFS_ATTR_FATTR_V4; + } if (ctx->rsize) server->rsize = nfs_block_size(ctx->rsize, NULL); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 7fa914e24fc4..6d04ebb4f084 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -438,6 +438,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st .fattr = fattr }; struct inode *inode = ERR_PTR(-ENOENT); + u64 fattr_supported = NFS_SB(sb)->fattr_valid; unsigned long hash; nfs_attr_check_mountpoint(sb, fattr); @@ -470,7 +471,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st inode->i_mode = fattr->mode; nfsi->cache_validity = 0; if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0 - && nfs_server_capable(inode, NFS_CAP_MODE)) + && (fattr_supported & NFS_ATTR_FATTR_MODE)) nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE); /* Why so? Because we want revalidate for devices/FIFOs, and * that's precisely what we have in nfs_file_inode_operations. 
@@ -516,15 +517,15 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st nfsi->attr_gencount = fattr->gencount; if (fattr->valid & NFS_ATTR_FATTR_ATIME) inode->i_atime = fattr->atime; - else if (nfs_server_capable(inode, NFS_CAP_ATIME)) + else if (fattr_supported & NFS_ATTR_FATTR_ATIME) nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME); if (fattr->valid & NFS_ATTR_FATTR_MTIME) inode->i_mtime = fattr->mtime; - else if (nfs_server_capable(inode, NFS_CAP_MTIME)) + else if (fattr_supported & NFS_ATTR_FATTR_MTIME) nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME); if (fattr->valid & NFS_ATTR_FATTR_CTIME) inode->i_ctime = fattr->ctime; - else if (nfs_server_capable(inode, NFS_CAP_CTIME)) + else if (fattr_supported & NFS_ATTR_FATTR_CTIME) nfs_set_cache_invalid(inode, NFS_INO_INVALID_CTIME); if (fattr->valid & NFS_ATTR_FATTR_CHANGE) inode_set_iversion_raw(inode, fattr->change_attr); @@ -536,26 +537,30 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st nfs_set_cache_invalid(inode, NFS_INO_INVALID_SIZE); if (fattr->valid & NFS_ATTR_FATTR_NLINK) set_nlink(inode, fattr->nlink); - else if (nfs_server_capable(inode, NFS_CAP_NLINK)) + else if (fattr_supported & NFS_ATTR_FATTR_NLINK) nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK); if (fattr->valid & NFS_ATTR_FATTR_OWNER) inode->i_uid = fattr->uid; - else if (nfs_server_capable(inode, NFS_CAP_OWNER)) + else if (fattr_supported & NFS_ATTR_FATTR_OWNER) nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); if (fattr->valid & NFS_ATTR_FATTR_GROUP) inode->i_gid = fattr->gid; - else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP)) + else if (fattr_supported & NFS_ATTR_FATTR_GROUP) nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); if (nfs_server_capable(inode, NFS_CAP_XATTR)) nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR); if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) inode->i_blocks = fattr->du.nfs2.blocks; - else if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { + else if (fattr_supported & NFS_ATTR_FATTR_BLOCKS_USED && + fattr->size != 0) + nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS); + if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { /* * report the blocks in 512byte units */ inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used); - } else if (fattr->size != 0) + } else if (fattr_supported & NFS_ATTR_FATTR_SPACE_USED && + fattr->size != 0) nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS); nfs_setsecurity(inode, fattr, label); @@ -1952,9 +1957,10 @@ EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc); */ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) { - struct nfs_server *server; + struct nfs_server *server = NFS_SERVER(inode); struct nfs_inode *nfsi = NFS_I(inode); loff_t cur_isize, new_isize; + u64 fattr_supported = server->fattr_valid; unsigned long invalid = 0; unsigned long now = jiffies; unsigned long save_cache_validity; @@ -1998,7 +2004,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) goto out_err; } - server = NFS_SERVER(inode); /* Update the fsid? 
*/ if (S_ISDIR(inode->i_mode) && (fattr->valid & NFS_ATTR_FATTR_FSID) && !nfs_fsid_equal(&server->fsid, &fattr->fsid) && @@ -2066,7 +2071,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (fattr->valid & NFS_ATTR_FATTR_MTIME) { inode->i_mtime = fattr->mtime; - } else if (server->caps & NFS_CAP_MTIME) { + } else if (fattr_supported & NFS_ATTR_FATTR_MTIME) { nfsi->cache_validity |= save_cache_validity & NFS_INO_INVALID_MTIME; cache_revalidated = false; @@ -2074,7 +2079,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (fattr->valid & NFS_ATTR_FATTR_CTIME) { inode->i_ctime = fattr->ctime; - } else if (server->caps & NFS_CAP_CTIME) { + } else if (fattr_supported & NFS_ATTR_FATTR_CTIME) { nfsi->cache_validity |= save_cache_validity & NFS_INO_INVALID_CTIME; cache_revalidated = false; @@ -2114,7 +2119,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (fattr->valid & NFS_ATTR_FATTR_ATIME) inode->i_atime = fattr->atime; - else if (server->caps & NFS_CAP_ATIME) { + else if (fattr_supported & NFS_ATTR_FATTR_ATIME) { nfsi->cache_validity |= save_cache_validity & NFS_INO_INVALID_ATIME; cache_revalidated = false; @@ -2129,7 +2134,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | NFS_INO_INVALID_ACL; attr_changed = true; } - } else if (server->caps & NFS_CAP_MODE) { + } else if (fattr_supported & NFS_ATTR_FATTR_MODE) { nfsi->cache_validity |= save_cache_validity & NFS_INO_INVALID_MODE; cache_revalidated = false; @@ -2142,7 +2147,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) inode->i_uid = fattr->uid; attr_changed = true; } - } else if (server->caps & NFS_CAP_OWNER) { + } else if (fattr_supported & NFS_ATTR_FATTR_OWNER) { nfsi->cache_validity |= save_cache_validity & NFS_INO_INVALID_OTHER; cache_revalidated = false; @@ -2155,7 +2160,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) inode->i_gid = fattr->gid; attr_changed = true; } - } else if (server->caps & NFS_CAP_OWNER_GROUP) { + } else if (fattr_supported & NFS_ATTR_FATTR_GROUP) { nfsi->cache_validity |= save_cache_validity & NFS_INO_INVALID_OTHER; cache_revalidated = false; @@ -2168,7 +2173,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) set_nlink(inode, fattr->nlink); attr_changed = true; } - } else if (server->caps & NFS_CAP_NLINK) { + } else if (fattr_supported & NFS_ATTR_FATTR_NLINK) { nfsi->cache_validity |= save_cache_validity & NFS_INO_INVALID_NLINK; cache_revalidated = false; @@ -2179,9 +2184,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) * report the blocks in 512byte units */ inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used); - } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) + } else if (fattr_supported & NFS_ATTR_FATTR_SPACE_USED) { + nfsi->cache_validity |= + save_cache_validity & NFS_INO_INVALID_BLOCKS; + cache_revalidated = false; + } + + if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) { inode->i_blocks = fattr->du.nfs2.blocks; - else { + } else if (fattr_supported & NFS_ATTR_FATTR_BLOCKS_USED) { nfsi->cache_validity |= save_cache_validity & NFS_INO_INVALID_BLOCKS; cache_revalidated = false; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index bcbb057d5529..21c31aebb116 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3868,12 +3868,9 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f res.attr_bitmask[2] &= 
FATTR4_WORD2_NFS42_MASK; } memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); - server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| - NFS_CAP_SYMLINKS|NFS_CAP_FILEID| - NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| - NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| - NFS_CAP_CTIME|NFS_CAP_MTIME| - NFS_CAP_SECURITY_LABEL); + server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | + NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL); + server->fattr_valid = NFS_ATTR_FATTR_V4; if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) server->caps |= NFS_CAP_ACLS; @@ -3881,25 +3878,29 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f server->caps |= NFS_CAP_HARDLINKS; if (res.has_symlinks != 0) server->caps |= NFS_CAP_SYMLINKS; - if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) - server->caps |= NFS_CAP_FILEID; - if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) - server->caps |= NFS_CAP_MODE; - if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) - server->caps |= NFS_CAP_NLINK; - if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) - server->caps |= NFS_CAP_OWNER; - if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) - server->caps |= NFS_CAP_OWNER_GROUP; - if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) - server->caps |= NFS_CAP_ATIME; - if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) - server->caps |= NFS_CAP_CTIME; - if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) - server->caps |= NFS_CAP_MTIME; + if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID)) + server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID; + if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE)) + server->fattr_valid &= ~NFS_ATTR_FATTR_MODE; + if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)) + server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK; + if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER)) + server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER | + NFS_ATTR_FATTR_OWNER_NAME); + if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)) + server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP | + NFS_ATTR_FATTR_GROUP_NAME); + if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)) + server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED; + if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)) + server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME; + if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)) + server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME; + if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)) + server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME; #ifdef CONFIG_NFS_V4_SECURITY_LABEL - if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) - server->caps |= NFS_CAP_SECURITY_LABEL; + if (!(res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)) + server->fattr_valid &= ~NFS_ATTR_FATTR_V4_SECURITY_LABEL; #endif memcpy(server->attr_bitmask_nl, res.attr_bitmask, sizeof(server->attr_bitmask)); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index fbcdfd9f7a7f..d28d7a62864f 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -191,6 +191,8 @@ struct nfs_server { dev_t s_dev; /* superblock dev numbers */ struct nfs_auth_info auth_info; /* parsed auth flavors */ + __u64 fattr_valid; /* Valid attributes */ + #ifdef CONFIG_NFS_FSCACHE struct nfs_fscache_key *fscache_key; /* unique key for superblock */ struct fscache_cookie *fscache; /* superblock cookie */ @@ -267,16 +269,7 @@ struct nfs_server { #define NFS_CAP_SYMLINKS (1U << 2) #define NFS_CAP_ACLS (1U << 3) #define NFS_CAP_ATOMIC_OPEN (1U << 4) -/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */ #define NFS_CAP_LGOPEN (1U << 5) -#define 
NFS_CAP_FILEID (1U << 6) -#define NFS_CAP_MODE (1U << 7) -#define NFS_CAP_NLINK (1U << 8) -#define NFS_CAP_OWNER (1U << 9) -#define NFS_CAP_OWNER_GROUP (1U << 10) -#define NFS_CAP_ATIME (1U << 11) -#define NFS_CAP_CTIME (1U << 12) -#define NFS_CAP_MTIME (1U << 13) #define NFS_CAP_POSIX_LOCK (1U << 14) #define NFS_CAP_UIDGID_NOMAP (1U << 15) #define NFS_CAP_STATEID_NFSV41 (1U << 16) -- cgit v1.2.3-71-gd317 From d3debfcc4e3f65f1370ad4ca2ab61e7f0ff683cd Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 14 Apr 2021 17:41:14 +0100 Subject: bug: Provide dummy version of bug_get_file_line() when !GENERIC_BUG Provide the missing dummy bug_get_file_line() implementation when GENENERIC_BUG isn't selected. Reported-by: kernel test robot Fixes: 26dbc7e299c7 ("bug: Factor out a getter for a bug's file line") Cc: Andrew Scull Signed-off-by: Marc Zyngier --- include/linux/bug.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bug.h b/include/linux/bug.h index e3841bee4c8d..348acf2558f3 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -61,6 +61,13 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, return BUG_TRAP_TYPE_BUG; } +struct bug_entry; +static inline void bug_get_file_line(struct bug_entry *bug, const char **file, + unsigned int *line) +{ + *file = NULL; + *line = 0; +} static inline void generic_bug_clear_once(void) {} -- cgit v1.2.3-71-gd317 From 80e5d1ff5d5f1ed5167a69b7c2fe86071b615f6b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 15 Apr 2021 19:46:50 -0400 Subject: useful constants: struct qstr for ".." Signed-off-by: Al Viro --- fs/dcache.c | 2 ++ fs/ext2/namei.c | 3 +-- fs/ext4/namei.c | 3 +-- fs/f2fs/dir.c | 4 +--- fs/f2fs/namei.c | 3 +-- fs/fuse/inode.c | 3 +-- fs/nilfs2/namei.c | 3 +-- fs/udf/namei.c | 3 +-- fs/ufs/super.c | 3 +-- include/linux/dcache.h | 1 + 10 files changed, 11 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/fs/dcache.c b/fs/dcache.c index 7d24ff7eb206..cf871a81f4fd 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -84,6 +84,8 @@ const struct qstr empty_name = QSTR_INIT("", 0); EXPORT_SYMBOL(empty_name); const struct qstr slash_name = QSTR_INIT("/", 1); EXPORT_SYMBOL(slash_name); +const struct qstr dotdot_name = QSTR_INIT("..", 2); +EXPORT_SYMBOL(dotdot_name); /* * This is the single most critical data structure when it comes diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 3367384d344d..765fb389550f 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -81,11 +81,10 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, uns struct dentry *ext2_get_parent(struct dentry *child) { - struct qstr dotdot = QSTR_INIT("..", 2); ino_t ino; int res; - res = ext2_inode_by_name(d_inode(child), &dotdot, &ino); + res = ext2_inode_by_name(d_inode(child), &dotdot_name, &ino); if (res) return ERR_PTR(res); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 686bf982c84e..a9db885abb8b 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1739,11 +1739,10 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi struct dentry *ext4_get_parent(struct dentry *child) { __u32 ino; - static const struct qstr dotdot = QSTR_INIT("..", 2); struct ext4_dir_entry_2 * de; struct buffer_head *bh; - bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL); + bh = ext4_find_entry(d_inode(child), &dotdot_name, &de, NULL); if (IS_ERR(bh)) return ERR_CAST(bh); if (!bh) diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 
e6270a867be1..e211a1b6b013 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -449,9 +449,7 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p) { - struct qstr dotdot = QSTR_INIT("..", 2); - - return f2fs_find_entry(dir, &dotdot, p); + return f2fs_find_entry(dir, &dotdot_name, p); } ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 17bd072a5d39..f2ad7931e949 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -416,9 +416,8 @@ out: struct dentry *f2fs_get_parent(struct dentry *child) { - struct qstr dotdot = QSTR_INIT("..", 2); struct page *page; - unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot, &page); + unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot_name, &page); if (!ino) { if (IS_ERR(page)) return ERR_CAST(page); diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index b0e18b470e91..8665e4d24691 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -872,14 +872,13 @@ static struct dentry *fuse_get_parent(struct dentry *child) struct inode *inode; struct dentry *parent; struct fuse_entry_out outarg; - const struct qstr name = QSTR_INIT("..", 2); int err; if (!fc->export_support) return ERR_PTR(-ESTALE); err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode), - &name, &outarg, &inode); + &dotdot_name, &outarg, &inode); if (err) { if (err == -ENOENT) return ERR_PTR(-ESTALE); diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index ecace5f96a95..b2709874ced5 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -440,10 +440,9 @@ static struct dentry *nilfs_get_parent(struct dentry *child) { unsigned long ino; struct inode *inode; - struct qstr dotdot = QSTR_INIT("..", 2); struct nilfs_root *root; - ino = nilfs_inode_by_name(d_inode(child), &dotdot); + ino = nilfs_inode_by_name(d_inode(child), &dotdot_name); if (!ino) return ERR_PTR(-ENOENT); diff --git a/fs/udf/namei.c b/fs/udf/namei.c index f146b3089f3d..3ae9f1e91984 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -1215,11 +1215,10 @@ static struct dentry *udf_get_parent(struct dentry *child) { struct kernel_lb_addr tloc; struct inode *inode = NULL; - struct qstr dotdot = QSTR_INIT("..", 2); struct fileIdentDesc cfi; struct udf_fileident_bh fibh; - if (!udf_find_entry(d_inode(child), &dotdot, &fibh, &cfi)) + if (!udf_find_entry(d_inode(child), &dotdot_name, &fibh, &cfi)) return ERR_PTR(-EACCES); if (fibh.sbh != fibh.ebh) diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 983558b572c7..74028b5a7b0a 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -128,10 +128,9 @@ static struct dentry *ufs_fh_to_parent(struct super_block *sb, struct fid *fid, static struct dentry *ufs_get_parent(struct dentry *child) { - struct qstr dot_dot = QSTR_INIT("..", 2); ino_t ino; - ino = ufs_inode_by_name(d_inode(child), &dot_dot); + ino = ufs_inode_by_name(d_inode(child), &dotdot_name); if (!ino) return ERR_PTR(-ENOENT); return d_obtain_alias(ufs_iget(child->d_sb, ino)); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 4ecde5d8250c..9e23d33bb6f1 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -59,6 +59,7 @@ struct qstr { extern const struct qstr empty_name; extern const struct qstr slash_name; +extern const struct qstr dotdot_name; struct dentry_stat_t { long nr_dentry; -- cgit v1.2.3-71-gd317 From 42eb0d54c08a0331d6d295420f602237968d792b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 25 Mar 2021 09:22:09 
+0100 Subject: fs: split receive_fd_replace from __receive_fd receive_fd_replace shares almost no code with the general case, so split it out. Also remove the "Bump the sock usage counts" comment from both copies, as that is now what __receive_sock actually does. [AV: ... and make the only user of receive_fd_replace() choose between it and receive_fd() according to what userland had passed to it in flags] Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/file.c | 39 +++++++++++++++++++-------------------- include/linux/file.h | 11 ++++------- kernel/seccomp.c | 17 ++++++++++++----- 3 files changed, 35 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/fs/file.c b/fs/file.c index f3a4bac2cbe9..d8ccb95a7f41 100644 --- a/fs/file.c +++ b/fs/file.c @@ -1068,8 +1068,6 @@ out_unlock: /** * __receive_fd() - Install received file into file descriptor table - * - * @fd: fd to install into (if negative, a new fd will be allocated) * @file: struct file that was received from another process * @ufd: __user pointer to write new fd number to * @o_flags: the O_* flags to apply to the new fd entry @@ -1083,7 +1081,7 @@ out_unlock: * * Returns newly install fd or -ve on error. */ -int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flags) +int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) { int new_fd; int error; @@ -1092,32 +1090,33 @@ int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flag if (error) return error; - if (fd < 0) { - new_fd = get_unused_fd_flags(o_flags); - if (new_fd < 0) - return new_fd; - } else { - new_fd = fd; - } + new_fd = get_unused_fd_flags(o_flags); + if (new_fd < 0) + return new_fd; if (ufd) { error = put_user(new_fd, ufd); if (error) { - if (fd < 0) - put_unused_fd(new_fd); + put_unused_fd(new_fd); return error; } } - if (fd < 0) { - fd_install(new_fd, get_file(file)); - } else { - error = replace_fd(new_fd, file, o_flags); - if (error) - return error; - } + fd_install(new_fd, get_file(file)); + __receive_sock(file); + return new_fd; +} - /* Bump the sock usage counts, if any. 
*/ +int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags) +{ + int error; + + error = security_file_receive(file); + if (error) + return error; + error = replace_fd(new_fd, file, o_flags); + if (error) + return error; __receive_sock(file); return new_fd; } diff --git a/include/linux/file.h b/include/linux/file.h index 225982792fa2..2de2e4613d7b 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -92,23 +92,20 @@ extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); -extern int __receive_fd(int fd, struct file *file, int __user *ufd, +extern int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags); static inline int receive_fd_user(struct file *file, int __user *ufd, unsigned int o_flags) { if (ufd == NULL) return -EFAULT; - return __receive_fd(-1, file, ufd, o_flags); + return __receive_fd(file, ufd, o_flags); } static inline int receive_fd(struct file *file, unsigned int o_flags) { - return __receive_fd(-1, file, NULL, o_flags); -} -static inline int receive_fd_replace(int fd, struct file *file, unsigned int o_flags) -{ - return __receive_fd(fd, file, NULL, o_flags); + return __receive_fd(file, NULL, o_flags); } +int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags); extern void flush_delayed_fput(void); extern void __fput_sync(struct file *); diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 1d60fc2c9987..4fe19cecaa94 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -119,8 +119,11 @@ struct seccomp_kaddfd { int fd; unsigned int flags; - /* To only be set on reply */ - int ret; + union { + bool setfd; + /* To only be set on reply */ + int ret; + }; struct completion completion; struct list_head list; }; @@ -1069,7 +1072,11 @@ static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd) * that it has been handled. */ list_del_init(&addfd->list); - addfd->ret = receive_fd_replace(addfd->fd, addfd->file, addfd->flags); + if (!addfd->setfd) + addfd->ret = receive_fd(addfd->file, addfd->flags); + else + addfd->ret = receive_fd_replace(addfd->fd, addfd->file, + addfd->flags); complete(&addfd->completion); } @@ -1583,8 +1590,8 @@ static long seccomp_notify_addfd(struct seccomp_filter *filter, return -EBADF; kaddfd.flags = addfd.newfd_flags; - kaddfd.fd = (addfd.flags & SECCOMP_ADDFD_FLAG_SETFD) ? - addfd.newfd : -1; + kaddfd.setfd = addfd.flags & SECCOMP_ADDFD_FLAG_SETFD; + kaddfd.fd = addfd.newfd; init_completion(&kaddfd.completion); ret = mutex_lock_interruptible(&filter->notify_lock); -- cgit v1.2.3-71-gd317 From c0aec6680b6c82fe893a546e322e1130cd5cf21e Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 1 Apr 2021 14:56:25 +0100 Subject: iommu: Statically set module owner It happens that the 3 drivers which first supported being modular are also ones which play games with their pgsize_bitmap, so have non-const iommu_ops where dynamically setting the owner manages to work out OK. However, it's less than ideal to force that upon all drivers which want to be modular - like the new sprd-iommu driver which now has a potential bug in that regard - so let's just statically set the module owner and let ops remain const wherever possible. 
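In practice a modular driver now spells the ownership out in its ops table rather than relying on the iommu_device_set_ops() macro to patch THIS_MODULE in, so the ops can stay const. A minimal, hypothetical example of the pattern (not a real driver; callbacks elided):

#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/sizes.h>

static const struct iommu_ops my_iommu_ops = {
	/* .domain_alloc, .probe_device, ... driver callbacks elided ... */
	.pgsize_bitmap	= SZ_4K,	/* fixed at build time, so const is fine */
	.owner		= THIS_MODULE,	/* set statically, no setter macro needed */
};

Drivers such as sprd-iommu, which already register const ops without an owner, are covered by the same one-line addition below.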
Reviewed-by: Christoph Hellwig Acked-by: Will Deacon Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/31423b99ff609c3d4b291c701a7a7a810d9ce8dc.1617285386.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 1 + drivers/iommu/arm/arm-smmu/arm-smmu.c | 1 + drivers/iommu/sprd-iommu.c | 1 + drivers/iommu/virtio-iommu.c | 1 + include/linux/iommu.h | 9 +-------- 5 files changed, 5 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 6f8ffc86a115..bee7ce246dd2 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2674,6 +2674,7 @@ static struct iommu_ops arm_smmu_ops = { .sva_unbind = arm_smmu_sva_unbind, .sva_get_pasid = arm_smmu_sva_get_pasid, .pgsize_bitmap = -1UL, /* Restricted during device attach */ + .owner = THIS_MODULE, }; /* Probing and initialisation functions */ diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 3c6adcdb201b..837a2f1816b7 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1577,6 +1577,7 @@ static struct iommu_ops arm_smmu_ops = { .put_resv_regions = generic_iommu_put_resv_regions, .def_domain_type = arm_smmu_def_domain_type, .pgsize_bitmap = -1UL, /* Restricted during device attach */ + .owner = THIS_MODULE, }; static void arm_smmu_device_reset(struct arm_smmu_device *smmu) diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c index 3811a5085b50..89272f9fd8f3 100644 --- a/drivers/iommu/sprd-iommu.c +++ b/drivers/iommu/sprd-iommu.c @@ -436,6 +436,7 @@ static const struct iommu_ops sprd_iommu_ops = { .device_group = sprd_iommu_device_group, .of_xlate = sprd_iommu_of_xlate, .pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT, + .owner = THIS_MODULE, }; static const struct of_device_id sprd_iommu_of_match[] = { diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 2bfdd5734844..594ed827e944 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -945,6 +945,7 @@ static struct iommu_ops viommu_ops = { .get_resv_regions = viommu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .of_xlate = viommu_of_xlate, + .owner = THIS_MODULE, }; static int viommu_init_vqs(struct viommu_dev *viommu) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 9223a8266b08..ce904cf4e774 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -361,19 +361,12 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link); void iommu_device_unlink(struct iommu_device *iommu, struct device *link); int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain); -static inline void __iommu_device_set_ops(struct iommu_device *iommu, +static inline void iommu_device_set_ops(struct iommu_device *iommu, const struct iommu_ops *ops) { iommu->ops = ops; } -#define iommu_device_set_ops(iommu, ops) \ -do { \ - struct iommu_ops *__ops = (struct iommu_ops *)(ops); \ - __ops->owner = THIS_MODULE; \ - __iommu_device_set_ops(iommu, __ops); \ -} while (0) - static inline void iommu_device_set_fwnode(struct iommu_device *iommu, struct fwnode_handle *fwnode) { -- cgit v1.2.3-71-gd317 From 2d471b20c55e13c98d1dba413bf2de618e89cdac Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 1 Apr 2021 14:56:26 +0100 Subject: iommu: Streamline registration interface Rather than have 
separate opaque setter functions that are easy to overlook and lead to repetitive boilerplate in drivers, let's pass the relevant initialisation parameters directly to iommu_device_register(). Acked-by: Will Deacon Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/ab001b87c533b6f4db71eb90db6f888953986c36.1617285386.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/init.c | 3 +-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 5 +---- drivers/iommu/arm/arm-smmu/arm-smmu.c | 5 +---- drivers/iommu/arm/arm-smmu/qcom_iommu.c | 5 +---- drivers/iommu/exynos-iommu.c | 5 +---- drivers/iommu/fsl_pamu_domain.c | 4 +--- drivers/iommu/intel/dmar.c | 4 +--- drivers/iommu/intel/iommu.c | 3 +-- drivers/iommu/iommu.c | 19 +++++++++++++++++- drivers/iommu/ipmmu-vmsa.c | 6 +----- drivers/iommu/msm_iommu.c | 5 +---- drivers/iommu/mtk_iommu.c | 5 +---- drivers/iommu/mtk_iommu_v1.c | 4 +--- drivers/iommu/omap-iommu.c | 5 +---- drivers/iommu/rockchip-iommu.c | 5 +---- drivers/iommu/s390-iommu.c | 4 +--- drivers/iommu/sprd-iommu.c | 5 +---- drivers/iommu/sun50i-iommu.c | 5 +---- drivers/iommu/tegra-gart.c | 5 +---- drivers/iommu/tegra-smmu.c | 5 +---- drivers/iommu/virtio-iommu.c | 5 +---- include/linux/iommu.h | 30 ++++++----------------------- 22 files changed, 44 insertions(+), 98 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 54f6e99528f3..d006724f4dc2 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -1888,8 +1888,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, amd_iommu_groups, "ivhd%d", iommu->index); - iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops); - iommu_device_register(&iommu->iommu); + iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); return pci_enable_device(iommu->dev); } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index bee7ce246dd2..54b2f27b81d4 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3666,10 +3666,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (ret) return ret; - iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); - iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); - - ret = iommu_device_register(&smmu->iommu); + ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); if (ret) { dev_err(dev, "Failed to register iommu\n"); return ret; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 837a2f1816b7..6f72c4d208ca 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -2161,10 +2161,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) return err; } - iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); - iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); - - err = iommu_device_register(&smmu->iommu); + err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); if (err) { dev_err(dev, "Failed to register iommu\n"); return err; diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index 7f280c8d5c53..4294abe389b2 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -847,10 +847,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) return ret; } - iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops); - 
iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode); - - ret = iommu_device_register(&qcom_iommu->iommu); + ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev); if (ret) { dev_err(dev, "Failed to register iommu\n"); return ret; diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 8fa9a591fb96..7623d8c371f5 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -630,10 +630,7 @@ static int exynos_sysmmu_probe(struct platform_device *pdev) if (ret) return ret; - iommu_device_set_ops(&data->iommu, &exynos_iommu_ops); - iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode); - - ret = iommu_device_register(&data->iommu); + ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev); if (ret) return ret; diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 0ac781186dbd..a47f47307109 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -474,9 +474,7 @@ int __init pamu_domain_init(void) if (ret) return ret; - iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops); - - ret = iommu_device_register(&pamu_iommu); + ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL); if (ret) { iommu_device_sysfs_remove(&pamu_iommu); pr_err("Can't register iommu device\n"); diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 6971397805f3..1757ac1e1623 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1140,9 +1140,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) if (err) goto err_unmap; - iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); - - err = iommu_device_register(&iommu->iommu); + err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); if (err) goto err_unmap; } diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 0e04d450c38a..708f430af1c4 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4396,8 +4396,7 @@ int __init intel_iommu_init(void) iommu_device_sysfs_add(&iommu->iommu, NULL, intel_iommu_groups, "%s", iommu->name); - iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); - iommu_device_register(&iommu->iommu); + iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); } up_read(&dmar_global_lock); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index c4ad9c644802..808ab70d5df5 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -142,8 +142,25 @@ static int __init iommu_subsys_init(void) } subsys_initcall(iommu_subsys_init); -int iommu_device_register(struct iommu_device *iommu) +/** + * iommu_device_register() - Register an IOMMU hardware instance + * @iommu: IOMMU handle for the instance + * @ops: IOMMU ops to associate with the instance + * @hwdev: (optional) actual instance device, used for fwnode lookup + * + * Return: 0 on success, or an error. 
+ */ +int iommu_device_register(struct iommu_device *iommu, + const struct iommu_ops *ops, struct device *hwdev) { + /* We need to be able to take module references appropriately */ + if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) + return -EINVAL; + + iommu->ops = ops; + if (hwdev) + iommu->fwnode = hwdev->fwnode; + spin_lock(&iommu_device_lock); list_add_tail(&iommu->list, &iommu_device_list); spin_unlock(&iommu_device_lock); diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index eaaec0a55cc6..aaa6a4d59057 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1076,11 +1076,7 @@ static int ipmmu_probe(struct platform_device *pdev) if (ret) return ret; - iommu_device_set_ops(&mmu->iommu, &ipmmu_ops); - iommu_device_set_fwnode(&mmu->iommu, - &pdev->dev.of_node->fwnode); - - ret = iommu_device_register(&mmu->iommu); + ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev); if (ret) return ret; diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index f0ba6a09b434..7880f307cb2d 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -792,10 +792,7 @@ static int msm_iommu_probe(struct platform_device *pdev) goto fail; } - iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops); - iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode); - - ret = iommu_device_register(&iommu->iommu); + ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev); if (ret) { pr_err("Could not register msm-smmu at %pa\n", &ioaddr); goto fail; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index cdda3a85fc08..e06b8a0e2b56 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -892,10 +892,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) goto out_link_remove; - iommu_device_set_ops(&data->iommu, &mtk_iommu_ops); - iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode); - - ret = iommu_device_register(&data->iommu); + ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); if (ret) goto out_sysfs_remove; diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 8ba9a2ec5509..5915d7b38211 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -616,9 +616,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) return ret; - iommu_device_set_ops(&data->iommu, &mtk_iommu_ops); - - ret = iommu_device_register(&data->iommu); + ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); if (ret) goto out_sysfs_remove; diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 71f29c0927fc..26e517eb0dd3 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1235,10 +1235,7 @@ static int omap_iommu_probe(struct platform_device *pdev) if (err) goto out_group; - iommu_device_set_ops(&obj->iommu, &omap_iommu_ops); - iommu_device_set_fwnode(&obj->iommu, &of->fwnode); - - err = iommu_device_register(&obj->iommu); + err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev); if (err) goto out_sysfs; } diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index e5d86b7177de..7a2932772fdf 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1196,10 +1196,7 @@ static int rk_iommu_probe(struct platform_device *pdev) if (err) goto err_put_group; - iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops); - iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode); - - err 
= iommu_device_register(&iommu->iommu); + err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev); if (err) goto err_remove_sysfs; diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 8895dbb705eb..6019e58ce4fb 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -333,9 +333,7 @@ int zpci_init_iommu(struct zpci_dev *zdev) if (rc) goto out_err; - iommu_device_set_ops(&zdev->iommu_dev, &s390_iommu_ops); - - rc = iommu_device_register(&zdev->iommu_dev); + rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL); if (rc) goto out_sysfs; diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c index 89272f9fd8f3..73dfd9946312 100644 --- a/drivers/iommu/sprd-iommu.c +++ b/drivers/iommu/sprd-iommu.c @@ -508,10 +508,7 @@ static int sprd_iommu_probe(struct platform_device *pdev) if (ret) goto put_group; - iommu_device_set_ops(&sdev->iommu, &sprd_iommu_ops); - iommu_device_set_fwnode(&sdev->iommu, &dev->of_node->fwnode); - - ret = iommu_device_register(&sdev->iommu); + ret = iommu_device_register(&sdev->iommu, &sprd_iommu_ops, dev); if (ret) goto remove_sysfs; diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index ea6db1341916..181bb1c3437c 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -968,10 +968,7 @@ static int sun50i_iommu_probe(struct platform_device *pdev) if (ret) goto err_free_group; - iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops); - iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode); - - ret = iommu_device_register(&iommu->iommu); + ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev); if (ret) goto err_remove_sysfs; diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index 6f130e51f072..6a358f92c7e5 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -353,10 +353,7 @@ struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc) if (err) goto free_gart; - iommu_device_set_ops(&gart->iommu, &gart_iommu_ops); - iommu_device_set_fwnode(&gart->iommu, dev->fwnode); - - err = iommu_device_register(&gart->iommu); + err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev); if (err) goto remove_sysfs; diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 602aab98c079..1e98dc63ad13 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -1145,10 +1145,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, if (err) return ERR_PTR(err); - iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops); - iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); - - err = iommu_device_register(&smmu->iommu); + err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev); if (err) goto remove_sysfs; diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 594ed827e944..7c02481a81b4 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -1066,10 +1066,7 @@ static int viommu_probe(struct virtio_device *vdev) if (ret) goto err_free_vqs; - iommu_device_set_ops(&viommu->iommu, &viommu_ops); - iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode); - - iommu_device_register(&viommu->iommu); + iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev); #ifdef CONFIG_PCI if (pci_bus_type.iommu_ops != &viommu_ops) { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index ce904cf4e774..32d448050bf7 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -350,7 +350,9 @@ 
struct dev_iommu { void *priv; }; -int iommu_device_register(struct iommu_device *iommu); +int iommu_device_register(struct iommu_device *iommu, + const struct iommu_ops *ops, + struct device *hwdev); void iommu_device_unregister(struct iommu_device *iommu); int iommu_device_sysfs_add(struct iommu_device *iommu, struct device *parent, @@ -361,18 +363,6 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link); void iommu_device_unlink(struct iommu_device *iommu, struct device *link); int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain); -static inline void iommu_device_set_ops(struct iommu_device *iommu, - const struct iommu_ops *ops) -{ - iommu->ops = ops; -} - -static inline void iommu_device_set_fwnode(struct iommu_device *iommu, - struct fwnode_handle *fwnode) -{ - iommu->fwnode = fwnode; -} - static inline struct iommu_device *dev_to_iommu_device(struct device *dev) { return (struct iommu_device *)dev_get_drvdata(dev); @@ -858,21 +848,13 @@ static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain, return 0; } -static inline int iommu_device_register(struct iommu_device *iommu) +static inline int iommu_device_register(struct iommu_device *iommu, + const struct iommu_ops *ops, + struct device *hwdev) { return -ENODEV; } -static inline void iommu_device_set_ops(struct iommu_device *iommu, - const struct iommu_ops *ops) -{ -} - -static inline void iommu_device_set_fwnode(struct iommu_device *iommu, - struct fwnode_handle *fwnode) -{ -} - static inline struct iommu_device *dev_to_iommu_device(struct device *dev) { return NULL; -- cgit v1.2.3-71-gd317 From 7f100744749e4fe547dece3bb6557fae5f0a7252 Mon Sep 17 00:00:00 2001 From: Vidya Sagar Date: Fri, 16 Apr 2021 19:15:37 +0530 Subject: PCI: tegra: Add Tegra194 MCFG quirks for ECAM errata The PCIe controller in the Tegra194 SoC is not ECAM-compliant. With the current hardware design, ECAM can be enabled only for one controller (the C5 controller), with bus numbers starting from 160 instead of 0. To avoid this abnormal way of enabling ECAM for just one controller, a different approach is taken that instead enables configuration space access for all the controllers. In this approach, ops are added through the MCFG quirk mechanism; they access the configuration spaces by dynamically programming the iATU (internal Address Translation Unit) to generate the respective configuration accesses, just as it is done in the DesignWare core sub-system. This issue is specific to Tegra194 and will be fixed in future generations of Tegra SoCs.
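For illustration only (a sketch of the routing logic the quirk ops below implement, not additional code in the patch), a config access is steered by bus depth: the root bus maps to the controller's own DBI space, its immediate child gets a Type 0 access, and deeper buses get a Type 1 access, with the iATU reprogrammed on each access:

  /* Illustrative sketch; names follow the quirk code added below. */
  if (bus->number == cfg->busr.start)            /* root port itself     */
          return pcie_ecam->dbi_base + where;    /* its DBI registers    */
  if (bus->parent->number == cfg->busr.start)    /* immediate child      */
          type = PCIE_ATU_TYPE_CFG0;             /* Type 0 config cycle  */
  else                                           /* deeper buses         */
          type = PCIE_ATU_TYPE_CFG1;             /* Type 1 config cycle  */
  program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev, SZ_256K);
  return pcie_ecam->config_base + where;         /* window now hits busdev */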
Link: https://lore.kernel.org/r/20210416134537.19474-1-vidyas@nvidia.com Signed-off-by: Vidya Sagar Signed-off-by: Bjorn Helgaas --- drivers/acpi/pci_mcfg.c | 7 ++ drivers/pci/controller/dwc/Makefile | 2 +- drivers/pci/controller/dwc/pcie-tegra194.c | 102 +++++++++++++++++++++++++++++ include/linux/pci-ecam.h | 1 + 4 files changed, 111 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 95f23acd5b80..53cab975f612 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -116,6 +116,13 @@ static struct mcfg_fixup mcfg_quirks[] = { THUNDER_ECAM_QUIRK(2, 12), THUNDER_ECAM_QUIRK(2, 13), + { "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops}, + #define XGENE_V1_ECAM_MCFG(rev, seg) \ {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ &xgene_v1_pcie_ecam_ops } diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index a751553fa0db..dbb981876556 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -17,7 +17,6 @@ obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o obj-$(CONFIG_PCI_MESON) += pci-meson.o -obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o @@ -34,4 +33,5 @@ obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o ifdef CONFIG_PCI obj-$(CONFIG_ARM64) += pcie-al.o obj-$(CONFIG_ARM64) += pcie-hisi.o +obj-$(CONFIG_ARM64) += pcie-tegra194.o endif diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 6fa216e52d14..a3979d358d06 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include #include #include @@ -311,6 +313,104 @@ struct tegra_pcie_dw_of_data { enum dw_pcie_device_mode mode; }; +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) +struct tegra194_pcie_ecam { + void __iomem *config_base; + void __iomem *iatu_base; + void __iomem *dbi_base; +}; + +static int tegra194_acpi_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct tegra194_pcie_ecam *pcie_ecam; + + pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL); + if (!pcie_ecam) + return -ENOMEM; + + pcie_ecam->config_base = cfg->win; + pcie_ecam->iatu_base = cfg->win + SZ_256K; + pcie_ecam->dbi_base = cfg->win + SZ_512K; + cfg->priv = pcie_ecam; + + return 0; +} + +static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index, + u32 val, u32 reg) +{ + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + + writel(val, pcie_ecam->iatu_base + offset + reg); +} + +static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam, + int index, int type, u64 cpu_addr, + u64 pci_addr, u64 size) +{ + atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr), + PCIE_ATU_LOWER_BASE); + atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr), + PCIE_ATU_UPPER_BASE); + atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr), + PCIE_ATU_LOWER_TARGET); + 
atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1), + PCIE_ATU_LIMIT); + atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr), + PCIE_ATU_UPPER_TARGET); + atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1); + atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +} + +static void __iomem *tegra194_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_config_window *cfg = bus->sysdata; + struct tegra194_pcie_ecam *pcie_ecam = cfg->priv; + u32 busdev; + int type; + + if (bus->number < cfg->busr.start || bus->number > cfg->busr.end) + return NULL; + + if (bus->number == cfg->busr.start) { + if (PCI_SLOT(devfn) == 0) + return pcie_ecam->dbi_base + where; + else + return NULL; + } + + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | + PCIE_ATU_FUNC(PCI_FUNC(devfn)); + + if (bus->parent->number == cfg->busr.start) { + if (PCI_SLOT(devfn) == 0) + type = PCIE_ATU_TYPE_CFG0; + else + return NULL; + } else { + type = PCIE_ATU_TYPE_CFG1; + } + + program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev, + SZ_256K); + + return pcie_ecam->config_base + where; +} + +const struct pci_ecam_ops tegra194_pcie_ops = { + .init = tegra194_acpi_init, + .pci_ops = { + .map_bus = tegra194_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } +}; +#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */ + +#ifdef CONFIG_PCIE_TEGRA194 + static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci) { return container_of(pci, struct tegra_pcie_dw, pci); @@ -2311,3 +2411,5 @@ MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match); MODULE_AUTHOR("Vidya Sagar "); MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); MODULE_LICENSE("GPL v2"); + +#endif /* CONFIG_PCIE_TEGRA194 */ diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 65d3d83015c3..fbdadd4d8377 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -85,6 +85,7 @@ extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ +extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ #endif #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) -- cgit v1.2.3-71-gd317 From 5f7c292b8975c9146063abbb91c0b9cdc1a5e9c5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 25 Mar 2021 19:19:47 -0700 Subject: KVM: Move prototypes for MMU notifier callbacks to generic code Move the prototypes for the MMU notifier callbacks out of arch code and into common code. There is no benefit to having each arch replicate the prototypes since any deviation from the invocation in common code will explode. No functional change intended. 
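As a rough sketch of the end state (illustration, not part of the diff), an architecture now only defines the opt-in macro; the single shared copy of the prototypes lives in include/linux/kvm_host.h behind that guard:

  /* arch/<arch>/include/asm/kvm_host.h: opt in, no local prototypes */
  #define KVM_ARCH_WANT_MMU_NOTIFIER

  /* include/linux/kvm_host.h: one set of declarations for all archs */
  #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
  int kvm_unmap_hva_range(struct kvm *kvm,
                          unsigned long start, unsigned long end, unsigned flags);
  int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
  int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
  int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
  #endif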
Signed-off-by: Sean Christopherson Message-Id: <20210326021957.1424875-9-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/arm64/include/asm/kvm_host.h | 5 ----- arch/mips/include/asm/kvm_host.h | 5 ----- arch/powerpc/include/asm/kvm_host.h | 7 ------- arch/x86/include/asm/kvm_host.h | 6 +----- include/linux/kvm_host.h | 8 ++++++++ 5 files changed, 9 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3d10e6527f7d..72e6b4600264 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -582,11 +582,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events); #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end, unsigned flags); -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 3a5612e7304c..feaa77036b67 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -967,11 +967,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, bool write); #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end, unsigned flags); -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); /* Emulation */ int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 05fb00d37609..1e83359f286b 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -56,13 +56,6 @@ #define KVM_ARCH_WANT_MMU_NOTIFIER -extern int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end, - unsigned flags); -extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); -extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); - #define HPTEG_CACHE_NUM (1 << 15) #define HPTEG_HASH_BITS_PTE 13 #define HPTEG_HASH_BITS_PTE_LONG 12 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a52f973bdff6..99778ac51243 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1718,11 +1718,7 @@ asmlinkage void kvm_spurious_fault(void); _ASM_EXTABLE(666b, 667b) #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, - unsigned flags); -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); + int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int kvm_cpu_has_extint(struct kvm_vcpu *v); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1b65e7204344..e6d77353025c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -218,6 +218,14 @@ bool 
kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +int kvm_unmap_hva_range(struct kvm *kvm, + unsigned long start, unsigned long end, unsigned flags); +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); +#endif + enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, -- cgit v1.2.3-71-gd317 From 6c9dd6d26216ad9733e57f382e1669c142494aab Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 2 Apr 2021 17:53:09 +0200 Subject: KVM: constify kvm_arch_flush_remote_tlbs_memslot memslots are stored in RCU and there should be no need to change them. Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arm.c | 2 +- arch/mips/kvm/mips.c | 2 +- arch/x86/kvm/mmu/mmu.c | 2 +- include/linux/kvm_host.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e575eff76e97..0d92a4e5fe80 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1270,7 +1270,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) } void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - struct kvm_memory_slot *memslot) + const struct kvm_memory_slot *memslot) { kvm_flush_remote_tlbs(kvm); } diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 58a8812e2fa5..7db8234a4407 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -997,7 +997,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) } void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - struct kvm_memory_slot *memslot) + const struct kvm_memory_slot *memslot) { /* Let implementation handle TLB/GVA invalidation */ kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 9a369575c3a5..3546ada27506 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5638,7 +5638,7 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, } void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - struct kvm_memory_slot *memslot) + const struct kvm_memory_slot *memslot) { /* * All current use cases for flushing the TLBs for a specific memslot diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index e6d77353025c..34a974ffc882 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -894,7 +894,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - struct kvm_memory_slot *memslot); + const struct kvm_memory_slot *memslot); #else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, -- cgit v1.2.3-71-gd317 From 3039bcc744980afe87c612122e47a27306483bc2 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 1 Apr 2021 17:56:50 -0700 Subject: KVM: Move x86's MMU notifier memslot walkers to generic code Move the hva->gfn lookup for MMU notifiers into common code. Every arch does a similar lookup, and some arch code is all but identical across multiple architectures. 
In addition to consolidating code, this will allow introducing optimizations that will benefit all architectures without incurring multiple walks of the memslots, e.g. by taking mmu_lock if and only if a relevant range exists in the memslots. The use of __always_inline to avoid indirect call retpolines, as done by x86, may also benefit other architectures. Consolidating the lookups also fixes a wart in x86, where the legacy MMU and TDP MMU each do their own memslot walks. Lastly, future enhancements to the memslot implementation, e.g. to add an interval tree to track host address, will need to touch far less arch specific code. MIPS, PPC, and arm64 will be converted one at a time in future patches. Signed-off-by: Sean Christopherson Message-Id: <20210402005658.3024832-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/mmu/mmu.c | 127 ++++++++-------------- arch/x86/kvm/mmu/tdp_mmu.c | 235 ++++++++++++++-------------------------- arch/x86/kvm/mmu/tdp_mmu.h | 14 +-- include/linux/kvm_host.h | 14 +++ virt/kvm/kvm_main.c | 169 ++++++++++++++++++++++++++++- 6 files changed, 314 insertions(+), 246 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0d217c3db3f8..fc437a36204d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1727,6 +1727,7 @@ asmlinkage void kvm_spurious_fault(void); _ASM_EXTABLE(666b, 667b) #define KVM_ARCH_WANT_MMU_NOTIFIER +#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 3546ada27506..d0f53189c257 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1298,26 +1298,25 @@ static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, return flush; } -static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - unsigned long data) +static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, int level, + pte_t unused) { return kvm_zap_rmapp(kvm, rmap_head, slot); } -static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - unsigned long data) +static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, int level, + pte_t pte) { u64 *sptep; struct rmap_iterator iter; int need_flush = 0; u64 new_spte; - pte_t *ptep = (pte_t *)data; kvm_pfn_t new_pfn; - WARN_ON(pte_huge(*ptep)); - new_pfn = pte_pfn(*ptep); + WARN_ON(pte_huge(pte)); + new_pfn = pte_pfn(pte); restart: for_each_rmap_spte(rmap_head, &iter, sptep) { @@ -1326,7 +1325,7 @@ restart: need_flush = 1; - if (pte_write(*ptep)) { + if (pte_write(pte)) { pte_list_remove(rmap_head, sptep); goto restart; } else { @@ -1414,86 +1413,52 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) slot_rmap_walk_okay(_iter_); \ slot_rmap_walk_next(_iter_)) -typedef int (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, - int level, unsigned long data); +typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, + int level, pte_t pte); -static __always_inline int 
kvm_handle_hva_range(struct kvm *kvm, - unsigned long start, - unsigned long end, - unsigned long data, - rmap_handler_t handler) +static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, + struct kvm_gfn_range *range, + rmap_handler_t handler) { - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; struct slot_rmap_walk_iterator iterator; - int ret = 0; - int i; - - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(memslot, slots) { - unsigned long hva_start, hva_end; - gfn_t gfn_start, gfn_end; + bool ret = false; - hva_start = max(start, memslot->userspace_addr); - hva_end = min(end, memslot->userspace_addr + - (memslot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; - /* - * {gfn(page) | page intersects with [hva_start, hva_end)} = - * {gfn_start, gfn_start+1, ..., gfn_end-1}. - */ - gfn_start = hva_to_gfn_memslot(hva_start, memslot); - gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); - - for_each_slot_rmap_range(memslot, PG_LEVEL_4K, - KVM_MAX_HUGEPAGE_LEVEL, - gfn_start, gfn_end - 1, - &iterator) - ret |= handler(kvm, iterator.rmap, memslot, - iterator.gfn, iterator.level, data); - } - } + for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, + range->start, range->end - 1, &iterator) + ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn, + iterator.level, range->pte); return ret; } -static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, - unsigned long data, rmap_handler_t handler) +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { - return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); -} - -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, - unsigned flags) -{ - int r; + bool flush; - r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); + flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp); if (is_tdp_mmu_enabled(kvm)) - r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end); + flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush); - return r; + return flush; } -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - int r; + bool flush; - r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); + flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp); if (is_tdp_mmu_enabled(kvm)) - r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte); + flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range); - return r; + return flush; } -static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - unsigned long data) +static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, int level, + pte_t unused) { u64 *sptep; struct rmap_iterator iter; @@ -1505,9 +1470,9 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, return young; } -static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, - int level, unsigned long data) +static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, + int level, pte_t unused) { u64 *sptep; struct rmap_iterator iter; @@ -1529,29 +1494,31 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); - kvm_unmap_rmapp(vcpu->kvm, 
rmap_head, NULL, gfn, sp->role.level, 0); + kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0)); kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level)); } -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - int young = false; + bool young; + + young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp); - young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); if (is_tdp_mmu_enabled(kvm)) - young |= kvm_tdp_mmu_age_hva_range(kvm, start, end); + young |= kvm_tdp_mmu_age_gfn_range(kvm, range); return young; } -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - int young = false; + bool young; + + young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp); - young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); if (is_tdp_mmu_enabled(kvm)) - young |= kvm_tdp_mmu_test_age_hva(kvm, hva); + young |= kvm_tdp_mmu_test_age_gfn(kvm, range); return young; } diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index ccf0d774a181..ea15f3098eb2 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -873,204 +873,135 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, return ret; } -typedef int (*tdp_handler_t)(struct kvm *kvm, struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t start, gfn_t end, - unsigned long data); - -static __always_inline int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, - unsigned long start, - unsigned long end, - unsigned long data, - tdp_handler_t handler) +bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, + bool flush) { - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; struct kvm_mmu_page *root; - int ret = 0; - int as_id; - - for (as_id = 0; as_id < KVM_ADDRESS_SPACE_NUM; as_id++) { - for_each_tdp_mmu_root_yield_safe(kvm, root, as_id) { - slots = __kvm_memslots(kvm, as_id); - kvm_for_each_memslot(memslot, slots) { - unsigned long hva_start, hva_end; - gfn_t gfn_start, gfn_end; - - hva_start = max(start, memslot->userspace_addr); - hva_end = min(end, memslot->userspace_addr + - (memslot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; - /* - * {gfn(page) | page intersects with [hva_start, hva_end)} = - * {gfn_start, gfn_start+1, ..., gfn_end-1}. 
- */ - gfn_start = hva_to_gfn_memslot(hva_start, memslot); - gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); - - ret |= handler(kvm, memslot, root, gfn_start, - gfn_end, data); - } - } - } - return ret; -} + for_each_tdp_mmu_root(kvm, root, range->slot->as_id) + flush |= zap_gfn_range(kvm, root, range->start, range->end, + false, flush); -static __always_inline int kvm_tdp_mmu_handle_hva(struct kvm *kvm, - unsigned long addr, - unsigned long data, - tdp_handler_t handler) -{ - return kvm_tdp_mmu_handle_hva_range(kvm, addr, addr + 1, data, handler); + return flush; } -static int zap_gfn_range_hva_wrapper(struct kvm *kvm, - struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t start, - gfn_t end, unsigned long unused) -{ - return zap_gfn_range(kvm, root, start, end, false, false); -} +typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_gfn_range *range); -int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start, - unsigned long end) +static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, + struct kvm_gfn_range *range, + tdp_handler_t handler) { - return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0, - zap_gfn_range_hva_wrapper); + struct kvm_mmu_page *root; + struct tdp_iter iter; + bool ret = false; + + rcu_read_lock(); + + for_each_tdp_mmu_root(kvm, root, range->slot->as_id) { + tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) + ret |= handler(kvm, &iter, range); + } + + rcu_read_unlock(); + + return ret; } /* * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero * if any of the GFNs in the range have been accessed. */ -static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t start, gfn_t end, - unsigned long unused) +static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_gfn_range *range) { - struct tdp_iter iter; - int young = 0; - u64 new_spte; + u64 new_spte = 0; - rcu_read_lock(); + /* If we have a non-accessed entry we don't need to change the pte. */ + if (!is_accessed_spte(iter->old_spte)) + return false; - tdp_root_for_each_leaf_pte(iter, root, start, end) { + new_spte = iter->old_spte; + + if (spte_ad_enabled(new_spte)) { + new_spte &= ~shadow_accessed_mask; + } else { /* - * If we have a non-accessed entry we don't need to change the - * pte. + * Capture the dirty status of the page, so that it doesn't get + * lost when the SPTE is marked for access tracking. */ - if (!is_accessed_spte(iter.old_spte)) - continue; + if (is_writable_pte(new_spte)) + kvm_set_pfn_dirty(spte_to_pfn(new_spte)); - new_spte = iter.old_spte; - - if (spte_ad_enabled(new_spte)) { - new_spte &= ~shadow_accessed_mask; - } else { - /* - * Capture the dirty status of the page, so that it doesn't get - * lost when the SPTE is marked for access tracking. 
- */ - if (is_writable_pte(new_spte)) - kvm_set_pfn_dirty(spte_to_pfn(new_spte)); - - new_spte = mark_spte_for_access_track(new_spte); - } - - tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte); - young = 1; + new_spte = mark_spte_for_access_track(new_spte); } - rcu_read_unlock(); + tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte); - return young; + return true; } -int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start, - unsigned long end) +bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { - return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0, - age_gfn_range); + return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range); } -static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t gfn, gfn_t end, - unsigned long unused) +static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_gfn_range *range) { - struct tdp_iter iter; - - tdp_root_for_each_leaf_pte(iter, root, gfn, end) - if (is_accessed_spte(iter.old_spte)) - return 1; - - return 0; + return is_accessed_spte(iter->old_spte); } -int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva) +bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - return kvm_tdp_mmu_handle_hva(kvm, hva, 0, test_age_gfn); + return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn); } -/* - * Handle the changed_pte MMU notifier for the TDP MMU. - * data is a pointer to the new pte_t mapping the HVA specified by the MMU - * notifier. - * Returns non-zero if a flush is needed before releasing the MMU lock. - */ -static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot, - struct kvm_mmu_page *root, gfn_t gfn, gfn_t end, - unsigned long data) +static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_gfn_range *range) { - struct tdp_iter iter; - pte_t *ptep = (pte_t *)data; - kvm_pfn_t new_pfn; u64 new_spte; - int need_flush = 0; - - rcu_read_lock(); - WARN_ON(pte_huge(*ptep) || (gfn + 1) != end); + /* Huge pages aren't expected to be modified without first being zapped. */ + WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end); - new_pfn = pte_pfn(*ptep); - - tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) { - if (iter.level != PG_LEVEL_4K) - continue; - - if (!is_shadow_present_pte(iter.old_spte)) - break; - - /* - * Note, when changing a read-only SPTE, it's not strictly - * necessary to zero the SPTE before setting the new PFN, but - * doing so preserves the invariant that the PFN of a present - * leaf SPTE can never change. See __handle_changed_spte(). - */ - tdp_mmu_set_spte(kvm, &iter, 0); + if (iter->level != PG_LEVEL_4K || + !is_shadow_present_pte(iter->old_spte)) + return false; - if (!pte_write(*ptep)) { - new_spte = kvm_mmu_changed_pte_notifier_make_spte( - iter.old_spte, new_pfn); + /* + * Note, when changing a read-only SPTE, it's not strictly necessary to + * zero the SPTE before setting the new PFN, but doing so preserves the + * invariant that the PFN of a present * leaf SPTE can never change. + * See __handle_changed_spte(). 
+ */ + tdp_mmu_set_spte(kvm, iter, 0); - tdp_mmu_set_spte(kvm, &iter, new_spte); - } + if (!pte_write(range->pte)) { + new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte, + pte_pfn(range->pte)); - need_flush = 1; + tdp_mmu_set_spte(kvm, iter, new_spte); } - if (need_flush) - kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); - - rcu_read_unlock(); - - return 0; + return true; } -int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address, - pte_t *host_ptep) +/* + * Handle the changed_pte MMU notifier for the TDP MMU. + * data is a pointer to the new pte_t mapping the HVA specified by the MMU + * notifier. + * Returns non-zero if a flush is needed before releasing the MMU lock. + */ +bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - return kvm_tdp_mmu_handle_hva(kvm, address, (unsigned long)host_ptep, - set_tdp_spte); + bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); + + /* FIXME: return 'flush' instead of flushing here. */ + if (flush) + kvm_flush_remote_tlbs_with_address(kvm, range->start, 1); + + return false; } /* diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index bf3ce169122e..ee8efa58902f 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -38,15 +38,11 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, int map_writable, int max_level, kvm_pfn_t pfn, bool prefault); -int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start, - unsigned long end); - -int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start, - unsigned long end); -int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva); - -int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address, - pte_t *host_ptep); +bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, + bool flush); +bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot, int min_level); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 34a974ffc882..0228436bc446 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -219,11 +219,25 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif #ifdef KVM_ARCH_WANT_MMU_NOTIFIER +#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS +struct kvm_gfn_range { + struct kvm_memory_slot *slot; + gfn_t start; + gfn_t end; + pte_t pte; + bool may_block; +}; +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +#else int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, unsigned flags); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); +#endif /* KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS */ #endif enum { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5ce91254a793..20836cf342ba 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -451,14 +451,131 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, 
srcu_read_unlock(&kvm->srcu, idx); } +#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS + +typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); + +struct kvm_hva_range { + unsigned long start; + unsigned long end; + pte_t pte; + hva_handler_t handler; + bool flush_on_ret; + bool may_block; +}; + +static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, + const struct kvm_hva_range *range) +{ + struct kvm_memory_slot *slot; + struct kvm_memslots *slots; + struct kvm_gfn_range gfn_range; + bool ret = false; + int i, idx; + + lockdep_assert_held_write(&kvm->mmu_lock); + + idx = srcu_read_lock(&kvm->srcu); + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + kvm_for_each_memslot(slot, slots) { + unsigned long hva_start, hva_end; + + hva_start = max(range->start, slot->userspace_addr); + hva_end = min(range->end, slot->userspace_addr + + (slot->npages << PAGE_SHIFT)); + if (hva_start >= hva_end) + continue; + + /* + * To optimize for the likely case where the address + * range is covered by zero or one memslots, don't + * bother making these conditional (to avoid writes on + * the second or later invocation of the handler). + */ + gfn_range.pte = range->pte; + gfn_range.may_block = range->may_block; + + /* + * {gfn(page) | page intersects with [hva_start, hva_end)} = + * {gfn_start, gfn_start+1, ..., gfn_end-1}. + */ + gfn_range.start = hva_to_gfn_memslot(hva_start, slot); + gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot); + gfn_range.slot = slot; + + ret |= range->handler(kvm, &gfn_range); + } + } + + if (range->flush_on_ret && (ret || kvm->tlbs_dirty)) + kvm_flush_remote_tlbs(kvm); + + srcu_read_unlock(&kvm->srcu, idx); + + /* The notifiers are averse to booleans. :-( */ + return (int)ret; +} + +static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn, + unsigned long start, + unsigned long end, + pte_t pte, + hva_handler_t handler) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + const struct kvm_hva_range range = { + .start = start, + .end = end, + .pte = pte, + .handler = handler, + .flush_on_ret = true, + .may_block = false, + }; + int ret; + + KVM_MMU_LOCK(kvm); + ret = __kvm_handle_hva_range(kvm, &range); + KVM_MMU_UNLOCK(kvm); + + return ret; +} + +static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn, + unsigned long start, + unsigned long end, + hva_handler_t handler) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + const struct kvm_hva_range range = { + .start = start, + .end = end, + .pte = __pte(0), + .handler = handler, + .flush_on_ret = false, + .may_block = false, + }; + int ret; + + KVM_MMU_LOCK(kvm); + ret = __kvm_handle_hva_range(kvm, &range); + KVM_MMU_UNLOCK(kvm); + + return ret; +} +#endif /* KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS */ + static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address, pte_t pte) { struct kvm *kvm = mmu_notifier_to_kvm(mn); - int idx; +#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS + int idx; +#endif trace_kvm_set_spte_hva(address); /* @@ -468,6 +585,9 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, */ WARN_ON_ONCE(!kvm->mmu_notifier_count); +#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS + kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn); +#else idx = srcu_read_lock(&kvm->srcu); KVM_MMU_LOCK(kvm); @@ -477,17 +597,32 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); +#endif 
} static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, const struct mmu_notifier_range *range) { struct kvm *kvm = mmu_notifier_to_kvm(mn); +#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS + const struct kvm_hva_range hva_range = { + .start = range->start, + .end = range->end, + .pte = __pte(0), + .handler = kvm_unmap_gfn_range, + .flush_on_ret = true, + .may_block = mmu_notifier_range_blockable(range), + }; +#else int need_tlb_flush = 0, idx; +#endif trace_kvm_unmap_hva_range(range->start, range->end); +#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS idx = srcu_read_lock(&kvm->srcu); +#endif + KVM_MMU_LOCK(kvm); /* * The count increase must become visible at unlock time as no @@ -513,14 +648,21 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, kvm->mmu_notifier_range_end = max(kvm->mmu_notifier_range_end, range->end); } + +#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS + __kvm_handle_hva_range(kvm, &hva_range); +#else need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end, range->flags); /* we've to flush the tlb before the pages can be freed */ if (need_tlb_flush || kvm->tlbs_dirty) kvm_flush_remote_tlbs(kvm); +#endif KVM_MMU_UNLOCK(kvm); +#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS srcu_read_unlock(&kvm->srcu, idx); +#endif return 0; } @@ -554,11 +696,15 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, unsigned long start, unsigned long end) { +#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS struct kvm *kvm = mmu_notifier_to_kvm(mn); int young, idx; - +#endif trace_kvm_age_hva(start, end); +#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS + return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn); +#else idx = srcu_read_lock(&kvm->srcu); KVM_MMU_LOCK(kvm); @@ -570,6 +716,7 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, srcu_read_unlock(&kvm->srcu, idx); return young; +#endif } static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, @@ -577,13 +724,13 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, unsigned long start, unsigned long end) { +#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS struct kvm *kvm = mmu_notifier_to_kvm(mn); int young, idx; +#endif trace_kvm_age_hva(start, end); - idx = srcu_read_lock(&kvm->srcu); - KVM_MMU_LOCK(kvm); /* * Even though we do not flush TLB, this will still adversely * affect performance on pre-Haswell Intel EPT, where there is @@ -597,22 +744,33 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, * cadence. If we find this inaccurate, we might come up with a * more sophisticated heuristic later. 
*/ +#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS + return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn); +#else + idx = srcu_read_lock(&kvm->srcu); + KVM_MMU_LOCK(kvm); young = kvm_age_hva(kvm, start, end); KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); return young; +#endif } static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address) { +#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS struct kvm *kvm = mmu_notifier_to_kvm(mn); int young, idx; - +#endif trace_kvm_test_age_hva(address); +#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS + return kvm_handle_hva_range_no_flush(mn, address, address + 1, + kvm_test_age_gfn); +#else idx = srcu_read_lock(&kvm->srcu); KVM_MMU_LOCK(kvm); young = kvm_test_age_hva(kvm, address); @@ -620,6 +778,7 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, srcu_read_unlock(&kvm->srcu, idx); return young; +#endif } static void kvm_mmu_notifier_release(struct mmu_notifier *mn, -- cgit v1.2.3-71-gd317 From b4c5936c47f86295cc76672e8dbeeca8b2379ba6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 1 Apr 2021 17:56:54 -0700 Subject: KVM: Kill off the old hva-based MMU notifier callbacks Yank out the hva-based MMU notifier APIs now that all architectures that use the notifiers have moved to the gfn-based APIs. No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20210402005658.3024832-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/arm64/include/asm/kvm_host.h | 1 - arch/mips/include/asm/kvm_host.h | 1 - arch/powerpc/include/asm/kvm_host.h | 1 - arch/x86/include/asm/kvm_host.h | 1 - include/linux/kvm_host.h | 8 ---- virt/kvm/kvm_main.c | 85 ------------------------------------- 6 files changed, 97 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index eba448bd4c7f..3a708be3ed71 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -586,7 +586,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events); #define KVM_ARCH_WANT_MMU_NOTIFIER -#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index b70645670548..d0944a75fc8d 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -960,7 +960,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, bool write); #define KVM_ARCH_WANT_MMU_NOTIFIER -#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS /* Emulation */ int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 1335f0001bdd..1e83359f286b 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -55,7 +55,6 @@ #include #define KVM_ARCH_WANT_MMU_NOTIFIER -#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS #define HPTEG_CACHE_NUM (1 << 15) #define HPTEG_HASH_BITS_PTE 13 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fc437a36204d..0d217c3db3f8 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1727,7 +1727,6 @@ asmlinkage void kvm_spurious_fault(void); _ASM_EXTABLE(666b, 667b) #define KVM_ARCH_WANT_MMU_NOTIFIER -#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int 
kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0228436bc446..6b4dd9500d70 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -219,7 +219,6 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif #ifdef KVM_ARCH_WANT_MMU_NOTIFIER -#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS struct kvm_gfn_range { struct kvm_memory_slot *slot; gfn_t start; @@ -231,13 +230,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); -#else -int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end, unsigned flags); -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -#endif /* KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS */ #endif enum { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 20836cf342ba..d4c249719a56 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -451,8 +451,6 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, srcu_read_unlock(&kvm->srcu, idx); } -#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS - typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); struct kvm_hva_range { @@ -564,8 +562,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn return ret; } -#endif /* KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS */ - static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address, @@ -573,9 +569,6 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, { struct kvm *kvm = mmu_notifier_to_kvm(mn); -#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS - int idx; -#endif trace_kvm_set_spte_hva(address); /* @@ -585,26 +578,13 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, */ WARN_ON_ONCE(!kvm->mmu_notifier_count); -#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn); -#else - idx = srcu_read_lock(&kvm->srcu); - - KVM_MMU_LOCK(kvm); - - if (kvm_set_spte_hva(kvm, address, pte)) - kvm_flush_remote_tlbs(kvm); - - KVM_MMU_UNLOCK(kvm); - srcu_read_unlock(&kvm->srcu, idx); -#endif } static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, const struct mmu_notifier_range *range) { struct kvm *kvm = mmu_notifier_to_kvm(mn); -#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS const struct kvm_hva_range hva_range = { .start = range->start, .end = range->end, @@ -613,16 +593,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, .flush_on_ret = true, .may_block = mmu_notifier_range_blockable(range), }; -#else - int need_tlb_flush = 0, idx; -#endif trace_kvm_unmap_hva_range(range->start, range->end); -#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS - idx = srcu_read_lock(&kvm->srcu); -#endif - KVM_MMU_LOCK(kvm); /* * The count increase must become visible at unlock time as no @@ -649,20 +622,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, max(kvm->mmu_notifier_range_end, range->end); } -#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS __kvm_handle_hva_range(kvm, &hva_range); -#else - need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end, - range->flags); - 
/* we've to flush the tlb before the pages can be freed */ - if (need_tlb_flush || kvm->tlbs_dirty) - kvm_flush_remote_tlbs(kvm); -#endif KVM_MMU_UNLOCK(kvm); -#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS - srcu_read_unlock(&kvm->srcu, idx); -#endif return 0; } @@ -696,27 +658,9 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, unsigned long start, unsigned long end) { -#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS - struct kvm *kvm = mmu_notifier_to_kvm(mn); - int young, idx; -#endif trace_kvm_age_hva(start, end); -#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn); -#else - idx = srcu_read_lock(&kvm->srcu); - KVM_MMU_LOCK(kvm); - - young = kvm_age_hva(kvm, start, end); - if (young) - kvm_flush_remote_tlbs(kvm); - - KVM_MMU_UNLOCK(kvm); - srcu_read_unlock(&kvm->srcu, idx); - - return young; -#endif } static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, @@ -724,11 +668,6 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, unsigned long start, unsigned long end) { -#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS - struct kvm *kvm = mmu_notifier_to_kvm(mn); - int young, idx; -#endif - trace_kvm_age_hva(start, end); /* @@ -744,41 +683,17 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, * cadence. If we find this inaccurate, we might come up with a * more sophisticated heuristic later. */ -#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn); -#else - idx = srcu_read_lock(&kvm->srcu); - KVM_MMU_LOCK(kvm); - young = kvm_age_hva(kvm, start, end); - KVM_MMU_UNLOCK(kvm); - srcu_read_unlock(&kvm->srcu, idx); - - return young; -#endif } static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address) { -#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS - struct kvm *kvm = mmu_notifier_to_kvm(mn); - int young, idx; -#endif trace_kvm_test_age_hva(address); -#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS return kvm_handle_hva_range_no_flush(mn, address, address + 1, kvm_test_age_gfn); -#else - idx = srcu_read_lock(&kvm->srcu); - KVM_MMU_LOCK(kvm); - young = kvm_test_age_hva(kvm, address); - KVM_MMU_UNLOCK(kvm); - srcu_read_unlock(&kvm->srcu, idx); - - return young; -#endif } static void kvm_mmu_notifier_release(struct mmu_notifier *mn, -- cgit v1.2.3-71-gd317 From 8ca6f063b73d3754213d009efb3df486c8fe52d2 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Thu, 1 Apr 2021 16:37:24 -0700 Subject: KVM: x86/mmu: Re-add const qualifier in kvm_tdp_mmu_zap_collapsible_sptes kvm_tdp_mmu_zap_collapsible_sptes unnecessarily removes the const qualifier from its memlsot argument, leading to a compiler warning. Add the const annotation and pass it to subsequent functions. Signed-off-by: Ben Gardon Message-Id: <20210401233736.638171-2-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 10 +++++----- arch/x86/kvm/mmu/mmu_internal.h | 5 +++-- arch/x86/kvm/mmu/tdp_mmu.c | 5 +++-- arch/x86/kvm/mmu/tdp_mmu.h | 3 ++- include/linux/kvm_host.h | 2 +- 5 files changed, 14 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index d0f53189c257..6395ab9450af 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -715,8 +715,7 @@ static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) * handling slots that are not large page aligned. 
*/ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, - struct kvm_memory_slot *slot, - int level) + const struct kvm_memory_slot *slot, int level) { unsigned long idx; @@ -2702,7 +2701,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) } static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, - struct kvm_memory_slot *slot) + const struct kvm_memory_slot *slot) { unsigned long hva; pte_t *pte; @@ -2728,8 +2727,9 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, return level; } -int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot, - gfn_t gfn, kvm_pfn_t pfn, int max_level) +int kvm_mmu_max_mapping_level(struct kvm *kvm, + const struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t pfn, int max_level) { struct kvm_lpage_info *linfo; diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index f296b8a258ff..9f513822b568 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -161,8 +161,9 @@ enum { #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) #define SET_SPTE_SPURIOUS BIT(2) -int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot, - gfn_t gfn, kvm_pfn_t pfn, int max_level); +int kvm_mmu_max_mapping_level(struct kvm *kvm, + const struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t pfn, int max_level); int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, int max_level, kvm_pfn_t *pfnp, bool huge_page_disallowed, int *req_level); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 10cb16c55dd0..5f517ef06e61 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1190,7 +1190,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, */ static bool zap_collapsible_spte_range(struct kvm *kvm, struct kvm_mmu_page *root, - struct kvm_memory_slot *slot, + const struct kvm_memory_slot *slot, bool flush) { gfn_t start = slot->base_gfn; @@ -1231,7 +1231,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm, * be replaced by large mappings, for GFNs within the slot. 
*/ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, - struct kvm_memory_slot *slot, bool flush) + const struct kvm_memory_slot *slot, + bool flush) { struct kvm_mmu_page *root; diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index ee8efa58902f..11050994e7e4 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -53,7 +53,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, gfn_t gfn, unsigned long mask, bool wrprot); bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, - struct kvm_memory_slot *slot, bool flush); + const struct kvm_memory_slot *slot, + bool flush); bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 6b4dd9500d70..d17e3ff1138d 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1130,7 +1130,7 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) } static inline unsigned long -__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) +__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) { return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; } -- cgit v1.2.3-71-gd317 From 7a35693adcd38664b852ad10e3742782b3e87987 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 7 Apr 2021 14:25:22 +0100 Subject: dm: replace dm_vcalloc() Use kvcalloc or kvmalloc_array instead (depending whether zeroing is useful). Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Mike Snitzer --- drivers/md/dm-snap-persistent.c | 6 +++--- drivers/md/dm-snap.c | 5 +++-- drivers/md/dm-table.c | 30 ++++++------------------------ include/linux/device-mapper.h | 5 ----- 4 files changed, 12 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 8e329c3f3a78..9ab4bf651ca9 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -596,7 +596,7 @@ static void persistent_dtr(struct dm_exception_store *store) free_area(ps); /* Allocated in persistent_read_metadata */ - vfree(ps->callbacks); + kvfree(ps->callbacks); kfree(ps); } @@ -621,8 +621,8 @@ static int persistent_read_metadata(struct dm_exception_store *store, */ ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) / sizeof(struct disk_exception); - ps->callbacks = dm_vcalloc(ps->exceptions_per_area, - sizeof(*ps->callbacks)); + ps->callbacks = kvcalloc(ps->exceptions_per_area, + sizeof(*ps->callbacks), GFP_KERNEL); if (!ps->callbacks) return -ENOMEM; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 11890db71f3f..a2acb014c13a 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -663,7 +663,8 @@ static int dm_exception_table_init(struct dm_exception_table *et, et->hash_shift = hash_shift; et->hash_mask = size - 1; - et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head)); + et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head), + GFP_KERNEL); if (!et->table) return -ENOMEM; @@ -689,7 +690,7 @@ static void dm_exception_table_exit(struct dm_exception_table *et, kmem_cache_free(mem, ex); } - vfree(et->table); + kvfree(et->table); } static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e5f0f1703c5d..ee47a332b462 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -94,24 +94,6 @@ static int setup_btree_index(unsigned int l, struct dm_table *t) return 0; } 
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size) -{ - unsigned long size; - void *addr; - - /* - * Check that we're not going to overflow. - */ - if (nmemb > (ULONG_MAX / elem_size)) - return NULL; - - size = nmemb * elem_size; - addr = vzalloc(size); - - return addr; -} -EXPORT_SYMBOL(dm_vcalloc); - /* * highs, and targets are managed as dynamic arrays during a * table load. @@ -124,15 +106,15 @@ static int alloc_targets(struct dm_table *t, unsigned int num) /* * Allocate both the target array and offset array at once. */ - n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) + - sizeof(sector_t)); + n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t), + GFP_KERNEL); if (!n_highs) return -ENOMEM; n_targets = (struct dm_target *) (n_highs + num); memset(n_highs, -1, sizeof(*n_highs) * num); - vfree(t->highs); + kvfree(t->highs); t->num_allocated = num; t->highs = n_highs; @@ -198,7 +180,7 @@ void dm_table_destroy(struct dm_table *t) /* free the indexes */ if (t->depth >= 2) - vfree(t->index[t->depth - 2]); + kvfree(t->index[t->depth - 2]); /* free the targets */ for (i = 0; i < t->num_targets; i++) { @@ -210,7 +192,7 @@ void dm_table_destroy(struct dm_table *t) dm_put_target_type(tgt->type); } - vfree(t->highs); + kvfree(t->highs); /* free the device list */ free_devices(&t->devices, t->md); @@ -1077,7 +1059,7 @@ static int setup_indexes(struct dm_table *t) total += t->counts[i]; } - indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE); + indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL); if (!indexes) return -ENOMEM; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 5c641f930caf..ff700fb6ce1d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -574,11 +574,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md, */ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); -/* - * A wrapper around vmalloc. - */ -void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); - /*----------------------------------------------------------------- * Macros. *---------------------------------------------------------------*/ -- cgit v1.2.3-71-gd317 From 5d3c4c79384af06e3c8e25b7770b6247496b4417 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 12 Apr 2021 15:20:49 -0700 Subject: KVM: Stop looking for coalesced MMIO zones if the bus is destroyed Abort the walk of coalesced MMIO zones if kvm_io_bus_unregister_dev() fails to allocate memory for the new instance of the bus. If it can't instantiate a new bus, unregister_dev() destroys all devices _except_ the target device. But, it doesn't tell the caller that it obliterated the bus and invoked the destructor for all devices that were on the bus. In the coalesced MMIO case, this can result in a deleted list entry dereference due to attempting to continue iterating on coalesced_zones after future entries (in the walk) have been deleted. Opportunistically add curly braces to the for-loop, which encompasses many lines but sneaks by without braces due to the guts being a single if statement. 
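For context, the zones walked here are created and torn down from userspace through the KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO ioctls; a minimal userspace sketch follows (the fd plumbing and the address range are assumptions for illustration, not taken from this series):

/* Hedged sketch: userspace side of coalesced MMIO zone registration.
 * Assumes vm_fd came from KVM_CREATE_VM on /dev/kvm and that the host
 * reports KVM_CAP_COALESCED_MMIO; the MMIO range below is made up.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int toggle_zone(int vm_fd)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = 0xfed00000,	/* illustrative MMIO base */
		.size = 0x1000,
	};

	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0) {
		perror("KVM_REGISTER_COALESCED_MMIO");
		return -1;
	}

	/* As the change below notes, userspace still sees success here even
	 * when the kernel had to destroy the whole bus internally. */
	if (ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone) < 0) {
		perror("KVM_UNREGISTER_COALESCED_MMIO");
		return -1;
	}
	return 0;
}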
Fixes: f65886606c2d ("KVM: fix memory leak in kvm_io_bus_unregister_dev()") Cc: stable@vger.kernel.org Reported-by: Hao Sun Signed-off-by: Sean Christopherson Message-Id: <20210412222050.876100-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 4 ++-- virt/kvm/coalesced_mmio.c | 19 +++++++++++++++++-- virt/kvm/kvm_main.c | 10 +++++----- 3 files changed, 24 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d17e3ff1138d..82b066db37cb 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -192,8 +192,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, - struct kvm_io_device *dev); +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev); struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr); diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index 62bd908ecd58..f08f5e82460b 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c @@ -174,21 +174,36 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) { struct kvm_coalesced_mmio_dev *dev, *tmp; + int r; if (zone->pio != 1 && zone->pio != 0) return -EINVAL; mutex_lock(&kvm->slots_lock); - list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) + list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) { if (zone->pio == dev->zone.pio && coalesced_mmio_in_range(dev, zone->addr, zone->size)) { - kvm_io_bus_unregister_dev(kvm, + r = kvm_io_bus_unregister_dev(kvm, zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev); kvm_iodevice_destructor(&dev->dev); + + /* + * On failure, unregister destroys all devices on the + * bus _except_ the target device, i.e. coalesced_zones + * has been modified. No need to restart the walk as + * there aren't any zones left. + */ + if (r) + break; } + } mutex_unlock(&kvm->slots_lock); + /* + * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's + * perspective, the coalesced MMIO is most definitely unregistered. + */ return 0; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index c771d40737c9..f84b126c32d6 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4621,15 +4621,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, } /* Caller must hold slots_lock. */ -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, - struct kvm_io_device *dev) +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev) { int i, j; struct kvm_io_bus *new_bus, *bus; bus = kvm_get_bus(kvm, bus_idx); if (!bus) - return; + return 0; for (i = 0; i < bus->dev_count; i++) if (bus->range[i].dev == dev) { @@ -4637,7 +4637,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, } if (i == bus->dev_count) - return; + return 0; new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), GFP_KERNEL_ACCOUNT); @@ -4662,7 +4662,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, } kfree(bus); - return; + return new_bus ? 
0 : -ENOMEM; } struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, -- cgit v1.2.3-71-gd317 From 3a05d08f6cc75b74079290c33d6127b2857226fa Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 30 Mar 2021 16:11:38 +0100 Subject: PCI/MSI: Drop use of msi_controller from core code As there is no driver using msi_controller, we can now safely remove its use from the PCI probe code. Link: https://lore.kernel.org/r/20210330151145.997953-8-maz@kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Lorenzo Pieralisi Acked-by: Bjorn Helgaas --- drivers/pci/msi.c | 23 +---------------------- drivers/pci/probe.c | 2 -- include/linux/pci.h | 2 -- 3 files changed, 1 insertion(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 3162f88fe940..79b5a995bd02 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -64,39 +64,18 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev) /* Arch hooks */ int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) { - struct msi_controller *chip = dev->bus->msi; - int err; - - if (!chip || !chip->setup_irq) - return -EINVAL; - - err = chip->setup_irq(chip, dev, desc); - if (err < 0) - return err; - - irq_set_chip_data(desc->irq, chip); - - return 0; + return -EINVAL; } void __weak arch_teardown_msi_irq(unsigned int irq) { - struct msi_controller *chip = irq_get_chip_data(irq); - - if (!chip || !chip->teardown_irq) - return; - - chip->teardown_irq(chip, irq); } int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { - struct msi_controller *chip = dev->bus->msi; struct msi_desc *entry; int ret; - if (chip && chip->setup_irqs) - return chip->setup_irqs(chip, dev, nvec, type); /* * If an architecture wants to support multiple MSI, it needs to * override arch_setup_msi_irqs() diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 953f15abc850..fb04fc81a8bd 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -895,7 +895,6 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) /* Temporarily move resources off the list */ list_splice_init(&bridge->windows, &resources); bus->sysdata = bridge->sysdata; - bus->msi = bridge->msi; bus->ops = bridge->ops; bus->number = bus->busn_res.start = bridge->busnr; #ifdef CONFIG_PCI_DOMAINS_GENERIC @@ -1053,7 +1052,6 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, return NULL; child->parent = parent; - child->msi = parent->msi; child->sysdata = parent->sysdata; child->bus_flags = parent->bus_flags; diff --git a/include/linux/pci.h b/include/linux/pci.h index 86c799c97b77..ebf557e59d87 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -540,7 +540,6 @@ struct pci_host_bridge { int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); void *release_data; - struct msi_controller *msi; unsigned int ignore_reset_delay:1; /* For entire hierarchy */ unsigned int no_ext_tags:1; /* No Extended Tags */ unsigned int native_aer:1; /* OS may use PCIe AER */ @@ -621,7 +620,6 @@ struct pci_bus { struct resource busn_res; /* Bus numbers routed to this bus */ struct pci_ops *ops; /* Configuration access functions */ - struct msi_controller *msi; /* MSI controller */ void *sysdata; /* Hook for sys-specific extension */ struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */ -- cgit v1.2.3-71-gd317 From b227be0d7314d0869d4e28c199ac1fc7075cf06e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 30 Mar 2021 16:11:39 
+0100 Subject: PCI/MSI: Kill msi_controller structure msi_controller had a good, long life as the abstraction for a driver providing MSIs to PCI devices. But it has been replaced in all drivers by the more expressive generic MSI framework. Farewell, struct msi_controller. Link: https://lore.kernel.org/r/20210330151145.997953-9-maz@kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Lorenzo Pieralisi Acked-by: Bjorn Helgaas --- include/linux/msi.h | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/msi.h b/include/linux/msi.h index aef35fd1cf11..3f21e77b57b7 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -240,8 +240,7 @@ void pci_msi_unmask_irq(struct irq_data *data); /* * The arch hooks to setup up msi irqs. Default functions are implemented * as weak symbols so that they /can/ be overriden by architecture specific - * code if needed. These hooks must be enabled by the architecture or by - * drivers which depend on them via msi_controller based MSI handling. + * code if needed. These hooks can only be enabled by the architecture. * * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by * stubs with warnings. @@ -272,19 +271,6 @@ static inline void arch_teardown_msi_irqs(struct pci_dev *dev) void arch_restore_msi_irqs(struct pci_dev *dev); void default_restore_msi_irqs(struct pci_dev *dev); -struct msi_controller { - struct module *owner; - struct device *dev; - struct device_node *of_node; - struct list_head list; - - int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, - struct msi_desc *desc); - int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, - int nvec, int type); - void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); -}; - #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN #include -- cgit v1.2.3-71-gd317 From f8bcf249d9cf292c6ceb3d9f5bd90815090f5286 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 30 Mar 2021 16:11:40 +0100 Subject: PCI/MSI: Kill default_teardown_msi_irqs() It doesn't have any caller left. 
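The arch MSI hooks being pruned here are plain weak symbols; as a stand-alone illustration of that link-time override mechanism (the function name is hypothetical, not one of the PCI/MSI hooks):

/* Minimal sketch of the __weak override pattern (GCC/Clang attribute).
 * arch_hook() is a made-up name; it only demonstrates that a strong
 * definition in another object file replaces the weak default at link time.
 */
#include <stdio.h>

void __attribute__((weak)) arch_hook(void)
{
	printf("weak default implementation\n");
}

int main(void)
{
	arch_hook();	/* uses the weak default unless a strong symbol exists */
	return 0;
}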
Link: https://lore.kernel.org/r/20210330151145.997953-10-maz@kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Lorenzo Pieralisi Acked-by: Bjorn Helgaas --- drivers/pci/msi.c | 11 +---------- include/linux/msi.h | 1 - 2 files changed, 1 insertion(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 79b5a995bd02..d9c73c173c14 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -94,11 +94,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) return 0; } -/* - * We have a default implementation available as a separate non-weak - * function, as it is used by the Xen x86 PCI code - */ -void default_teardown_msi_irqs(struct pci_dev *dev) +void __weak arch_teardown_msi_irqs(struct pci_dev *dev) { int i; struct msi_desc *entry; @@ -108,11 +104,6 @@ void default_teardown_msi_irqs(struct pci_dev *dev) for (i = 0; i < entry->nvec_used; i++) arch_teardown_msi_irq(entry->irq + i); } - -void __weak arch_teardown_msi_irqs(struct pci_dev *dev) -{ - return default_teardown_msi_irqs(dev); -} #endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */ static void default_restore_msi_irq(struct pci_dev *dev, int irq) diff --git a/include/linux/msi.h b/include/linux/msi.h index 3f21e77b57b7..6aff469e511d 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -250,7 +250,6 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); void arch_teardown_msi_irqs(struct pci_dev *dev); -void default_teardown_msi_irqs(struct pci_dev *dev); #else static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { -- cgit v1.2.3-71-gd317 From 94e89b145371b68fa0ea294855adebcd03e0522e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 30 Mar 2021 16:11:41 +0100 Subject: PCI/MSI: Let PCI host bridges declare their reliance on MSI domains There is a whole class of host bridges that cannot know whether MSIs will be provided or not, as they rely on other blocks to provide the MSI functionnality, using MSI domains. This is the case for example on systems that use the ARM GIC architecture. Introduce a new attribute ('msi_domain') indicating that implicit dependency, and use this property to set the NO_MSI flag when no MSI domain is found at probe time. 
Link: https://lore.kernel.org/r/20210330151145.997953-11-maz@kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Lorenzo Pieralisi Acked-by: Bjorn Helgaas --- drivers/pci/probe.c | 2 ++ include/linux/pci.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include/linux') diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index fb04fc81a8bd..aa6fba35f5d1 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -925,6 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); pci_set_bus_msi_domain(bus); + if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev)) + bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI; if (!parent) set_dev_node(bus->bridge, pcibus_to_node(bus)); diff --git a/include/linux/pci.h b/include/linux/pci.h index ebf557e59d87..ede0aef2cfd4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -550,6 +550,7 @@ struct pci_host_bridge { unsigned int native_dpc:1; /* OS may use PCIe DPC */ unsigned int preserve_config:1; /* Preserve FW resource setup */ unsigned int size_windows:1; /* Enable root bus sizing */ + unsigned int msi_domain:1; /* Bridge wants MSI domain */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, -- cgit v1.2.3-71-gd317 From 67880f1bc342ed4c94e72cad7f8ca76e5121aae3 Mon Sep 17 00:00:00 2001 From: Prashant Malani Date: Tue, 20 Apr 2021 10:16:11 -0700 Subject: platform/chrome: cros_ec: Add Type C hard reset Update the EC command header to include the new event bit. This bit is included in the latest version of the Chrome EC headers[1]. [1] https://chromium.googlesource.com/chromiumos/platform/ec/+/refs/heads/main/include/ec_commands.h Signed-off-by: Prashant Malani Signed-off-by: Enric Balletbo i Serra Link: https://lore.kernel.org/r/20210420171617.3830902-1-pmalani@chromium.org --- include/linux/platform_data/cros_ec_commands.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 5ff8597ceabd..9156078c6fc6 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -5678,6 +5678,7 @@ enum tcpc_cc_polarity { #define PD_STATUS_EVENT_SOP_DISC_DONE BIT(0) #define PD_STATUS_EVENT_SOP_PRIME_DISC_DONE BIT(1) +#define PD_STATUS_EVENT_HARD_RESET BIT(2) struct ec_params_typec_status { uint8_t port; -- cgit v1.2.3-71-gd317 From d99f2487e1de23a2e902d1a359a85a48bfd21fe7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 21 Apr 2021 07:48:43 -0400 Subject: NFS: The 'fattr_valid' field in struct nfs_server should be unsigned int Fix up a static compiler warning: "fs/nfs/nfs4proc.c:3882 _nfs4_server_capabilities() warn: was expecting a 64 bit value instead of '(1 << 11)'" The fix is to convert the fattr_valid field to match the type of the 'valid' field in struct nfs_fattr. 
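The bit the checker flagged (1 << 11) is harmless in itself; the warning is about mixing int-typed flag constants with a 64-bit field, a pattern that turns into a real bug once bit 31 is involved. A self-contained illustration (generic C, not the NFS structures):

/* Hedged illustration: why storing int-typed bit flags in a u64 field
 * draws static-checker warnings. Bit 11 is benign, but a signed 32-bit
 * value with bit 31 set sign-extends when widened to 64 bits.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int32_t flag = INT32_MIN;		/* morally "1 << 31" as a signed int */
	uint64_t wide = flag;			/* sign-extends to 0xffffffff80000000 */
	uint32_t narrow = (uint32_t)flag;	/* stays 0x80000000 */

	printf("wide=%#" PRIx64 " narrow=%#" PRIx32 "\n", wide, narrow);
	return 0;
}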
Reported-by: Dan Carpenter Signed-off-by: Trond Myklebust --- include/linux/nfs_fs_sb.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index d28d7a62864f..70057b2e606e 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -156,6 +156,7 @@ struct nfs_server { #define NFS_MOUNT_WRITE_EAGER 0x01000000 #define NFS_MOUNT_WRITE_WAIT 0x02000000 + unsigned int fattr_valid; /* Valid attributes */ unsigned int caps; /* server capabilities */ unsigned int rsize; /* read size */ unsigned int rpages; /* read size (in pages) */ @@ -191,8 +192,6 @@ struct nfs_server { dev_t s_dev; /* superblock dev numbers */ struct nfs_auth_info auth_info; /* parsed auth flavors */ - __u64 fattr_valid; /* Valid attributes */ - #ifdef CONFIG_NFS_FSCACHE struct nfs_fscache_key *fscache_key; /* unique key for superblock */ struct fscache_cookie *fscache; /* superblock cookie */ -- cgit v1.2.3-71-gd317 From 54526d1fd59338fd6a381dbd806b7ccbae3aa4aa Mon Sep 17 00:00:00 2001 From: Nathan Tempelman Date: Thu, 8 Apr 2021 22:32:14 +0000 Subject: KVM: x86: Support KVM VMs sharing SEV context Add a capability for userspace to mirror SEV encryption context from one vm to another. On our side, this is intended to support a Migration Helper vCPU, but it can also be used generically to support other in-guest workloads scheduled by the host. The intention is for the primary guest and the mirror to have nearly identical memslots. The primary benefits of this are that: 1) The VMs do not share KVM contexts (think APIC/MSRs/etc), so they can't accidentally clobber each other. 2) The VMs can have different memory-views, which is necessary for post-copy migration (the migration vCPUs on the target need to read and write to pages, when the primary guest would VMEXIT). This does not change the threat model for AMD SEV. Any memory involved is still owned by the primary guest and its initial state is still attested to through the normal SEV_LAUNCH_* flows. If userspace wanted to circumvent SEV, they could achieve the same effect by simply attaching a vCPU to the primary VM. This patch deliberately leaves userspace in charge of the memslots for the mirror, as it already has the power to mess with them in the primary guest. This patch does not support SEV-ES (much less SNP), as it does not handle handing off attested VMSAs to the mirror. For additional context, we need a Migration Helper because SEV PSP migration is far too slow for our live migration on its own. Using an in-guest migrator lets us speed this up significantly. Signed-off-by: Nathan Tempelman Message-Id: <20210408223214.2582277-1-natet@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 18 ++++++++- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm/sev.c | 90 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/svm.c | 2 + arch/x86/kvm/svm/svm.h | 2 + arch/x86/kvm/x86.c | 7 +++- include/linux/kvm_host.h | 1 + include/uapi/linux/kvm.h | 1 + virt/kvm/kvm_main.c | 6 +++ 9 files changed, 126 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 414b7fe1cf7b..fd4a84911355 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6207,6 +6207,22 @@ KVM_RUN_BUS_LOCK flag is used to distinguish between them. This capability can be used to check / enable 2nd DAWR feature provided by POWER10 processor. 
+7.24 KVM_CAP_VM_COPY_ENC_CONTEXT_FROM +------------------------------------- + +Architectures: x86 SEV enabled +Type: vm +Parameters: args[0] is the fd of the source vm +Returns: 0 on success; ENOTTY on error + +This capability enables userspace to copy encryption context from the vm +indicated by the fd to the vm this is called on. + +This is intended to support in-guest workloads scheduled by the host. This +allows the in-guest workload to maintain its own NPTs and keeps the two vms +from accidentally clobbering each other with interrupts and the like (separate +APIC/MSRs/etc). + 7.25 KVM_CAP_SGX_ATTRIBUTE ---------------------- @@ -6749,4 +6765,4 @@ in the kernel based fast path. If they can not be handled by the kernel, they will get passed on to user space. So user space still has to have an implementation for these despite the in kernel acceleration. -This capability is always enabled. \ No newline at end of file +This capability is always enabled. diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c5c96386cc76..6e195f7df4f0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1349,6 +1349,7 @@ struct kvm_x86_ops { int (*mem_enc_op)(struct kvm *kvm, void __user *argp); int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); + int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd); int (*get_msr_feature)(struct kvm_msr_entry *entry); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index bb4bf5ffb104..4bf79dbc3eeb 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -66,6 +66,11 @@ static int sev_flush_asids(void) return ret; } +static inline bool is_mirroring_enc_context(struct kvm *kvm) +{ + return !!to_kvm_svm(kvm)->sev_info.enc_context_owner; +} + /* Must be called with the sev_bitmap_lock held */ static bool __sev_recycle_asids(int min_asid, int max_asid) { @@ -1122,6 +1127,12 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) mutex_lock(&kvm->lock); + /* enc_context_owner handles all memory enc operations */ + if (is_mirroring_enc_context(kvm)) { + r = -EINVAL; + goto out; + } + switch (sev_cmd.id) { case KVM_SEV_ES_INIT: if (!sev_es) { @@ -1185,6 +1196,10 @@ int svm_register_enc_region(struct kvm *kvm, if (!sev_guest(kvm)) return -ENOTTY; + /* If kvm is mirroring encryption context it isn't responsible for it */ + if (is_mirroring_enc_context(kvm)) + return -EINVAL; + if (range->addr > ULONG_MAX || range->size > ULONG_MAX) return -EINVAL; @@ -1251,6 +1266,10 @@ int svm_unregister_enc_region(struct kvm *kvm, struct enc_region *region; int ret; + /* If kvm is mirroring encryption context it isn't responsible for it */ + if (is_mirroring_enc_context(kvm)) + return -EINVAL; + mutex_lock(&kvm->lock); if (!sev_guest(kvm)) { @@ -1281,6 +1300,71 @@ failed: return ret; } +int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd) +{ + struct file *source_kvm_file; + struct kvm *source_kvm; + struct kvm_sev_info *mirror_sev; + unsigned int asid; + int ret; + + source_kvm_file = fget(source_fd); + if (!file_is_kvm(source_kvm_file)) { + ret = -EBADF; + goto e_source_put; + } + + source_kvm = source_kvm_file->private_data; + mutex_lock(&source_kvm->lock); + + if (!sev_guest(source_kvm)) { + ret = -EINVAL; + goto e_source_unlock; + } + + /* Mirrors of mirrors should work, but let's not get silly */ + if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) { + ret = 
-EINVAL; + goto e_source_unlock; + } + + asid = to_kvm_svm(source_kvm)->sev_info.asid; + + /* + * The mirror kvm holds an enc_context_owner ref so its asid can't + * disappear until we're done with it + */ + kvm_get_kvm(source_kvm); + + fput(source_kvm_file); + mutex_unlock(&source_kvm->lock); + mutex_lock(&kvm->lock); + + if (sev_guest(kvm)) { + ret = -EINVAL; + goto e_mirror_unlock; + } + + /* Set enc_context_owner and copy its encryption context over */ + mirror_sev = &to_kvm_svm(kvm)->sev_info; + mirror_sev->enc_context_owner = source_kvm; + mirror_sev->asid = asid; + mirror_sev->active = true; + + mutex_unlock(&kvm->lock); + return 0; + +e_mirror_unlock: + mutex_unlock(&kvm->lock); + kvm_put_kvm(source_kvm); + return ret; +e_source_unlock: + mutex_unlock(&source_kvm->lock); +e_source_put: + fput(source_kvm_file); + return ret; +} + void sev_vm_destroy(struct kvm *kvm) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -1290,6 +1374,12 @@ void sev_vm_destroy(struct kvm *kvm) if (!sev_guest(kvm)) return; + /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */ + if (is_mirroring_enc_context(kvm)) { + kvm_put_kvm(sev->enc_context_owner); + return; + } + mutex_lock(&kvm->lock); /* diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 86e42d42637e..cd8c333ed2dc 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4589,6 +4589,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .mem_enc_reg_region = svm_register_enc_region, .mem_enc_unreg_region = svm_unregister_enc_region, + .vm_copy_enc_context_from = svm_vm_copy_asid_from, + .can_emulate_instruction = svm_can_emulate_instruction, .apic_init_signal_blocked = svm_apic_init_signal_blocked, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 7deb7a057004..454da1c1d9b7 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -68,6 +68,7 @@ struct kvm_sev_info { unsigned long pages_locked; /* Number of pages locked */ struct list_head regions_list; /* List of registered regions */ u64 ap_jump_table; /* SEV-ES AP Jump Table address */ + struct kvm *enc_context_owner; /* Owner of copied encryption context */ }; struct kvm_svm { @@ -580,6 +581,7 @@ int svm_register_enc_region(struct kvm *kvm, struct kvm_enc_region *range); int svm_unregister_enc_region(struct kvm *kvm, struct kvm_enc_region *range); +int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd); void pre_sev_run(struct vcpu_svm *svm, int cpu); void __init sev_hardware_setup(void); void sev_hardware_teardown(void); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d4cdab15238b..a5eeca55810f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3808,6 +3808,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) #ifdef CONFIG_X86_SGX_KVM case KVM_CAP_SGX_ATTRIBUTE: #endif + case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: r = 1; break; case KVM_CAP_SET_GUEST_DEBUG2: @@ -4714,7 +4715,6 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, kvm_update_pv_runtime(vcpu); return 0; - default: return -EINVAL; } @@ -5413,6 +5413,11 @@ split_irqchip_unlock: break; } #endif + case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: + r = -EINVAL; + if (kvm_x86_ops.vm_copy_enc_context_from) + r = kvm_x86_ops.vm_copy_enc_context_from(kvm, cap->args[0]); + return r; default: r = -EINVAL; break; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 82b066db37cb..c306b4c3674e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -654,6 +654,7 @@ void 
kvm_exit(void); void kvm_get_kvm(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); +bool file_is_kvm(struct file *file); void kvm_put_kvm_no_destroy(struct kvm *kvm); static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 162b7e044c8b..37f0a329da6a 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1080,6 +1080,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_DAWR1 194 #define KVM_CAP_SET_GUEST_DEBUG2 195 #define KVM_CAP_SGX_ATTRIBUTE 196 +#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 545a28387c33..48c1e4842908 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4197,6 +4197,12 @@ static struct file_operations kvm_vm_fops = { KVM_COMPAT(kvm_vm_compat_ioctl), }; +bool file_is_kvm(struct file *file) +{ + return file && file->f_op == &kvm_vm_fops; +} +EXPORT_SYMBOL_GPL(file_is_kvm); + static int kvm_dev_ioctl_create_vm(unsigned long type) { int r; -- cgit v1.2.3-71-gd317 From 52acd22faa1af8a0514ccd075a6978ac97986425 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Fri, 16 Apr 2021 11:08:10 +0800 Subject: KVM: Boost vCPU candidate in user mode which is delivering interrupt Both lock holder vCPU and IPI receiver that has halted are condidate for boost. However, the PLE handler was originally designed to deal with the lock holder preemption problem. The Intel PLE occurs when the spinlock waiter is in kernel mode. This assumption doesn't hold for IPI receiver, they can be in either kernel or user mode. the vCPU candidate in user mode will not be boosted even if they should respond to IPIs. Some benchmarks like pbzip2, swaptions etc do the TLB shootdown in kernel mode and most of the time they are running in user mode. It can lead to a large number of continuous PLE events because the IPI sender causes PLE events repeatedly until the receiver is scheduled while the receiver is not candidate for a boost. This patch boosts the vCPU candidiate in user mode which is delivery interrupt. We can observe the speed of pbzip2 improves 10% in 96 vCPUs VM in over-subscribe scenario (The host machine is 2 socket, 48 cores, 96 HTs Intel CLX box). There is no performance regression for other benchmarks like Unixbench spawn (most of the time contend read/write lock in kernel mode), ebizzy (most of the time contend read/write sem and TLB shoodtdown in kernel mode). 
Signed-off-by: Wanpeng Li Message-Id: <1618542490-14756-1-git-send-email-wanpengli@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 8 ++++++++ include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 8 +++++++- 3 files changed, 16 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a5eeca55810f..c9ba6f2d9bcd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11093,6 +11093,14 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) return false; } +bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) + return true; + + return false; +} + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) { return vcpu->arch.preempted_in_kernel; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c306b4c3674e..8895b95b6a22 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -960,6 +960,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); +bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); int kvm_arch_post_init_vm(struct kvm *kvm); void kvm_arch_pre_destroy_vm(struct kvm *kvm); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 48c1e4842908..2799c6660cce 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3137,6 +3137,11 @@ static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) return false; } +bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) +{ + return false; +} + void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) { struct kvm *kvm = me->kvm; @@ -3170,7 +3175,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) !vcpu_dy_runnable(vcpu)) continue; if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && - !kvm_arch_vcpu_in_kernel(vcpu)) + !kvm_arch_dy_has_pending_interrupt(vcpu) && + !kvm_arch_vcpu_in_kernel(vcpu)) continue; if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) continue; -- cgit v1.2.3-71-gd317 From 4cfdd47d6d95aca4fb8d6cfbe73392472d353f82 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Thu, 15 Apr 2021 15:53:14 +0000 Subject: KVM: SVM: Add KVM_SEV SEND_START command The command is used to create an outgoing SEV guest encryption context. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Paolo Bonzini Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Steve Rutherford Reviewed-by: Venu Busireddy Signed-off-by: Brijesh Singh Signed-off-by: Ashish Kalra Message-Id: <2f1686d0164e0f1b3d6a41d620408393e0a48376.1618498113.git.ashish.kalra@amd.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/amd-memory-encryption.rst | 30 ++++++ arch/x86/kvm/svm/sev.c | 128 +++++++++++++++++++++++ include/linux/psp-sev.h | 8 +- include/uapi/linux/kvm.h | 12 +++ 4 files changed, 174 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index 34ce2d1fcb89..db5c3fb2bab5 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -290,6 +290,36 @@ Returns: 0 on success, -negative on error __u32 len; }; +11. 
KVM_SEV_SEND_START +---------------------- + +The KVM_SEV_SEND_START command can be used by the hypervisor to create an +outgoing guest encryption context. + +If session_len is zero on entry, the length of the guest session information is +written to session_len and all other fields are not used. + +Parameters (in): struct kvm_sev_send_start + +Returns: 0 on success, -negative on error + +:: + struct kvm_sev_send_start { + __u32 policy; /* guest policy */ + + __u64 pdh_cert_uaddr; /* platform Diffie-Hellman certificate */ + __u32 pdh_cert_len; + + __u64 plat_certs_uaddr; /* platform certificate chain */ + __u32 plat_certs_len; + + __u64 amd_certs_uaddr; /* AMD certificate */ + __u32 amd_certs_len; + + __u64 session_uaddr; /* Guest session information */ + __u32 session_len; + }; + References ========== diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 4bf79dbc3eeb..9f5312956d5e 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1111,6 +1111,131 @@ e_free: return ret; } +/* Userspace wants to query session length. */ +static int +__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_sev_send_start *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_start *data; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); + if (data == NULL) + return -ENOMEM; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, data, &argp->error); + if (ret < 0) + goto out; + + params->session_len = data->session_len; + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, + sizeof(struct kvm_sev_send_start))) + ret = -EFAULT; + +out: + kfree(data); + return ret; +} + +static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_start *data; + struct kvm_sev_send_start params; + void *amd_certs, *session_data; + void *pdh_cert, *plat_certs; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_send_start))) + return -EFAULT; + + /* if session_len is zero, userspace wants to query the session length */ + if (!params.session_len) + return __sev_send_start_query_session_length(kvm, argp, + ¶ms); + + /* some sanity checks */ + if (!params.pdh_cert_uaddr || !params.pdh_cert_len || + !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE) + return -EINVAL; + + /* allocate the memory to hold the session data blob */ + session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT); + if (!session_data) + return -ENOMEM; + + /* copy the certificate blobs from userspace */ + pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr, + params.pdh_cert_len); + if (IS_ERR(pdh_cert)) { + ret = PTR_ERR(pdh_cert); + goto e_free_session; + } + + plat_certs = psp_copy_user_blob(params.plat_certs_uaddr, + params.plat_certs_len); + if (IS_ERR(plat_certs)) { + ret = PTR_ERR(plat_certs); + goto e_free_pdh; + } + + amd_certs = psp_copy_user_blob(params.amd_certs_uaddr, + params.amd_certs_len); + if (IS_ERR(amd_certs)) { + ret = PTR_ERR(amd_certs); + goto e_free_plat_cert; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); + if (data == NULL) { + ret = -ENOMEM; + goto e_free_amd_cert; + } + + /* populate the FW SEND_START field with system physical address */ + data->pdh_cert_address = __psp_pa(pdh_cert); + data->pdh_cert_len = params.pdh_cert_len; + data->plat_certs_address = 
__psp_pa(plat_certs); + data->plat_certs_len = params.plat_certs_len; + data->amd_certs_address = __psp_pa(amd_certs); + data->amd_certs_len = params.amd_certs_len; + data->session_address = __psp_pa(session_data); + data->session_len = params.session_len; + data->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, data, &argp->error); + + if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr, + session_data, params.session_len)) { + ret = -EFAULT; + goto e_free; + } + + params.policy = data->policy; + params.session_len = data->session_len; + if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, + sizeof(struct kvm_sev_send_start))) + ret = -EFAULT; + +e_free: + kfree(data); +e_free_amd_cert: + kfree(amd_certs); +e_free_plat_cert: + kfree(plat_certs); +e_free_pdh: + kfree(pdh_cert); +e_free_session: + kfree(session_data); + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1173,6 +1298,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_GET_ATTESTATION_REPORT: r = sev_get_attestation_report(kvm, &sev_cmd); break; + case KVM_SEV_SEND_START: + r = sev_send_start(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index b801ead1e2bb..73da511b9423 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -326,11 +326,11 @@ struct sev_data_send_start { u64 pdh_cert_address; /* In */ u32 pdh_cert_len; /* In */ u32 reserved1; - u64 plat_cert_address; /* In */ - u32 plat_cert_len; /* In */ + u64 plat_certs_address; /* In */ + u32 plat_certs_len; /* In */ u32 reserved2; - u64 amd_cert_address; /* In */ - u32 amd_cert_len; /* In */ + u64 amd_certs_address; /* In */ + u32 amd_certs_len; /* In */ u32 reserved3; u64 session_address; /* In */ u32 session_len; /* In/Out */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 37f0a329da6a..11b3e528d35d 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1732,6 +1732,18 @@ struct kvm_sev_attestation_report { __u32 len; }; +struct kvm_sev_send_start { + __u32 policy; + __u64 pdh_cert_uaddr; + __u32 pdh_cert_len; + __u64 plat_certs_uaddr; + __u32 plat_certs_len; + __u64 amd_certs_uaddr; + __u32 amd_certs_len; + __u64 session_uaddr; + __u32 session_len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- cgit v1.2.3-71-gd317 From 5569e2e7a650dfffd4df7635662b2f92162d6501 Mon Sep 17 00:00:00 2001 From: Steve Rutherford Date: Tue, 20 Apr 2021 05:01:20 -0400 Subject: KVM: SVM: Add support for KVM_SEV_SEND_CANCEL command After completion of SEND_START, but before SEND_FINISH, the source VMM can issue the SEND_CANCEL command to stop a migration. This is necessary so that a cancelled migration can restart with a new target later. 
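From userspace this command is reached through the KVM_MEMORY_ENCRYPT_OP ioctl on the VM fd; a minimal hedged sketch (the fd handling and error reporting are assumptions for illustration, not part of this patch):

/* Hedged sketch: cancelling an in-flight SEV migration from userspace.
 * Assumes vm_fd is a KVM VM fd and sev_fd an open /dev/sev handle, as for
 * the other KVM_SEV_* commands; SEND_CANCEL needs no data payload.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int sev_send_cancel(int vm_fd, int sev_fd)
{
	struct kvm_sev_cmd cmd = {
		.id = KVM_SEV_SEND_CANCEL,	/* enum value added by this patch */
		.sev_fd = (__u32)sev_fd,
	};

	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0) {
		fprintf(stderr, "SEND_CANCEL failed, fw error %u\n", cmd.error);
		return -1;
	}
	return 0;
}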
Reviewed-by: Nathan Tempelman Reviewed-by: Brijesh Singh Signed-off-by: Steve Rutherford Message-Id: <20210412194408.2458827-1-srutherford@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/amd-memory-encryption.rst | 9 +++++++++ arch/x86/kvm/svm/sev.c | 23 +++++++++++++++++++++++ drivers/crypto/ccp/sev-dev.c | 1 + include/linux/psp-sev.h | 10 ++++++++++ include/uapi/linux/kvm.h | 2 ++ 5 files changed, 45 insertions(+) (limited to 'include/linux') diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index 03f2518cfbeb..c36a12975763 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -356,6 +356,15 @@ issued by the hypervisor to delete the encryption context. Returns: 0 on success, -negative on error +14. KVM_SEV_SEND_CANCEL +------------------------ + +After completion of SEND_START, but before SEND_FINISH, the source VMM can issue the +SEND_CANCEL command to stop a migration. This is necessary so that a cancelled +migration can restart with a new target later. + +Returns: 0 on success, -negative on error + References ========== diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index f25c52a2dc14..552f47787143 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1377,6 +1377,26 @@ static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_cancel *data; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, data, &argp->error); + + kfree(data); + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1448,6 +1468,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_SEND_FINISH: r = sev_send_finish(kvm, &sev_cmd); break; + case KVM_SEV_SEND_CANCEL: + r = sev_send_cancel(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index cb9b4c4e371e..4172a1afa0db 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -129,6 +129,7 @@ static int sev_cmd_buffer_len(int cmd) case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report); + case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel); default: return 0; } diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 73da511b9423..d48a7192e881 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -73,6 +73,7 @@ enum sev_cmd { SEV_CMD_SEND_UPDATE_DATA = 0x041, SEV_CMD_SEND_UPDATE_VMSA = 0x042, SEV_CMD_SEND_FINISH = 0x043, + SEV_CMD_SEND_CANCEL = 0x044, /* Guest migration commands (incoming) */ SEV_CMD_RECEIVE_START = 0x050, @@ -392,6 +393,15 @@ struct sev_data_send_finish { u32 handle; /* In */ } __packed; +/** + * struct sev_data_send_cancel - SEND_CANCEL command parameters + * + * @handle: handle of the VM to process + */ +struct sev_data_send_cancel { + u32 handle; /* In */ +} __packed; + /** * struct sev_data_receive_start - RECEIVE_START command parameters * diff --git 
a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index d07be69f402f..5d1adeb13739 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1674,6 +1674,8 @@ enum sev_cmd_id { KVM_SEV_CERT_EXPORT, /* Attestation report */ KVM_SEV_GET_ATTESTATION_REPORT, + /* Guest Migration Extension */ + KVM_SEV_SEND_CANCEL, KVM_SEV_NR_MAX, }; -- cgit v1.2.3-71-gd317 From 1de15e99a242a66ef4f803fe1ad357f86b3a75f8 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 12 Apr 2021 17:07:39 +0300 Subject: pinctrl: Keep enum pin_config_param ordered by name It seems the ordering is by name. Keep it that way. Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20210412140741.39946-1-andriy.shevchenko@linux.intel.com Signed-off-by: Linus Walleij --- include/linux/pinctrl/pinconf-generic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 6aeb711f7cd1..188db8d84162 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -90,6 +90,7 @@ struct pinctrl_map; * value on the line. Use argument 1 to indicate high level, argument 0 to * indicate low level. (Please see Documentation/driver-api/pinctl.rst, * section "GPIO mode pitfalls" for a discussion around this parameter.) + * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power * supplies, the argument to this parameter (on a custom format) tells * the driver which alternative power source to use. @@ -101,7 +102,6 @@ struct pinctrl_map; * or latch delay (on outputs) this parameter (in a custom format) * specifies the clock skew or latch delay. It typically controls how * many double inverters are put in front of the line. - * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if * you need to pass in custom configurations to the pin controller, use * PIN_CONFIG_END+1 as the base offset. @@ -127,11 +127,11 @@ enum pin_config_param { PIN_CONFIG_LOW_POWER_MODE, PIN_CONFIG_OUTPUT_ENABLE, PIN_CONFIG_OUTPUT, + PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_POWER_SOURCE, PIN_CONFIG_SLEEP_HARDWARE_STATE, PIN_CONFIG_SLEW_RATE, PIN_CONFIG_SKEW_DELAY, - PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_END = 0x7F, PIN_CONFIG_MAX = 0xFF, }; -- cgit v1.2.3-71-gd317 From 31f9a421a1d01538776db37ec9c5419a3a49d650 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 12 Apr 2021 17:07:40 +0300 Subject: pinctrl: Introduce MODE group in enum pin_config_param Better to have a MODE group of settings to keep them together when ordered alphabetically. Hence, rename PIN_CONFIG_LOW_POWER_MODE to PIN_CONFIG_MODE_LOW_POWER. 
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20210412140741.39946-2-andriy.shevchenko@linux.intel.com Signed-off-by: Linus Walleij --- drivers/pinctrl/pinconf-generic.c | 6 +++--- drivers/pinctrl/pinctrl-lpc18xx.c | 4 ++-- drivers/pinctrl/pinctrl-single.c | 6 +++--- drivers/pinctrl/pinctrl-zynq.c | 4 ++-- drivers/pinctrl/pxa/pinctrl-pxa2xx.c | 4 ++-- drivers/soc/tegra/pmc.c | 4 ++-- include/linux/pinctrl/pinconf-generic.h | 4 ++-- 7 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index 1e225d513988..22e8d4c4040e 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -43,7 +43,7 @@ static const struct pin_config_item conf_items[] = { PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false), PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false), PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false), - PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode", true), + PCONFDUMP(PIN_CONFIG_MODE_LOW_POWER, "pin low power", "mode", true), PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false), PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true), PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true), @@ -174,8 +174,8 @@ static const struct pinconf_generic_params dt_params[] = { { "input-schmitt", PIN_CONFIG_INPUT_SCHMITT, 0 }, { "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 }, { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, - { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 }, - { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 }, + { "low-power-disable", PIN_CONFIG_MODE_LOW_POWER, 0 }, + { "low-power-enable", PIN_CONFIG_MODE_LOW_POWER, 1 }, { "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 }, { "output-enable", PIN_CONFIG_OUTPUT_ENABLE, 1 }, { "output-high", PIN_CONFIG_OUTPUT, 1, }, diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c index 7b2f885e68bd..ed9bf2c89998 100644 --- a/drivers/pinctrl/pinctrl-lpc18xx.c +++ b/drivers/pinctrl/pinctrl-lpc18xx.c @@ -646,7 +646,7 @@ static const struct pin_config_item lpc18xx_conf_items[ARRAY_SIZE(lpc18xx_params static int lpc18xx_pconf_get_usb1(enum pin_config_param param, int *arg, u32 reg) { switch (param) { - case PIN_CONFIG_LOW_POWER_MODE: + case PIN_CONFIG_MODE_LOW_POWER: if (reg & LPC18XX_SCU_USB1_EPWR) *arg = 0; else @@ -904,7 +904,7 @@ static int lpc18xx_pconf_set_usb1(struct pinctrl_dev *pctldev, u32 param_val, u32 *reg) { switch (param) { - case PIN_CONFIG_LOW_POWER_MODE: + case PIN_CONFIG_MODE_LOW_POWER: if (param_val) *reg &= ~LPC18XX_SCU_USB1_EPWR; else diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 39aac32ed09c..2c9c9835f375 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -533,7 +533,7 @@ static int pcs_pinconf_get(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_DRIVE_STRENGTH: case PIN_CONFIG_SLEW_RATE: - case PIN_CONFIG_LOW_POWER_MODE: + case PIN_CONFIG_MODE_LOW_POWER: default: *config = data; break; @@ -571,7 +571,7 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev, case PIN_CONFIG_INPUT_SCHMITT: case PIN_CONFIG_DRIVE_STRENGTH: case PIN_CONFIG_SLEW_RATE: - case PIN_CONFIG_LOW_POWER_MODE: + case PIN_CONFIG_MODE_LOW_POWER: shift = ffs(func->conf[i].mask) - 1; data &= ~func->conf[i].mask; data |= (arg << shift) & func->conf[i].mask; 
@@ -919,7 +919,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, { "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, }, { "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, }, { "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, }, - { "pinctrl-single,low-power-mode", PIN_CONFIG_LOW_POWER_MODE, }, + { "pinctrl-single,low-power-mode", PIN_CONFIG_MODE_LOW_POWER, }, }; static const struct pcs_conf_type prop4[] = { { "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, }, diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c index c6052a0e827a..5fb924a2eedd 100644 --- a/drivers/pinctrl/pinctrl-zynq.c +++ b/drivers/pinctrl/pinctrl-zynq.c @@ -1016,7 +1016,7 @@ static int zynq_pinconf_cfg_get(struct pinctrl_dev *pctldev, case PIN_CONFIG_SLEW_RATE: arg = !!(reg & ZYNQ_PINCONF_SPEED); break; - case PIN_CONFIG_LOW_POWER_MODE: + case PIN_CONFIG_MODE_LOW_POWER: { enum zynq_io_standards iostd = zynq_pinconf_iostd_get(reg); @@ -1087,7 +1087,7 @@ static int zynq_pinconf_cfg_set(struct pinctrl_dev *pctldev, reg &= ~ZYNQ_PINCONF_IOTYPE_MASK; reg |= arg << ZYNQ_PINCONF_IOTYPE_SHIFT; break; - case PIN_CONFIG_LOW_POWER_MODE: + case PIN_CONFIG_MODE_LOW_POWER: if (arg) reg |= ZYNQ_PINCONF_DISABLE_RECVR; else diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c index eab029a21643..d2568dab8c78 100644 --- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c +++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c @@ -194,7 +194,7 @@ static int pxa2xx_pconf_group_get(struct pinctrl_dev *pctldev, spin_lock_irqsave(&pctl->lock, flags); val = readl_relaxed(pgsr) & BIT(pin % 32); - *config = val ? PIN_CONFIG_LOW_POWER_MODE : 0; + *config = val ? PIN_CONFIG_MODE_LOW_POWER : 0; spin_unlock_irqrestore(&pctl->lock, flags); dev_dbg(pctl->dev, "get sleep gpio state(pin=%d) %d\n", @@ -217,7 +217,7 @@ static int pxa2xx_pconf_group_set(struct pinctrl_dev *pctldev, for (i = 0; i < num_configs; i++) { switch (pinconf_to_config_param(configs[i])) { - case PIN_CONFIG_LOW_POWER_MODE: + case PIN_CONFIG_MODE_LOW_POWER: is_set = pinconf_to_config_argument(configs[i]); break; default: diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index df9a5ca8c99c..46bcdbef1675 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1738,7 +1738,7 @@ static int tegra_io_pad_pinconf_get(struct pinctrl_dev *pctl_dev, arg = ret; break; - case PIN_CONFIG_LOW_POWER_MODE: + case PIN_CONFIG_MODE_LOW_POWER: ret = tegra_io_pad_is_powered(pmc, pad->id); if (ret < 0) return ret; @@ -1775,7 +1775,7 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev, arg = pinconf_to_config_argument(configs[i]); switch (param) { - case PIN_CONFIG_LOW_POWER_MODE: + case PIN_CONFIG_MODE_LOW_POWER: if (arg) err = tegra_io_pad_power_disable(pad->id); else diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 188db8d84162..189e701832ea 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -76,7 +76,7 @@ struct pinctrl_map; * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, * schmitt-trigger mode is disabled. 
- * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power + * @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power * operation, if several modes of operation are supported these can be * passed in the argument on a custom form, else just use argument 1 * to indicate low power mode, argument 0 turns low power mode off. @@ -124,7 +124,7 @@ enum pin_config_param { PIN_CONFIG_INPUT_ENABLE, PIN_CONFIG_INPUT_SCHMITT, PIN_CONFIG_INPUT_SCHMITT_ENABLE, - PIN_CONFIG_LOW_POWER_MODE, + PIN_CONFIG_MODE_LOW_POWER, PIN_CONFIG_OUTPUT_ENABLE, PIN_CONFIG_OUTPUT, PIN_CONFIG_PERSIST_STATE, -- cgit v1.2.3-71-gd317 From 09e11caaa4cffac681963688b774e1aa3063b3a9 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 12 Apr 2021 17:07:41 +0300 Subject: pinctrl: Add PIN_CONFIG_MODE_PWM to enum pin_config_param It seems that we will have more and more pin controllers that support PWM function on the (selected) pins. Due to it being a part of pin controller IP the idea is to have some code that will switch the mode and attach the corresponding driver, for example, via using it as a library. Meanwhile, put a corresponding item to the pin_config_param enumerator. Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20210412140741.39946-3-andriy.shevchenko@linux.intel.com Signed-off-by: Linus Walleij --- include/linux/pinctrl/pinconf-generic.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 189e701832ea..e18ab3d5908f 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -80,6 +80,7 @@ struct pinctrl_map; * operation, if several modes of operation are supported these can be * passed in the argument on a custom form, else just use argument 1 * to indicate low power mode, argument 0 turns low power mode off. + * @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM * @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode * without driving a value there. For most platforms this reduces to * enable the output buffers and then let the pin controller current @@ -125,6 +126,7 @@ enum pin_config_param { PIN_CONFIG_INPUT_SCHMITT, PIN_CONFIG_INPUT_SCHMITT_ENABLE, PIN_CONFIG_MODE_LOW_POWER, + PIN_CONFIG_MODE_PWM, PIN_CONFIG_OUTPUT_ENABLE, PIN_CONFIG_OUTPUT, PIN_CONFIG_PERSIST_STATE, -- cgit v1.2.3-71-gd317 From 85367040511f8402d7e4054d8c17b053c75e33ff Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Wed, 21 Apr 2021 23:45:26 +0800 Subject: scsi: blk-mq: Fix build warning when making htmldocs Fixes the following warning when running 'make htmldocs': include/linux/blk-mq.h:395: warning: Function parameter or member 'set_rq_budget_token' not described in 'blk_mq_ops' include/linux/blk-mq.h:395: warning: Function parameter or member 'get_rq_budget_token' not described in 'blk_mq_ops' [mkp: added warning messages] Link: https://lore.kernel.org/r/20210421154526.1954174-1-ming.lei@redhat.com Fixes: d022d18c045f ("scsi: blk-mq: Add callbacks for storing & retrieving budget token") Reported-by: Stephen Rothwell Signed-off-by: Ming Lei Signed-off-by: Martin K. 
Petersen --- include/linux/blk-mq.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 3bd3ee651143..359486940fa0 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -313,12 +313,12 @@ struct blk_mq_ops { */ void (*put_budget)(struct request_queue *, int); - /* - * @set_rq_budget_toekn: store rq's budget token + /** + * @set_rq_budget_token: store rq's budget token */ void (*set_rq_budget_token)(struct request *, int); - /* - * @get_rq_budget_toekn: retrieve rq's budget token + /** + * @get_rq_budget_token: retrieve rq's budget token */ int (*get_rq_budget_token)(struct request *); -- cgit v1.2.3-71-gd317 From fa989ae7c7b38efbc6c3370571fb8a6f7350029a Mon Sep 17 00:00:00 2001 From: Sai Krishna Potthuri Date: Thu, 22 Apr 2021 14:00:00 +0530 Subject: firmware: xilinx: Add pinctrl support Adding pinctrl support to query platform specific information (pins) from firmware. Signed-off-by: Sai Krishna Potthuri Acked-by: Michal Simek Link: https://lore.kernel.org/r/1619080202-31924-2-git-send-email-lakshmi.sai.krishna.potthuri@xilinx.com Signed-off-by: Linus Walleij --- drivers/firmware/xilinx/zynqmp.c | 114 +++++++++++++++++++++++++++++++++++ include/linux/firmware/xlnx-zynqmp.h | 90 +++++++++++++++++++++++++++ 2 files changed, 204 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 7eb9958662dd..fc01a3632815 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -811,6 +811,120 @@ int zynqmp_pm_fpga_get_status(u32 *value) } EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status); +/** + * zynqmp_pm_pinctrl_request - Request Pin from firmware + * @pin: Pin number to request + * + * This function requests pin from firmware. + * + * Return: Returns status, either success or error+reason. + */ +int zynqmp_pm_pinctrl_request(const u32 pin) +{ + return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request); + +/** + * zynqmp_pm_pinctrl_release - Inform firmware that Pin control is released + * @pin: Pin number to release + * + * This function release pin from firmware. + * + * Return: Returns status, either success or error+reason. + */ +int zynqmp_pm_pinctrl_release(const u32 pin) +{ + return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_release); + +/** + * zynqmp_pm_pinctrl_get_function - Read function id set for the given pin + * @pin: Pin number + * @id: Buffer to store function ID + * + * This function provides the function currently set for the given pin. + * + * Return: Returns status, either success or error+reason + */ +int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id) +{ + u32 ret_payload[PAYLOAD_ARG_CNT]; + int ret; + + if (!id) + return -EINVAL; + + ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0, + 0, 0, ret_payload); + *id = ret_payload[1]; + + return ret; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function); + +/** + * zynqmp_pm_pinctrl_set_function - Set requested function for the pin + * @pin: Pin number + * @id: Function ID to set + * + * This function sets requested function for the given pin. + * + * Return: Returns status, either success or error+reason. 
+ */ +int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id) +{ + return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id, + 0, 0, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_function); + +/** + * zynqmp_pm_pinctrl_get_config - Get configuration parameter for the pin + * @pin: Pin number + * @param: Parameter to get + * @value: Buffer to store parameter value + * + * This function gets requested configuration parameter for the given pin. + * + * Return: Returns status, either success or error+reason. + */ +int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, + u32 *value) +{ + u32 ret_payload[PAYLOAD_ARG_CNT]; + int ret; + + if (!value) + return -EINVAL; + + ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param, + 0, 0, ret_payload); + *value = ret_payload[1]; + + return ret; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_config); + +/** + * zynqmp_pm_pinctrl_set_config - Set configuration parameter for the pin + * @pin: Pin number + * @param: Parameter to set + * @value: Parameter value to set + * + * This function sets requested configuration parameter for the given pin. + * + * Return: Returns status, either success or error+reason. + */ +int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, + u32 value) +{ + return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin, + param, value, 0, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_config); + /** * zynqmp_pm_init_finalize() - PM call to inform firmware that the caller * master has initialized its own power management diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 71177b17eee5..8285a4bcfc2d 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -72,6 +72,12 @@ enum pm_api_id { PM_FPGA_LOAD = 22, PM_FPGA_GET_STATUS = 23, PM_GET_CHIPID = 24, + PM_PINCTRL_REQUEST = 28, + PM_PINCTRL_RELEASE = 29, + PM_PINCTRL_GET_FUNCTION = 30, + PM_PINCTRL_SET_FUNCTION = 31, + PM_PINCTRL_CONFIG_PARAM_GET = 32, + PM_PINCTRL_CONFIG_PARAM_SET = 33, PM_IOCTL = 34, PM_QUERY_DATA = 35, PM_CLOCK_ENABLE = 36, @@ -122,6 +128,12 @@ enum pm_query_id { PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS = 3, PM_QID_CLOCK_GET_PARENTS = 4, PM_QID_CLOCK_GET_ATTRIBUTES = 5, + PM_QID_PINCTRL_GET_NUM_PINS = 6, + PM_QID_PINCTRL_GET_NUM_FUNCTIONS = 7, + PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS = 8, + PM_QID_PINCTRL_GET_FUNCTION_NAME = 9, + PM_QID_PINCTRL_GET_FUNCTION_GROUPS = 10, + PM_QID_PINCTRL_GET_PIN_GROUPS = 11, PM_QID_CLOCK_GET_NUM_CLOCKS = 12, PM_QID_CLOCK_GET_MAX_DIVISOR = 13, }; @@ -285,6 +297,44 @@ enum dll_reset_type { PM_DLL_RESET_PULSE = 2, }; +enum pm_pinctrl_config_param { + PM_PINCTRL_CONFIG_SLEW_RATE = 0, + PM_PINCTRL_CONFIG_BIAS_STATUS = 1, + PM_PINCTRL_CONFIG_PULL_CTRL = 2, + PM_PINCTRL_CONFIG_SCHMITT_CMOS = 3, + PM_PINCTRL_CONFIG_DRIVE_STRENGTH = 4, + PM_PINCTRL_CONFIG_VOLTAGE_STATUS = 5, + PM_PINCTRL_CONFIG_TRI_STATE = 6, + PM_PINCTRL_CONFIG_MAX = 7, +}; + +enum pm_pinctrl_slew_rate { + PM_PINCTRL_SLEW_RATE_FAST = 0, + PM_PINCTRL_SLEW_RATE_SLOW = 1, +}; + +enum pm_pinctrl_bias_status { + PM_PINCTRL_BIAS_DISABLE = 0, + PM_PINCTRL_BIAS_ENABLE = 1, +}; + +enum pm_pinctrl_pull_ctrl { + PM_PINCTRL_BIAS_PULL_DOWN = 0, + PM_PINCTRL_BIAS_PULL_UP = 1, +}; + +enum pm_pinctrl_schmitt_cmos { + PM_PINCTRL_INPUT_TYPE_CMOS = 0, + PM_PINCTRL_INPUT_TYPE_SCHMITT = 1, +}; + +enum pm_pinctrl_drive_strength { + PM_PINCTRL_DRIVE_STRENGTH_2MA = 0, + PM_PINCTRL_DRIVE_STRENGTH_4MA = 1, + PM_PINCTRL_DRIVE_STRENGTH_8MA = 2, + PM_PINCTRL_DRIVE_STRENGTH_12MA 
= 3, +}; + enum zynqmp_pm_shutdown_type { ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0, ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1, @@ -353,6 +403,14 @@ int zynqmp_pm_write_pggs(u32 index, u32 value); int zynqmp_pm_read_pggs(u32 index, u32 *value); int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype); int zynqmp_pm_set_boot_health_status(u32 value); +int zynqmp_pm_pinctrl_request(const u32 pin); +int zynqmp_pm_pinctrl_release(const u32 pin); +int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id); +int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id); +int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, + u32 *value); +int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, + u32 value); #else static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void) { @@ -537,6 +595,38 @@ static inline int zynqmp_pm_set_boot_health_status(u32 value) { return -ENODEV; } + +static inline int zynqmp_pm_pinctrl_request(const u32 pin) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_release(const u32 pin) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, + u32 *value) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, + u32 value) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ -- cgit v1.2.3-71-gd317 From d60d6e7adfc3814f6de03c978ff1daab21478f87 Mon Sep 17 00:00:00 2001 From: Thara Gopinath Date: Thu, 21 Jan 2021 21:34:05 -0500 Subject: thermal/core: Remove thermal_notify_framework thermal_notify_framework just updates for a single trip point where as thermal_zone_device_update does other bookkeeping like updating the temperature of the thermal zone and setting the next trip point. The only driver that was using thermal_notify_framework was updated in the previous patch to use thermal_zone_device_update instead. Since there are no users for thermal_notify_framework remove it. Signed-off-by: Thara Gopinath Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20210122023406.3500424-3-thara.gopinath@linaro.org --- drivers/thermal/thermal_core.c | 18 ------------------ include/linux/thermal.h | 5 ----- 2 files changed, 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 3566fd291399..d20b25f40d19 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -561,24 +561,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz, } EXPORT_SYMBOL_GPL(thermal_zone_device_update); -/** - * thermal_notify_framework - Sensor drivers use this API to notify framework - * @tz: thermal zone device - * @trip: indicates which trip point has been crossed - * - * This function handles the trip events from sensor drivers. It starts - * throttling the cooling devices according to the policy configured. - * For CRITICAL and HOT trip points, this notifies the respective drivers, - * and does actual throttling for other trip points i.e ACTIVE and PASSIVE. - * The throttling policy is based on the configured platform data; if no - * platform data is provided, this uses the step_wise throttling policy. 
- */ -void thermal_notify_framework(struct thermal_zone_device *tz, int trip) -{ - handle_thermal_trip(tz, trip); -} -EXPORT_SYMBOL_GPL(thermal_notify_framework); - static void thermal_zone_device_check(struct work_struct *work) { struct thermal_zone_device *tz = container_of(work, struct diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 169502164364..d296f3b88fb9 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -390,7 +390,6 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -void thermal_notify_framework(struct thermal_zone_device *, int); int thermal_zone_device_enable(struct thermal_zone_device *tz); int thermal_zone_device_disable(struct thermal_zone_device *tz); void thermal_zone_device_critical(struct thermal_zone_device *tz); @@ -436,10 +435,6 @@ static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } -static inline void thermal_notify_framework(struct thermal_zone_device *tz, - int trip) -{ } - static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) { return -ENODEV; } -- cgit v1.2.3-71-gd317 From 7f318847a0f37b96d8927e8d30ae7b8f149b11f1 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 14 Apr 2021 14:44:09 +0100 Subject: perf: Get rid of oprofile leftovers perf_pmu_name() and perf_num_counters() are unused. Drop them. Signed-off-by: Marc Zyngier Acked-by: Will Deacon Link: https://lore.kernel.org/r/20210414134409.1266357-6-maz@kernel.org --- include/linux/perf_event.h | 2 -- kernel/events/core.c | 5 ----- 2 files changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3f7f89ea5e51..51154ed9a206 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -951,8 +951,6 @@ extern void perf_event_itrace_started(struct perf_event *event); extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); extern void perf_pmu_unregister(struct pmu *pmu); -extern int perf_num_counters(void); -extern const char *perf_pmu_name(void); extern void __perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task); extern void __perf_event_task_sched_out(struct task_struct *prev, diff --git a/kernel/events/core.c b/kernel/events/core.c index 03db40f6cba9..88cb0ba5690b 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -580,11 +580,6 @@ static u64 perf_event_time(struct perf_event *event); void __weak perf_event_print_debug(void) { } -extern __weak const char *perf_pmu_name(void) -{ - return "pmu"; -} - static inline u64 perf_clock(void) { return local_clock(); -- cgit v1.2.3-71-gd317 From 1aea7808372eee4ad01f98e064c88c57f1e94855 Mon Sep 17 00:00:00 2001 From: Casey Schaufler Date: Thu, 22 Apr 2021 17:41:15 +0200 Subject: LSM: Infrastructure management of the superblock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move management of the superblock->sb_security blob out of the individual security modules and into the security infrastructure. Instead of allocating the blobs from within the modules, the modules tell the infrastructure how much space is required, and the space is allocated there. 
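To illustrate the new pattern (a sketch only; the "demo" names below are hypothetical and not part of this patch): a module now declares how much superblock blob space it needs in its struct lsm_blob_sizes and resolves its private slice from the shared sb->s_security allocation by offset, instead of kzalloc()ing and kfree()ing its own blob:

#include <linux/lsm_hooks.h>

/* Hypothetical module-private superblock data. */
struct demo_superblock {
	u32 flags;
};

/* Declare the space this module needs; the field is rewritten to the
 * module's offset into the composite blob during LSM initialization. */
struct lsm_blob_sizes demo_blob_sizes __lsm_ro_after_init = {
	.lbs_superblock = sizeof(struct demo_superblock),
};

/* Locate this module's slice inside the infrastructure-owned blob. */
static inline struct demo_superblock *demo_superblock(const struct super_block *sb)
{
	return sb->s_security + demo_blob_sizes.lbs_superblock;
}

/* sb_alloc_security hook: the blob already exists and is zeroed by the
 * core's kzalloc(), so only module-specific initialization remains. */
static int demo_sb_alloc_security(struct super_block *sb)
{
	struct demo_superblock *sbsp = demo_superblock(sb);

	sbsp->flags = 0;
	return 0;
}

Because the infrastructure both allocates and frees sb->s_security, per-module sb_free_security implementations that only freed the blob (as in SELinux and Smack) can be removed.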
Cc: John Johansen Signed-off-by: Casey Schaufler Signed-off-by: Mickaël Salaün Reviewed-by: Stephen Smalley Acked-by: Serge Hallyn Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/20210422154123.13086-6-mic@digikod.net Signed-off-by: James Morris --- include/linux/lsm_hooks.h | 1 + security/security.c | 46 ++++++++++++++++++++++++++----- security/selinux/hooks.c | 58 +++++++++++++++------------------------ security/selinux/include/objsec.h | 6 ++++ security/selinux/ss/services.c | 3 +- security/smack/smack.h | 6 ++++ security/smack/smack_lsm.c | 35 ++++++----------------- 7 files changed, 85 insertions(+), 70 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index fb7f3193753d..75715998a95f 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1573,6 +1573,7 @@ struct lsm_blob_sizes { int lbs_cred; int lbs_file; int lbs_inode; + int lbs_superblock; int lbs_ipc; int lbs_msg_msg; int lbs_task; diff --git a/security/security.c b/security/security.c index 5ac96b16f8fa..e9c29480eb18 100644 --- a/security/security.c +++ b/security/security.c @@ -203,6 +203,7 @@ static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed) lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode); lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc); lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg); + lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock); lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task); } @@ -333,12 +334,13 @@ static void __init ordered_lsm_init(void) for (lsm = ordered_lsms; *lsm; lsm++) prepare_lsm(*lsm); - init_debug("cred blob size = %d\n", blob_sizes.lbs_cred); - init_debug("file blob size = %d\n", blob_sizes.lbs_file); - init_debug("inode blob size = %d\n", blob_sizes.lbs_inode); - init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc); - init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg); - init_debug("task blob size = %d\n", blob_sizes.lbs_task); + init_debug("cred blob size = %d\n", blob_sizes.lbs_cred); + init_debug("file blob size = %d\n", blob_sizes.lbs_file); + init_debug("inode blob size = %d\n", blob_sizes.lbs_inode); + init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc); + init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg); + init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock); + init_debug("task blob size = %d\n", blob_sizes.lbs_task); /* * Create any kmem_caches needed for blobs @@ -670,6 +672,27 @@ static void __init lsm_early_task(struct task_struct *task) panic("%s: Early task alloc failed.\n", __func__); } +/** + * lsm_superblock_alloc - allocate a composite superblock blob + * @sb: the superblock that needs a blob + * + * Allocate the superblock blob for all the modules + * + * Returns 0, or -ENOMEM if memory can't be allocated. 
+ */ +static int lsm_superblock_alloc(struct super_block *sb) +{ + if (blob_sizes.lbs_superblock == 0) { + sb->s_security = NULL; + return 0; + } + + sb->s_security = kzalloc(blob_sizes.lbs_superblock, GFP_KERNEL); + if (sb->s_security == NULL) + return -ENOMEM; + return 0; +} + /* * The default value of the LSM hook is defined in linux/lsm_hook_defs.h and * can be accessed with: @@ -867,12 +890,21 @@ int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter * int security_sb_alloc(struct super_block *sb) { - return call_int_hook(sb_alloc_security, 0, sb); + int rc = lsm_superblock_alloc(sb); + + if (unlikely(rc)) + return rc; + rc = call_int_hook(sb_alloc_security, 0, sb); + if (unlikely(rc)) + security_sb_free(sb); + return rc; } void security_sb_free(struct super_block *sb) { call_void_hook(sb_free_security, sb); + kfree(sb->s_security); + sb->s_security = NULL; } void security_free_mnt_opts(void **mnt_opts) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index ddd097790d47..2ed9c995263a 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -322,7 +322,7 @@ static void inode_free_security(struct inode *inode) if (!isec) return; - sbsec = inode->i_sb->s_security; + sbsec = selinux_superblock(inode->i_sb); /* * As not all inode security structures are in a list, we check for * empty list outside of the lock to make sure that we won't waste @@ -340,13 +340,6 @@ static void inode_free_security(struct inode *inode) } } -static void superblock_free_security(struct super_block *sb) -{ - struct superblock_security_struct *sbsec = sb->s_security; - sb->s_security = NULL; - kfree(sbsec); -} - struct selinux_mnt_opts { const char *fscontext, *context, *rootcontext, *defcontext; }; @@ -458,7 +451,7 @@ static int selinux_is_genfs_special_handling(struct super_block *sb) static int selinux_is_sblabel_mnt(struct super_block *sb) { - struct superblock_security_struct *sbsec = sb->s_security; + struct superblock_security_struct *sbsec = selinux_superblock(sb); /* * IMPORTANT: Double-check logic in this function when adding a new @@ -535,7 +528,7 @@ fallback: static int sb_finish_set_opts(struct super_block *sb) { - struct superblock_security_struct *sbsec = sb->s_security; + struct superblock_security_struct *sbsec = selinux_superblock(sb); struct dentry *root = sb->s_root; struct inode *root_inode = d_backing_inode(root); int rc = 0; @@ -626,7 +619,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, unsigned long *set_kern_flags) { const struct cred *cred = current_cred(); - struct superblock_security_struct *sbsec = sb->s_security; + struct superblock_security_struct *sbsec = selinux_superblock(sb); struct dentry *root = sb->s_root; struct selinux_mnt_opts *opts = mnt_opts; struct inode_security_struct *root_isec; @@ -863,8 +856,8 @@ out_double_mount: static int selinux_cmp_sb_context(const struct super_block *oldsb, const struct super_block *newsb) { - struct superblock_security_struct *old = oldsb->s_security; - struct superblock_security_struct *new = newsb->s_security; + struct superblock_security_struct *old = selinux_superblock(oldsb); + struct superblock_security_struct *new = selinux_superblock(newsb); char oldflags = old->flags & SE_MNTMASK; char newflags = new->flags & SE_MNTMASK; @@ -896,8 +889,9 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb, unsigned long *set_kern_flags) { int rc = 0; - const struct superblock_security_struct *oldsbsec = oldsb->s_security; - struct superblock_security_struct 
*newsbsec = newsb->s_security; + const struct superblock_security_struct *oldsbsec = + selinux_superblock(oldsb); + struct superblock_security_struct *newsbsec = selinux_superblock(newsb); int set_fscontext = (oldsbsec->flags & FSCONTEXT_MNT); int set_context = (oldsbsec->flags & CONTEXT_MNT); @@ -1076,7 +1070,7 @@ static int show_sid(struct seq_file *m, u32 sid) static int selinux_sb_show_options(struct seq_file *m, struct super_block *sb) { - struct superblock_security_struct *sbsec = sb->s_security; + struct superblock_security_struct *sbsec = selinux_superblock(sb); int rc; if (!(sbsec->flags & SE_SBINITIALIZED)) @@ -1427,7 +1421,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent if (isec->sclass == SECCLASS_FILE) isec->sclass = inode_mode_to_security_class(inode->i_mode); - sbsec = inode->i_sb->s_security; + sbsec = selinux_superblock(inode->i_sb); if (!(sbsec->flags & SE_SBINITIALIZED)) { /* Defer initialization until selinux_complete_init, after the initial policy is loaded and the security @@ -1778,7 +1772,8 @@ selinux_determine_inode_label(const struct task_security_struct *tsec, const struct qstr *name, u16 tclass, u32 *_new_isid) { - const struct superblock_security_struct *sbsec = dir->i_sb->s_security; + const struct superblock_security_struct *sbsec = + selinux_superblock(dir->i_sb); if ((sbsec->flags & SE_SBINITIALIZED) && (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) { @@ -1809,7 +1804,7 @@ static int may_create(struct inode *dir, int rc; dsec = inode_security(dir); - sbsec = dir->i_sb->s_security; + sbsec = selinux_superblock(dir->i_sb); sid = tsec->sid; @@ -1958,7 +1953,7 @@ static int superblock_has_perm(const struct cred *cred, struct superblock_security_struct *sbsec; u32 sid = cred_sid(cred); - sbsec = sb->s_security; + sbsec = selinux_superblock(sb); return avc_has_perm(&selinux_state, sid, sbsec->sid, SECCLASS_FILESYSTEM, perms, ad); } @@ -2587,11 +2582,7 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm) static int selinux_sb_alloc_security(struct super_block *sb) { - struct superblock_security_struct *sbsec; - - sbsec = kzalloc(sizeof(struct superblock_security_struct), GFP_KERNEL); - if (!sbsec) - return -ENOMEM; + struct superblock_security_struct *sbsec = selinux_superblock(sb); mutex_init(&sbsec->lock); INIT_LIST_HEAD(&sbsec->isec_head); @@ -2599,16 +2590,10 @@ static int selinux_sb_alloc_security(struct super_block *sb) sbsec->sid = SECINITSID_UNLABELED; sbsec->def_sid = SECINITSID_FILE; sbsec->mntpoint_sid = SECINITSID_UNLABELED; - sb->s_security = sbsec; return 0; } -static void selinux_sb_free_security(struct super_block *sb) -{ - superblock_free_security(sb); -} - static inline int opt_len(const char *s) { bool open_quote = false; @@ -2687,7 +2672,7 @@ free_opt: static int selinux_sb_remount(struct super_block *sb, void *mnt_opts) { struct selinux_mnt_opts *opts = mnt_opts; - struct superblock_security_struct *sbsec = sb->s_security; + struct superblock_security_struct *sbsec = selinux_superblock(sb); u32 sid; int rc; @@ -2925,7 +2910,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, int rc; char *context; - sbsec = dir->i_sb->s_security; + sbsec = selinux_superblock(dir->i_sb); newsid = tsec->create_sid; @@ -3227,7 +3212,7 @@ static int selinux_inode_setxattr(struct user_namespace *mnt_userns, if (!selinux_initialized(&selinux_state)) return (inode_owner_or_capable(mnt_userns, inode) ? 
0 : -EPERM); - sbsec = inode->i_sb->s_security; + sbsec = selinux_superblock(inode->i_sb); if (!(sbsec->flags & SBLABEL_MNT)) return -EOPNOTSUPP; @@ -3472,13 +3457,14 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct inode_security_struct *isec = inode_security_novalidate(inode); - struct superblock_security_struct *sbsec = inode->i_sb->s_security; + struct superblock_security_struct *sbsec; u32 newsid; int rc; if (strcmp(name, XATTR_SELINUX_SUFFIX)) return -EOPNOTSUPP; + sbsec = selinux_superblock(inode->i_sb); if (!(sbsec->flags & SBLABEL_MNT)) return -EOPNOTSUPP; @@ -6975,6 +6961,7 @@ struct lsm_blob_sizes selinux_blob_sizes __lsm_ro_after_init = { .lbs_inode = sizeof(struct inode_security_struct), .lbs_ipc = sizeof(struct ipc_security_struct), .lbs_msg_msg = sizeof(struct msg_security_struct), + .lbs_superblock = sizeof(struct superblock_security_struct), }; #ifdef CONFIG_PERF_EVENTS @@ -7075,7 +7062,6 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(bprm_committing_creds, selinux_bprm_committing_creds), LSM_HOOK_INIT(bprm_committed_creds, selinux_bprm_committed_creds), - LSM_HOOK_INIT(sb_free_security, selinux_sb_free_security), LSM_HOOK_INIT(sb_free_mnt_opts, selinux_free_mnt_opts), LSM_HOOK_INIT(sb_remount, selinux_sb_remount), LSM_HOOK_INIT(sb_kern_mount, selinux_sb_kern_mount), diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index ca4d7ab6a835..2953132408bf 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -188,4 +188,10 @@ static inline u32 current_sid(void) return tsec->sid; } +static inline struct superblock_security_struct *selinux_superblock( + const struct super_block *superblock) +{ + return superblock->s_security + selinux_blob_sizes.lbs_superblock; +} + #endif /* _SELINUX_OBJSEC_H_ */ diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 3438d0130378..9cea2e6c809f 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include "flask.h" @@ -2875,7 +2876,7 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb) struct sidtab *sidtab; int rc = 0; struct ocontext *c; - struct superblock_security_struct *sbsec = sb->s_security; + struct superblock_security_struct *sbsec = selinux_superblock(sb); const char *fstype = sb->s_type->name; if (!selinux_initialized(state)) { diff --git a/security/smack/smack.h b/security/smack/smack.h index a9768b12716b..7077b18c79ec 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -357,6 +357,12 @@ static inline struct smack_known **smack_ipc(const struct kern_ipc_perm *ipc) return ipc->security + smack_blob_sizes.lbs_ipc; } +static inline struct superblock_smack *smack_superblock( + const struct super_block *superblock) +{ + return superblock->s_security + smack_blob_sizes.lbs_superblock; +} + /* * Is the directory transmuting? 
*/ diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 12a45e61c1a5..ee3e29603c9c 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -535,12 +535,7 @@ static int smack_syslog(int typefrom_file) */ static int smack_sb_alloc_security(struct super_block *sb) { - struct superblock_smack *sbsp; - - sbsp = kzalloc(sizeof(struct superblock_smack), GFP_KERNEL); - - if (sbsp == NULL) - return -ENOMEM; + struct superblock_smack *sbsp = smack_superblock(sb); sbsp->smk_root = &smack_known_floor; sbsp->smk_default = &smack_known_floor; @@ -549,22 +544,10 @@ static int smack_sb_alloc_security(struct super_block *sb) /* * SMK_SB_INITIALIZED will be zero from kzalloc. */ - sb->s_security = sbsp; return 0; } -/** - * smack_sb_free_security - free a superblock blob - * @sb: the superblock getting the blob - * - */ -static void smack_sb_free_security(struct super_block *sb) -{ - kfree(sb->s_security); - sb->s_security = NULL; -} - struct smack_mnt_opts { const char *fsdefault, *fsfloor, *fshat, *fsroot, *fstransmute; }; @@ -772,7 +755,7 @@ static int smack_set_mnt_opts(struct super_block *sb, { struct dentry *root = sb->s_root; struct inode *inode = d_backing_inode(root); - struct superblock_smack *sp = sb->s_security; + struct superblock_smack *sp = smack_superblock(sb); struct inode_smack *isp; struct smack_known *skp; struct smack_mnt_opts *opts = mnt_opts; @@ -871,7 +854,7 @@ static int smack_set_mnt_opts(struct super_block *sb, */ static int smack_sb_statfs(struct dentry *dentry) { - struct superblock_smack *sbp = dentry->d_sb->s_security; + struct superblock_smack *sbp = smack_superblock(dentry->d_sb); int rc; struct smk_audit_info ad; @@ -905,7 +888,7 @@ static int smack_bprm_creds_for_exec(struct linux_binprm *bprm) if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task) return 0; - sbsp = inode->i_sb->s_security; + sbsp = smack_superblock(inode->i_sb); if ((sbsp->smk_flags & SMK_SB_UNTRUSTED) && isp->smk_task != sbsp->smk_root) return 0; @@ -1157,7 +1140,7 @@ static int smack_inode_rename(struct inode *old_inode, */ static int smack_inode_permission(struct inode *inode, int mask) { - struct superblock_smack *sbsp = inode->i_sb->s_security; + struct superblock_smack *sbsp = smack_superblock(inode->i_sb); struct smk_audit_info ad; int no_block = mask & MAY_NOT_BLOCK; int rc; @@ -1400,7 +1383,7 @@ static int smack_inode_removexattr(struct user_namespace *mnt_userns, */ if (strcmp(name, XATTR_NAME_SMACK) == 0) { struct super_block *sbp = dentry->d_sb; - struct superblock_smack *sbsp = sbp->s_security; + struct superblock_smack *sbsp = smack_superblock(sbp); isp->smk_inode = sbsp->smk_default; } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) @@ -1670,7 +1653,7 @@ static int smack_mmap_file(struct file *file, isp = smack_inode(file_inode(file)); if (isp->smk_mmap == NULL) return 0; - sbsp = file_inode(file)->i_sb->s_security; + sbsp = smack_superblock(file_inode(file)->i_sb); if (sbsp->smk_flags & SMK_SB_UNTRUSTED && isp->smk_mmap != sbsp->smk_root) return -EACCES; @@ -3285,7 +3268,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) return; sbp = inode->i_sb; - sbsp = sbp->s_security; + sbsp = smack_superblock(sbp); /* * We're going to use the superblock default label * if there's no label on the file. 
@@ -4700,6 +4683,7 @@ struct lsm_blob_sizes smack_blob_sizes __lsm_ro_after_init = { .lbs_inode = sizeof(struct inode_smack), .lbs_ipc = sizeof(struct smack_known *), .lbs_msg_msg = sizeof(struct smack_known *), + .lbs_superblock = sizeof(struct superblock_smack), }; static struct security_hook_list smack_hooks[] __lsm_ro_after_init = { @@ -4711,7 +4695,6 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(fs_context_parse_param, smack_fs_context_parse_param), LSM_HOOK_INIT(sb_alloc_security, smack_sb_alloc_security), - LSM_HOOK_INIT(sb_free_security, smack_sb_free_security), LSM_HOOK_INIT(sb_free_mnt_opts, smack_free_mnt_opts), LSM_HOOK_INIT(sb_eat_lsm_opts, smack_sb_eat_lsm_opts), LSM_HOOK_INIT(sb_statfs, smack_sb_statfs), -- cgit v1.2.3-71-gd317 From 83e804f0bfee2247b1c0aa64845c81a38562da7a Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Thu, 22 Apr 2021 17:41:16 +0200 Subject: fs,security: Add sb_delete hook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The sb_delete security hook is called when shutting down a superblock, which may be useful to release kernel objects tied to the superblock's lifetime (e.g. inodes). This new hook is needed by Landlock to release (ephemerally) tagged struct inodes. This comes from the unprivileged nature of Landlock described in the next commit. Cc: Al Viro Cc: James Morris Signed-off-by: Mickaël Salaün Reviewed-by: Jann Horn Acked-by: Serge Hallyn Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/20210422154123.13086-7-mic@digikod.net Signed-off-by: James Morris --- fs/super.c | 1 + include/linux/lsm_hook_defs.h | 1 + include/linux/lsm_hooks.h | 3 +++ include/linux/security.h | 4 ++++ security/security.c | 5 +++++ 5 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/fs/super.c b/fs/super.c index 8c1baca35c16..11b7e7213fd1 100644 --- a/fs/super.c +++ b/fs/super.c @@ -454,6 +454,7 @@ void generic_shutdown_super(struct super_block *sb) evict_inodes(sb); /* only nonzero refcount inodes can have marks */ fsnotify_sb_delete(sb); + security_sb_delete(sb); if (sb->s_dio_done_wq) { destroy_workqueue(sb->s_dio_done_wq); diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 477a597db013..e8adadbf9581 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -59,6 +59,7 @@ LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc, struct fs_parameter *param) LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb) +LSM_HOOK(void, LSM_RET_VOID, sb_delete, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts) LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 75715998a95f..cc2eaaaec0e4 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -108,6 +108,9 @@ * allocated. * @sb contains the super_block structure to be modified. * Return 0 if operation was successful. + * @sb_delete: + * Release objects tied to a superblock (e.g. inodes). + * @sb contains the super_block structure being released. * @sb_free_security: * Deallocate and clear the sb->s_security field. * @sb contains the super_block structure to be modified. 
diff --git a/include/linux/security.h b/include/linux/security.h index 8aeebd6646dc..90298baa4551 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -291,6 +291,7 @@ void security_bprm_committed_creds(struct linux_binprm *bprm); int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); int security_sb_alloc(struct super_block *sb); +void security_sb_delete(struct super_block *sb); void security_sb_free(struct super_block *sb); void security_free_mnt_opts(void **mnt_opts); int security_sb_eat_lsm_opts(char *options, void **mnt_opts); @@ -631,6 +632,9 @@ static inline int security_sb_alloc(struct super_block *sb) return 0; } +static inline void security_sb_delete(struct super_block *sb) +{ } + static inline void security_sb_free(struct super_block *sb) { } diff --git a/security/security.c b/security/security.c index e9c29480eb18..bb666f992497 100644 --- a/security/security.c +++ b/security/security.c @@ -900,6 +900,11 @@ int security_sb_alloc(struct super_block *sb) return rc; } +void security_sb_delete(struct super_block *sb) +{ + call_void_hook(sb_delete, sb); +} + void security_sb_free(struct super_block *sb) { call_void_hook(sb_free_security, sb); -- cgit v1.2.3-71-gd317 From 265885daf3e5082eb9f6e2a23bdbf9ba4456a21b Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Thu, 22 Apr 2021 17:41:18 +0200 Subject: landlock: Add syscall implementations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These 3 system calls are designed to be used by unprivileged processes to sandbox themselves: * landlock_create_ruleset(2): Creates a ruleset and returns its file descriptor. * landlock_add_rule(2): Adds a rule (e.g. file hierarchy access) to a ruleset, identified by the dedicated file descriptor. * landlock_restrict_self(2): Enforces a ruleset on the calling thread and its future children (similar to seccomp). This syscall has the same usage restrictions as seccomp(2): the caller must have the no_new_privs attribute set or have CAP_SYS_ADMIN in the current user namespace. All these syscalls have a "flags" argument (not currently used) to enable extensibility. Here are the motivations for these new syscalls: * A sandboxed process may not have access to file systems, including /dev, /sys or /proc, but it should still be able to add more restrictions to itself. * Neither prctl(2) nor seccomp(2) (which was used in a previous version) fit well with the current definition of a Landlock security policy. All passed structs (attributes) are checked at build time to ensure that they don't contain holes and that they are aligned the same way for each architecture. 
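For illustration, a minimal user-space sequence through the three syscalls could look like the sketch below (assumptions: the __NR_landlock_* numbers are wired up for the architecture, the LANDLOCK_ACCESS_FS_* flags come from the UAPI header added by this series, and error handling is omitted):

#include <fcntl.h>
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Accesses handled by the ruleset are denied unless a rule allows them. */
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
				     LANDLOCK_ACCESS_FS_READ_DIR |
				     LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_READ_DIR,
	};
	int ruleset_fd;

	ruleset_fd = syscall(__NR_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);

	/* Allow read-only access beneath /usr. */
	path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
	syscall(__NR_landlock_add_rule, ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
		&path_beneath, 0);
	close(path_beneath.parent_fd);

	/* Same requirement as seccomp(2) when CAP_SYS_ADMIN is not held. */
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	syscall(__NR_landlock_restrict_self, ruleset_fd, 0);
	close(ruleset_fd);

	return 0;
}

After landlock_restrict_self() succeeds, the calling thread and its future children can read beneath /usr, while the write access handled by the ruleset is denied everywhere.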
See the user and kernel documentation for more details (provided by a following commit): * Documentation/userspace-api/landlock.rst * Documentation/security/landlock.rst Cc: Arnd Bergmann Cc: James Morris Cc: Jann Horn Cc: Kees Cook Signed-off-by: Mickaël Salaün Acked-by: Serge Hallyn Link: https://lore.kernel.org/r/20210422154123.13086-9-mic@digikod.net Signed-off-by: James Morris --- include/linux/syscalls.h | 7 + include/uapi/linux/landlock.h | 53 +++++ kernel/sys_ni.c | 5 + security/landlock/Makefile | 2 +- security/landlock/syscalls.c | 442 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 508 insertions(+), 1 deletion(-) create mode 100644 security/landlock/syscalls.c (limited to 'include/linux') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 2839dc9a7c01..fa3971012e1c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -69,6 +69,8 @@ struct io_uring_params; struct clone_args; struct open_how; struct mount_attr; +struct landlock_ruleset_attr; +enum landlock_rule_type; #include #include @@ -1041,6 +1043,11 @@ asmlinkage long sys_pidfd_send_signal(int pidfd, int sig, siginfo_t __user *info, unsigned int flags); asmlinkage long sys_pidfd_getfd(int pidfd, int fd, unsigned int flags); +asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr __user *attr, + size_t size, __u32 flags); +asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type, + const void __user *rule_attr, __u32 flags); +asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags); /* * Architecture-specific system calls diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index b1a81b5a8b86..ba946a1e40b2 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -9,6 +9,59 @@ #ifndef _UAPI_LINUX_LANDLOCK_H #define _UAPI_LINUX_LANDLOCK_H +#include + +/** + * struct landlock_ruleset_attr - Ruleset definition + * + * Argument of sys_landlock_create_ruleset(). This structure can grow in + * future versions. + */ +struct landlock_ruleset_attr { + /** + * @handled_access_fs: Bitmask of actions (cf. `Filesystem flags`_) + * that is handled by this ruleset and should then be forbidden if no + * rule explicitly allow them. This is needed for backward + * compatibility reasons. + */ + __u64 handled_access_fs; +}; + +/** + * enum landlock_rule_type - Landlock rule type + * + * Argument of sys_landlock_add_rule(). + */ +enum landlock_rule_type { + /** + * @LANDLOCK_RULE_PATH_BENEATH: Type of a &struct + * landlock_path_beneath_attr . + */ + LANDLOCK_RULE_PATH_BENEATH = 1, +}; + +/** + * struct landlock_path_beneath_attr - Path hierarchy definition + * + * Argument of sys_landlock_add_rule(). + */ +struct landlock_path_beneath_attr { + /** + * @allowed_access: Bitmask of allowed actions for this file hierarchy + * (cf. `Filesystem flags`_). + */ + __u64 allowed_access; + /** + * @parent_fd: File descriptor, open with ``O_PATH``, which identifies + * the parent directory of a file hierarchy, or just a file. + */ + __s32 parent_fd; + /* + * This struct is packed to avoid trailing reserved members. + * Cf. 
security/landlock/syscalls.c:build_check_abi() + */ +} __attribute__((packed)); + /** * DOC: fs_access * diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 19aa806890d5..cce430cf2ff2 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -266,6 +266,11 @@ COND_SYSCALL(request_key); COND_SYSCALL(keyctl); COND_SYSCALL_COMPAT(keyctl); +/* security/landlock/syscalls.c */ +COND_SYSCALL(landlock_create_ruleset); +COND_SYSCALL(landlock_add_rule); +COND_SYSCALL(landlock_restrict_self); + /* arch/example/kernel/sys_example.c */ /* mm/fadvise.c */ diff --git a/security/landlock/Makefile b/security/landlock/Makefile index 92e3d80ab8ed..7bbd2f413b3e 100644 --- a/security/landlock/Makefile +++ b/security/landlock/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_SECURITY_LANDLOCK) := landlock.o -landlock-y := setup.o object.o ruleset.o \ +landlock-y := setup.o syscalls.o object.o ruleset.o \ cred.o ptrace.o fs.o diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c new file mode 100644 index 000000000000..93620ad7593b --- /dev/null +++ b/security/landlock/syscalls.c @@ -0,0 +1,442 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Landlock LSM - System call implementations and user space interfaces + * + * Copyright © 2016-2020 Mickaël Salaün + * Copyright © 2018-2020 ANSSI + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cred.h" +#include "fs.h" +#include "limits.h" +#include "ruleset.h" +#include "setup.h" + +/** + * copy_min_struct_from_user - Safe future-proof argument copying + * + * Extend copy_struct_from_user() to check for consistent user buffer. + * + * @dst: Kernel space pointer or NULL. + * @ksize: Actual size of the data pointed to by @dst. + * @ksize_min: Minimal required size to be copied. + * @src: User space pointer or NULL. + * @usize: (Alleged) size of the data pointed to by @src. + */ +static __always_inline int copy_min_struct_from_user(void *const dst, + const size_t ksize, const size_t ksize_min, + const void __user *const src, const size_t usize) +{ + /* Checks buffer inconsistencies. */ + BUILD_BUG_ON(!dst); + if (!src) + return -EFAULT; + + /* Checks size ranges. */ + BUILD_BUG_ON(ksize <= 0); + BUILD_BUG_ON(ksize < ksize_min); + if (usize < ksize_min) + return -EINVAL; + if (usize > PAGE_SIZE) + return -E2BIG; + + /* Copies user buffer and fills with zeros. */ + return copy_struct_from_user(dst, ksize, src, usize); +} + +/* + * This function only contains arithmetic operations with constants, leading to + * BUILD_BUG_ON(). The related code is evaluated and checked at build time, + * but it is then ignored thanks to compiler optimizations. + */ +static void build_check_abi(void) +{ + struct landlock_ruleset_attr ruleset_attr; + struct landlock_path_beneath_attr path_beneath_attr; + size_t ruleset_size, path_beneath_size; + + /* + * For each user space ABI structures, first checks that there is no + * hole in them, then checks that all architectures have the same + * struct size. 
+ */ + ruleset_size = sizeof(ruleset_attr.handled_access_fs); + BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size); + BUILD_BUG_ON(sizeof(ruleset_attr) != 8); + + path_beneath_size = sizeof(path_beneath_attr.allowed_access); + path_beneath_size += sizeof(path_beneath_attr.parent_fd); + BUILD_BUG_ON(sizeof(path_beneath_attr) != path_beneath_size); + BUILD_BUG_ON(sizeof(path_beneath_attr) != 12); +} + +/* Ruleset handling */ + +static int fop_ruleset_release(struct inode *const inode, + struct file *const filp) +{ + struct landlock_ruleset *ruleset = filp->private_data; + + landlock_put_ruleset(ruleset); + return 0; +} + +static ssize_t fop_dummy_read(struct file *const filp, char __user *const buf, + const size_t size, loff_t *const ppos) +{ + /* Dummy handler to enable FMODE_CAN_READ. */ + return -EINVAL; +} + +static ssize_t fop_dummy_write(struct file *const filp, + const char __user *const buf, const size_t size, + loff_t *const ppos) +{ + /* Dummy handler to enable FMODE_CAN_WRITE. */ + return -EINVAL; +} + +/* + * A ruleset file descriptor enables to build a ruleset by adding (i.e. + * writing) rule after rule, without relying on the task's context. This + * reentrant design is also used in a read way to enforce the ruleset on the + * current task. + */ +static const struct file_operations ruleset_fops = { + .release = fop_ruleset_release, + .read = fop_dummy_read, + .write = fop_dummy_write, +}; + +/** + * sys_landlock_create_ruleset - Create a new ruleset + * + * @attr: Pointer to a &struct landlock_ruleset_attr identifying the scope of + * the new ruleset. + * @size: Size of the pointed &struct landlock_ruleset_attr (needed for + * backward and forward compatibility). + * @flags: Must be 0. + * + * This system call enables to create a new Landlock ruleset, and returns the + * related file descriptor on success. + * + * Possible returned errors are: + * + * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; + * - EINVAL: @flags is not 0, or unknown access, or too small @size; + * - E2BIG or EFAULT: @attr or @size inconsistencies; + * - ENOMSG: empty &landlock_ruleset_attr.handled_access_fs. + */ +SYSCALL_DEFINE3(landlock_create_ruleset, + const struct landlock_ruleset_attr __user *const, attr, + const size_t, size, const __u32, flags) +{ + struct landlock_ruleset_attr ruleset_attr; + struct landlock_ruleset *ruleset; + int err, ruleset_fd; + + /* Build-time checks. */ + build_check_abi(); + + if (!landlock_initialized) + return -EOPNOTSUPP; + + /* No flag for now. */ + if (flags) + return -EINVAL; + + /* Copies raw user space buffer. */ + err = copy_min_struct_from_user(&ruleset_attr, sizeof(ruleset_attr), + offsetofend(typeof(ruleset_attr), handled_access_fs), + attr, size); + if (err) + return err; + + /* Checks content (and 32-bits cast). */ + if ((ruleset_attr.handled_access_fs | LANDLOCK_MASK_ACCESS_FS) != + LANDLOCK_MASK_ACCESS_FS) + return -EINVAL; + + /* Checks arguments and transforms to kernel struct. */ + ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + /* Creates anonymous FD referring to the ruleset. */ + ruleset_fd = anon_inode_getfd("landlock-ruleset", &ruleset_fops, + ruleset, O_RDWR | O_CLOEXEC); + if (ruleset_fd < 0) + landlock_put_ruleset(ruleset); + return ruleset_fd; +} + +/* + * Returns an owned ruleset from a FD. It is thus needed to call + * landlock_put_ruleset() on the return value. 
+ */ +static struct landlock_ruleset *get_ruleset_from_fd(const int fd, + const fmode_t mode) +{ + struct fd ruleset_f; + struct landlock_ruleset *ruleset; + + ruleset_f = fdget(fd); + if (!ruleset_f.file) + return ERR_PTR(-EBADF); + + /* Checks FD type and access right. */ + if (ruleset_f.file->f_op != &ruleset_fops) { + ruleset = ERR_PTR(-EBADFD); + goto out_fdput; + } + if (!(ruleset_f.file->f_mode & mode)) { + ruleset = ERR_PTR(-EPERM); + goto out_fdput; + } + ruleset = ruleset_f.file->private_data; + if (WARN_ON_ONCE(ruleset->num_layers != 1)) { + ruleset = ERR_PTR(-EINVAL); + goto out_fdput; + } + landlock_get_ruleset(ruleset); + +out_fdput: + fdput(ruleset_f); + return ruleset; +} + +/* Path handling */ + +/* + * @path: Must call put_path(@path) after the call if it succeeded. + */ +static int get_path_from_fd(const s32 fd, struct path *const path) +{ + struct fd f; + int err = 0; + + BUILD_BUG_ON(!__same_type(fd, + ((struct landlock_path_beneath_attr *)NULL)->parent_fd)); + + /* Handles O_PATH. */ + f = fdget_raw(fd); + if (!f.file) + return -EBADF; + /* + * Forbids ruleset FDs, internal filesystems (e.g. nsfs), including + * pseudo filesystems that will never be mountable (e.g. sockfs, + * pipefs). + */ + if ((f.file->f_op == &ruleset_fops) || + (f.file->f_path.mnt->mnt_flags & MNT_INTERNAL) || + (f.file->f_path.dentry->d_sb->s_flags & SB_NOUSER) || + d_is_negative(f.file->f_path.dentry) || + IS_PRIVATE(d_backing_inode(f.file->f_path.dentry))) { + err = -EBADFD; + goto out_fdput; + } + *path = f.file->f_path; + path_get(path); + +out_fdput: + fdput(f); + return err; +} + +/** + * sys_landlock_add_rule - Add a new rule to a ruleset + * + * @ruleset_fd: File descriptor tied to the ruleset that should be extended + * with the new rule. + * @rule_type: Identify the structure type pointed to by @rule_attr (only + * LANDLOCK_RULE_PATH_BENEATH for now). + * @rule_attr: Pointer to a rule (only of type &struct + * landlock_path_beneath_attr for now). + * @flags: Must be 0. + * + * This system call enables to define a new rule and add it to an existing + * ruleset. + * + * Possible returned errors are: + * + * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; + * - EINVAL: @flags is not 0, or inconsistent access in the rule (i.e. + * &landlock_path_beneath_attr.allowed_access is not a subset of the rule's + * accesses); + * - ENOMSG: Empty accesses (e.g. &landlock_path_beneath_attr.allowed_access); + * - EBADF: @ruleset_fd is not a file descriptor for the current thread, or a + * member of @rule_attr is not a file descriptor as expected; + * - EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of + * @rule_attr is not the expected file descriptor type (e.g. file open + * without O_PATH); + * - EPERM: @ruleset_fd has no write access to the underlying ruleset; + * - EFAULT: @rule_attr inconsistency. + */ +SYSCALL_DEFINE4(landlock_add_rule, + const int, ruleset_fd, const enum landlock_rule_type, rule_type, + const void __user *const, rule_attr, const __u32, flags) +{ + struct landlock_path_beneath_attr path_beneath_attr; + struct path path; + struct landlock_ruleset *ruleset; + int res, err; + + if (!landlock_initialized) + return -EOPNOTSUPP; + + /* No flag for now. */ + if (flags) + return -EINVAL; + + if (rule_type != LANDLOCK_RULE_PATH_BENEATH) + return -EINVAL; + + /* Copies raw user space buffer, only one type for now. 
*/ + res = copy_from_user(&path_beneath_attr, rule_attr, + sizeof(path_beneath_attr)); + if (res) + return -EFAULT; + + /* Gets and checks the ruleset. */ + ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_WRITE); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + /* + * Informs about useless rule: empty allowed_access (i.e. deny rules) + * are ignored in path walks. + */ + if (!path_beneath_attr.allowed_access) { + err = -ENOMSG; + goto out_put_ruleset; + } + /* + * Checks that allowed_access matches the @ruleset constraints + * (ruleset->fs_access_masks[0] is automatically upgraded to 64-bits). + */ + if ((path_beneath_attr.allowed_access | ruleset->fs_access_masks[0]) != + ruleset->fs_access_masks[0]) { + err = -EINVAL; + goto out_put_ruleset; + } + + /* Gets and checks the new rule. */ + err = get_path_from_fd(path_beneath_attr.parent_fd, &path); + if (err) + goto out_put_ruleset; + + /* Imports the new rule. */ + err = landlock_append_fs_rule(ruleset, &path, + path_beneath_attr.allowed_access); + path_put(&path); + +out_put_ruleset: + landlock_put_ruleset(ruleset); + return err; +} + +/* Enforcement */ + +/** + * sys_landlock_restrict_self - Enforce a ruleset on the calling thread + * + * @ruleset_fd: File descriptor tied to the ruleset to merge with the target. + * @flags: Must be 0. + * + * This system call enables to enforce a Landlock ruleset on the current + * thread. Enforcing a ruleset requires that the task has CAP_SYS_ADMIN in its + * namespace or is running with no_new_privs. This avoids scenarios where + * unprivileged tasks can affect the behavior of privileged children. + * + * Possible returned errors are: + * + * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; + * - EINVAL: @flags is not 0. + * - EBADF: @ruleset_fd is not a file descriptor for the current thread; + * - EBADFD: @ruleset_fd is not a ruleset file descriptor; + * - EPERM: @ruleset_fd has no read access to the underlying ruleset, or the + * current thread is not running with no_new_privs, or it doesn't have + * CAP_SYS_ADMIN in its namespace. + * - E2BIG: The maximum number of stacked rulesets is reached for the current + * thread. + */ +SYSCALL_DEFINE2(landlock_restrict_self, + const int, ruleset_fd, const __u32, flags) +{ + struct landlock_ruleset *new_dom, *ruleset; + struct cred *new_cred; + struct landlock_cred_security *new_llcred; + int err; + + if (!landlock_initialized) + return -EOPNOTSUPP; + + /* No flag for now. */ + if (flags) + return -EINVAL; + + /* + * Similar checks as for seccomp(2), except that an -EPERM may be + * returned. + */ + if (!task_no_new_privs(current) && + !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) + return -EPERM; + + /* Gets and checks the ruleset. */ + ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + /* Prepares new credentials. */ + new_cred = prepare_creds(); + if (!new_cred) { + err = -ENOMEM; + goto out_put_ruleset; + } + new_llcred = landlock_cred(new_cred); + + /* + * There is no possible race condition while copying and manipulating + * the current credentials because they are dedicated per thread. + */ + new_dom = landlock_merge_ruleset(new_llcred->domain, ruleset); + if (IS_ERR(new_dom)) { + err = PTR_ERR(new_dom); + goto out_put_creds; + } + + /* Replaces the old (prepared) domain. 
*/ + landlock_put_ruleset(new_llcred->domain); + new_llcred->domain = new_dom; + + landlock_put_ruleset(ruleset); + return commit_creds(new_cred); + +out_put_creds: + abort_creds(new_cred); + +out_put_ruleset: + landlock_put_ruleset(ruleset); + return err; +} -- cgit v1.2.3-71-gd317 From 81dd4d4d6178306ab31db91bdc7353d485bdafce Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Sat, 24 Apr 2021 10:04:15 -0500 Subject: dmaengine: idxd: Add IDXD performance monitor support Implement the IDXD performance monitor capability (named 'perfmon' in the DSA (Data Streaming Accelerator) spec [1]), which supports the collection of information about key events occurring during DSA and IAX (Intel Analytics Accelerator) device execution, to assist in performance tuning and debugging. The idxd perfmon support is implemented as part of the IDXD driver and interfaces with the Linux perf framework. It has several features in common with the existing uncore pmu support: - it does not support sampling - does not support per-thread counting However it also has some unique features not present in the core and uncore support: - all general-purpose counters are identical, thus no event constraints - operation is always system-wide While the core perf subsystem assumes that all counters are by default per-cpu, the uncore pmus are socket-scoped and use a cpu mask to restrict counting to one cpu from each socket. IDXD counters use a similar strategy but expand the scope even further; since IDXD counters are system-wide and can be read from any cpu, the IDXD perf driver picks a single cpu to do the work (with cpu hotplug notifiers to choose a different cpu if the chosen one is taken off-line). More specifically, the perf userspace tool by default opens a counter for each cpu for an event. However, if it finds a cpumask file associated with the pmu under sysfs, as is the case with the uncore pmus, it will open counters only on the cpus specified by the cpumask. Since perfmon only needs to open a single counter per event for a given IDXD device, the perfmon driver will create a sysfs cpumask file for the device and insert the first cpu of the system into it. When a user uses perf to open an event, perf will open a single counter on the cpu specified by the cpu mask. This amounts to the default system-wide rather than per-cpu counting mentioned previously for perfmon pmu events. In order to keep the cpu mask up-to-date, the driver implements cpu hotplug support for multiple devices, as IDXD usually enumerates and registers more than one idxd device. The perfmon driver implements basic perfmon hardware capability discovery and configuration, and is initialized by the IDXD driver's probe function. During initialization, the driver retrieves the total number of supported performance counters, the pmu ID, and the device type from idxd device, and registers itself under the Linux perf framework. The perf userspace tool can be used to monitor single or multiple events depending on the given configuration, as well as event groups, which are also supported by the perfmon driver. The user configures events using the perf tool command-line interface by specifying the event and corresponding event category, along with an optional set of filters that can be used to restrict counting to specific work queues, traffic classes, page and transfer sizes, and engines (See [1] for specifics). 
With the configuration specified by the user, the perf tool issues a system call passing that information to the kernel, which uses it to initialize the specified event(s). The event(s) are opened and started, and following termination of the perf command, they're stopped. At that point, the perfmon driver will read the latest count for the event(s), calculate the difference between the latest counter values and previously tracked counter values, and display the final incremental count as the event count for the cycle. An overflow handler registered on the IDXD irq path is used to account for counter overflows, which are signaled by an overflow interrupt. Below are a couple of examples of perf usage for monitoring DSA events. The following monitors all events in the 'engine' category. Because no filters are specified, this captures all engine events for the workload, which in this case is 19 iterations of the work generated by the kernel dmatest module. Details describing the events can be found in Appendix D of [1], Performance Monitoring Events, but briefly they are:

event 0x1: total input data processed, in 32-byte units
event 0x2: total data written, in 32-byte units
event 0x4: number of work descriptors that read the source
event 0x8: number of work descriptors that write the destination
event 0x10: number of work descriptors dispatched from batch descriptors
event 0x20: number of work descriptors dispatched from work queues

# perf stat -e dsa0/event=0x1,event_category=0x1/, dsa0/event=0x2,event_category=0x1/, dsa0/event=0x4,event_category=0x1/, dsa0/event=0x8,event_category=0x1/, dsa0/event=0x10,event_category=0x1/, dsa0/event=0x20,event_category=0x1/ modprobe dmatest channel=dma0chan0 timeout=2000 iterations=19 run=1 wait=1

Performance counter stats for 'system wide':
5,332 dsa0/event=0x1,event_category=0x1/
5,327 dsa0/event=0x2,event_category=0x1/
19 dsa0/event=0x4,event_category=0x1/
19 dsa0/event=0x8,event_category=0x1/
0 dsa0/event=0x10,event_category=0x1/
19 dsa0/event=0x20,event_category=0x1/
21.977436186 seconds time elapsed

The command below illustrates filter usage with a simple example. It specifies that MEM_MOVE operations should be counted for the DSA device dsa0 (event 0x8 corresponds to the EV_MEM_MOVE event - Number of Memory Move Descriptors, which is part of event category 0x3 - Operations. The detailed category and event IDs are available in Appendix D, Performance Monitoring Events, of [1]). In addition to the event and event category, a number of filters are also specified (the detailed filter values are available in Chapter 6.4 (Filter Support) of [1]), which will restrict counting to only those events that meet all of the filter criteria. In this case, the filters specify that only MEM_MOVE operations that are serviced by work queue wq0 and specifically engine number engine0 and traffic class tc0 having sizes between 0 and 4k and page size of between 0 and 1G result in a counter hit; anything else will be filtered out and not appear in the final count. Note that filters are optional - any filter not specified is assumed to be all ones and will pass anything.
# perf stat -e dsa0/filter_wq=0x1,filter_tc=0x1,filter_sz=0x7, filter_eng=0x1,event=0x8,event_category=0x3/ modprobe dmatest channel=dma0chan0 timeout=2000 iterations=19 run=1 wait=1 Performance counter stats for 'system wide': 19 dsa0/filter_wq=0x1,filter_tc=0x1,filter_sz=0x7, filter_eng=0x1,event=0x8,event_category=0x3/ 21.865914091 seconds time elapsed The output above reflects that the unspecified workload resulted in the counting of 19 MEM_MOVE operation events that met the filter criteria. [1]: https://software.intel.com/content/www/us/en/develop/download/intel-data-streaming-accelerator-preliminary-architecture-specification.html [ Based on work originally by Jing Lin. ] Reviewed-by: Dave Jiang Reviewed-by: Kan Liang Signed-off-by: Tom Zanussi Link: https://lore.kernel.org/r/0c5080a7d541904c4ad42b848c76a1ce056ddac7.1619276133.git.zanussi@kernel.org Signed-off-by: Vinod Koul --- .../ABI/testing/sysfs-bus-event_source-devices-dsa | 30 + drivers/dma/Kconfig | 12 + drivers/dma/idxd/Makefile | 2 + drivers/dma/idxd/idxd.h | 45 ++ drivers/dma/idxd/perfmon.c | 662 +++++++++++++++++++++ drivers/dma/idxd/perfmon.h | 119 ++++ drivers/dma/idxd/registers.h | 108 ++++ include/linux/cpuhotplug.h | 1 + 8 files changed, 979 insertions(+) create mode 100644 Documentation/ABI/testing/sysfs-bus-event_source-devices-dsa create mode 100644 drivers/dma/idxd/perfmon.c create mode 100644 drivers/dma/idxd/perfmon.h (limited to 'include/linux') diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-dsa b/Documentation/ABI/testing/sysfs-bus-event_source-devices-dsa new file mode 100644 index 000000000000..3c7d132281b0 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-dsa @@ -0,0 +1,30 @@ +What: /sys/bus/event_source/devices/dsa*/format +Date: April 2021 +KernelVersion: 5.13 +Contact: Tom Zanussi +Description: Read-only. Attribute group to describe the magic bits + that go into perf_event_attr.config or + perf_event_attr.config1 for the IDXD DSA pmu. (See also + ABI/testing/sysfs-bus-event_source-devices-format). + + Each attribute in this group defines a bit range in + perf_event_attr.config or perf_event_attr.config1. + All supported attributes are listed below (See the + IDXD DSA Spec for possible attribute values):: + + event_category = "config:0-3" - event category + event = "config:4-31" - event ID + + filter_wq = "config1:0-31" - workqueue filter + filter_tc = "config1:32-39" - traffic class filter + filter_pgsz = "config1:40-43" - page size filter + filter_sz = "config1:44-51" - transfer size filter + filter_eng = "config1:52-59" - engine filter + +What: /sys/bus/event_source/devices/dsa*/cpumask +Date: April 2021 +KernelVersion: 5.13 +Contact: Tom Zanussi +Description: Read-only. This file always returns the cpu to which the + IDXD DSA pmu is bound for access to all dsa pmu + performance monitoring events. diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 0c2827fd8c19..b417217c148c 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -300,6 +300,18 @@ config INTEL_IDXD_SVM depends on PCI_PASID depends on PCI_IOV +config INTEL_IDXD_PERFMON + bool "Intel Data Accelerators performance monitor support" + depends on INTEL_IDXD + help + Enable performance monitor (pmu) support for the Intel(R) + data accelerators present in Intel Xeon CPU. With this + enabled, perf can be used to monitor the DSA (Intel Data + Streaming Accelerator) events described in the Intel DSA + spec. + + If unsure, say N. 
+ config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86_64 diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile index 8978b898d777..6d11558756f8 100644 --- a/drivers/dma/idxd/Makefile +++ b/drivers/dma/idxd/Makefile @@ -1,2 +1,4 @@ obj-$(CONFIG_INTEL_IDXD) += idxd.o idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o + +idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 97c96ca6ab70..26482c7d4c3a 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include "registers.h" #define IDXD_DRIVER_VERSION "1.00" @@ -29,6 +31,7 @@ enum idxd_type { }; #define IDXD_NAME_SIZE 128 +#define IDXD_PMU_EVENT_MAX 64 struct idxd_device_driver { struct device_driver drv; @@ -61,6 +64,31 @@ struct idxd_group { int tc_b; }; +struct idxd_pmu { + struct idxd_device *idxd; + + struct perf_event *event_list[IDXD_PMU_EVENT_MAX]; + int n_events; + + DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX); + + struct pmu pmu; + char name[IDXD_NAME_SIZE]; + int cpu; + + int n_counters; + int counter_width; + int n_event_categories; + + bool per_counter_caps_supported; + unsigned long supported_event_categories; + + unsigned long supported_filters; + int n_filters; + + struct hlist_node cpuhp_node; +}; + #define IDXD_MAX_PRIORITY 0xf enum idxd_wq_state { @@ -241,6 +269,8 @@ struct idxd_device { struct work_struct work; int *int_handles; + + struct idxd_pmu *idxd_pmu; }; /* IDXD software descriptor */ @@ -437,4 +467,19 @@ int idxd_cdev_get_major(struct idxd_device *idxd); int idxd_wq_add_cdev(struct idxd_wq *wq); void idxd_wq_del_cdev(struct idxd_wq *wq); +/* perfmon */ +#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON) +int perfmon_pmu_init(struct idxd_device *idxd); +void perfmon_pmu_remove(struct idxd_device *idxd); +void perfmon_counter_overflow(struct idxd_device *idxd); +void perfmon_init(void); +void perfmon_exit(void); +#else +static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; } +static inline void perfmon_pmu_remove(struct idxd_device *idxd) {} +static inline void perfmon_counter_overflow(struct idxd_device *idxd) {} +static inline void perfmon_init(void) {} +static inline void perfmon_exit(void) {} +#endif + #endif diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c new file mode 100644 index 000000000000..d73004f47cf4 --- /dev/null +++ b/drivers/dma/idxd/perfmon.c @@ -0,0 +1,662 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */ + +#include +#include +#include "idxd.h" +#include "perfmon.h" + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf); + +static cpumask_t perfmon_dsa_cpu_mask; +static bool cpuhp_set_up; +static enum cpuhp_state cpuhp_slot; + +/* + * perf userspace reads this attribute to determine which cpus to open + * counters on. It's connected to perfmon_dsa_cpu_mask, which is + * maintained by the cpu hotplug handlers. + */ +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *perfmon_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group cpumask_attr_group = { + .attrs = perfmon_cpumask_attrs, +}; + +/* + * These attributes specify the bits in the config word that the perf + * syscall uses to pass the event ids and categories to perfmon. 
+ */ +DEFINE_PERFMON_FORMAT_ATTR(event_category, "config:0-3"); +DEFINE_PERFMON_FORMAT_ATTR(event, "config:4-31"); + +/* + * These attributes specify the bits in the config1 word that the perf + * syscall uses to pass filter data to perfmon. + */ +DEFINE_PERFMON_FORMAT_ATTR(filter_wq, "config1:0-31"); +DEFINE_PERFMON_FORMAT_ATTR(filter_tc, "config1:32-39"); +DEFINE_PERFMON_FORMAT_ATTR(filter_pgsz, "config1:40-43"); +DEFINE_PERFMON_FORMAT_ATTR(filter_sz, "config1:44-51"); +DEFINE_PERFMON_FORMAT_ATTR(filter_eng, "config1:52-59"); + +#define PERFMON_FILTERS_START 2 +#define PERFMON_FILTERS_MAX 5 + +static struct attribute *perfmon_format_attrs[] = { + &format_attr_idxd_event_category.attr, + &format_attr_idxd_event.attr, + &format_attr_idxd_filter_wq.attr, + &format_attr_idxd_filter_tc.attr, + &format_attr_idxd_filter_pgsz.attr, + &format_attr_idxd_filter_sz.attr, + &format_attr_idxd_filter_eng.attr, + NULL, +}; + +static struct attribute_group perfmon_format_attr_group = { + .name = "format", + .attrs = perfmon_format_attrs, +}; + +static const struct attribute_group *perfmon_attr_groups[] = { + &perfmon_format_attr_group, + &cpumask_attr_group, + NULL, +}; + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask); +} + +static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event) +{ + return &idxd_pmu->pmu == event->pmu; +} + +static int perfmon_collect_events(struct idxd_pmu *idxd_pmu, + struct perf_event *leader, + bool do_grp) +{ + struct perf_event *event; + int n, max_count; + + max_count = idxd_pmu->n_counters; + n = idxd_pmu->n_events; + + if (n >= max_count) + return -EINVAL; + + if (is_idxd_event(idxd_pmu, leader)) { + idxd_pmu->event_list[n] = leader; + idxd_pmu->event_list[n]->hw.idx = n; + n++; + } + + if (!do_grp) + return n; + + for_each_sibling_event(event, leader) { + if (!is_idxd_event(idxd_pmu, event) || + event->state <= PERF_EVENT_STATE_OFF) + continue; + + if (n >= max_count) + return -EINVAL; + + idxd_pmu->event_list[n] = event; + idxd_pmu->event_list[n]->hw.idx = n; + n++; + } + + return n; +} + +static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu, + struct perf_event *event, int idx) +{ + struct idxd_device *idxd = idxd_pmu->idxd; + struct hw_perf_event *hwc = &event->hw; + + hwc->idx = idx; + hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx)); + hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx)); +} + +static int perfmon_assign_event(struct idxd_pmu *idxd_pmu, + struct perf_event *event) +{ + int i; + + for (i = 0; i < IDXD_PMU_EVENT_MAX; i++) + if (!test_and_set_bit(i, idxd_pmu->used_mask)) + return i; + + return -EINVAL; +} + +/* + * Check whether there are enough counters to satisfy that all the + * events in the group can actually be scheduled at the same time. + * + * To do this, create a fake idxd_pmu object so the event collection + * and assignment functions can be used without affecting the internal + * state of the real idxd_pmu object. 
+ */ +static int perfmon_validate_group(struct idxd_pmu *pmu, + struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + struct idxd_pmu *fake_pmu; + int i, ret = 0, n, idx; + + fake_pmu = kzalloc(sizeof(*fake_pmu), GFP_KERNEL); + if (!fake_pmu) + return -ENOMEM; + + fake_pmu->pmu.name = pmu->pmu.name; + fake_pmu->n_counters = pmu->n_counters; + + n = perfmon_collect_events(fake_pmu, leader, true); + if (n < 0) { + ret = n; + goto out; + } + + fake_pmu->n_events = n; + n = perfmon_collect_events(fake_pmu, event, false); + if (n < 0) { + ret = n; + goto out; + } + + fake_pmu->n_events = n; + + for (i = 0; i < n; i++) { + event = fake_pmu->event_list[i]; + + idx = perfmon_assign_event(fake_pmu, event); + if (idx < 0) { + ret = idx; + goto out; + } + } +out: + kfree(fake_pmu); + + return ret; +} + +static int perfmon_pmu_event_init(struct perf_event *event) +{ + struct idxd_device *idxd; + int ret = 0; + + idxd = event_to_idxd(event); + event->hw.idx = -1; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* sampling not supported */ + if (event->attr.sample_period) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + + if (event->pmu != &idxd->idxd_pmu->pmu) + return -EINVAL; + + event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd)); + event->cpu = idxd->idxd_pmu->cpu; + event->hw.config = event->attr.config; + + if (event->group_leader != event) + /* non-group events have themselves as leader */ + ret = perfmon_validate_group(idxd->idxd_pmu, event); + + return ret; +} + +static inline u64 perfmon_pmu_read_counter(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct idxd_device *idxd; + int cntr = hwc->idx; + + idxd = event_to_idxd(event); + + return ioread64(CNTRDATA_REG(idxd, cntr)); +} + +static void perfmon_pmu_event_update(struct perf_event *event) +{ + struct idxd_device *idxd = event_to_idxd(event); + u64 prev_raw_count, new_raw_count, delta, p, n; + int shift = 64 - idxd->idxd_pmu->counter_width; + struct hw_perf_event *hwc = &event->hw; + + do { + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = perfmon_pmu_read_counter(event); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + + n = (new_raw_count << shift); + p = (prev_raw_count << shift); + + delta = ((n - p) >> shift); + + local64_add(delta, &event->count); +} + +void perfmon_counter_overflow(struct idxd_device *idxd) +{ + int i, n_counters, max_loop = OVERFLOW_SIZE; + struct perf_event *event; + unsigned long ovfstatus; + + n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE); + + ovfstatus = ioread32(OVFSTATUS_REG(idxd)); + + /* + * While updating overflowed counters, other counters behind + * them could overflow and be missed in a given pass. + * Normally this could happen at most n_counters times, but in + * theory a tiny counter width could result in continual + * overflows and endless looping. max_loop provides a + * failsafe in that highly unlikely case. + */ + while (ovfstatus && max_loop--) { + /* Figure out which counter(s) overflowed */ + for_each_set_bit(i, &ovfstatus, n_counters) { + unsigned long ovfstatus_clear = 0; + + /* Update event->count for overflowed counter */ + event = idxd->idxd_pmu->event_list[i]; + perfmon_pmu_event_update(event); + /* Writing 1 to OVFSTATUS bit clears it */ + set_bit(i, &ovfstatus_clear); + iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd)); + } + + ovfstatus = ioread32(OVFSTATUS_REG(idxd)); + } + + /* + * Should never happen. 
If so, it means a counter(s) looped + * around twice while this handler was running. + */ + WARN_ON_ONCE(ovfstatus); +} + +static inline void perfmon_reset_config(struct idxd_device *idxd) +{ + iowrite32(CONFIG_RESET, PERFRST_REG(idxd)); + iowrite32(0, OVFSTATUS_REG(idxd)); + iowrite32(0, PERFFRZ_REG(idxd)); +} + +static inline void perfmon_reset_counters(struct idxd_device *idxd) +{ + iowrite32(CNTR_RESET, PERFRST_REG(idxd)); +} + +static inline void perfmon_reset(struct idxd_device *idxd) +{ + perfmon_reset_config(idxd); + perfmon_reset_counters(idxd); +} + +static void perfmon_pmu_event_start(struct perf_event *event, int mode) +{ + u32 flt_wq, flt_tc, flt_pg_sz, flt_xfer_sz, flt_eng = 0; + u64 cntr_cfg, cntrdata, event_enc, event_cat = 0; + struct hw_perf_event *hwc = &event->hw; + union filter_cfg flt_cfg; + union event_cfg event_cfg; + struct idxd_device *idxd; + int cntr; + + idxd = event_to_idxd(event); + + event->hw.idx = hwc->idx; + cntr = hwc->idx; + + /* Obtain event category and event value from user space */ + event_cfg.val = event->attr.config; + flt_cfg.val = event->attr.config1; + event_cat = event_cfg.event_cat; + event_enc = event_cfg.event_enc; + + /* Obtain filter configuration from user space */ + flt_wq = flt_cfg.wq; + flt_tc = flt_cfg.tc; + flt_pg_sz = flt_cfg.pg_sz; + flt_xfer_sz = flt_cfg.xfer_sz; + flt_eng = flt_cfg.eng; + + if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters)) + iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ)); + if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters)) + iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC)); + if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters)) + iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ)); + if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters)) + iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ)); + if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters)) + iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG)); + + /* Read the start value */ + cntrdata = ioread64(CNTRDATA_REG(idxd, cntr)); + local64_set(&event->hw.prev_count, cntrdata); + + /* Set counter to event/category */ + cntr_cfg = event_cat << CNTRCFG_CATEGORY_SHIFT; + cntr_cfg |= event_enc << CNTRCFG_EVENT_SHIFT; + /* Set interrupt on overflow and counter enable bits */ + cntr_cfg |= (CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE); + + iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr)); +} + +static void perfmon_pmu_event_stop(struct perf_event *event, int mode) +{ + struct hw_perf_event *hwc = &event->hw; + struct idxd_device *idxd; + int i, cntr = hwc->idx; + u64 cntr_cfg; + + idxd = event_to_idxd(event); + + /* remove this event from event list */ + for (i = 0; i < idxd->idxd_pmu->n_events; i++) { + if (event != idxd->idxd_pmu->event_list[i]) + continue; + + for (++i; i < idxd->idxd_pmu->n_events; i++) + idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i]; + --idxd->idxd_pmu->n_events; + break; + } + + cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr)); + cntr_cfg &= ~CNTRCFG_ENABLE; + iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr)); + + if (mode == PERF_EF_UPDATE) + perfmon_pmu_event_update(event); + + event->hw.idx = -1; + clear_bit(cntr, idxd->idxd_pmu->used_mask); +} + +static void perfmon_pmu_event_del(struct perf_event *event, int mode) +{ + perfmon_pmu_event_stop(event, PERF_EF_UPDATE); +} + +static int perfmon_pmu_event_add(struct perf_event *event, int flags) +{ + struct idxd_device *idxd = event_to_idxd(event); + struct idxd_pmu *idxd_pmu = 
idxd->idxd_pmu; + struct hw_perf_event *hwc = &event->hw; + int idx, n; + + n = perfmon_collect_events(idxd_pmu, event, false); + if (n < 0) + return n; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (!(flags & PERF_EF_START)) + hwc->state |= PERF_HES_ARCH; + + idx = perfmon_assign_event(idxd_pmu, event); + if (idx < 0) + return idx; + + perfmon_assign_hw_event(idxd_pmu, event, idx); + + if (flags & PERF_EF_START) + perfmon_pmu_event_start(event, 0); + + idxd_pmu->n_events = n; + + return 0; +} + +static void enable_perfmon_pmu(struct idxd_device *idxd) +{ + iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd)); +} + +static void disable_perfmon_pmu(struct idxd_device *idxd) +{ + iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd)); +} + +static void perfmon_pmu_enable(struct pmu *pmu) +{ + struct idxd_device *idxd = pmu_to_idxd(pmu); + + enable_perfmon_pmu(idxd); +} + +static void perfmon_pmu_disable(struct pmu *pmu) +{ + struct idxd_device *idxd = pmu_to_idxd(pmu); + + disable_perfmon_pmu(idxd); +} + +static void skip_filter(int i) +{ + int j; + + for (j = i; j < PERFMON_FILTERS_MAX; j++) + perfmon_format_attrs[PERFMON_FILTERS_START + j] = + perfmon_format_attrs[PERFMON_FILTERS_START + j + 1]; +} + +static void idxd_pmu_init(struct idxd_pmu *idxd_pmu) +{ + int i; + + for (i = 0 ; i < PERFMON_FILTERS_MAX; i++) { + if (!test_bit(i, &idxd_pmu->supported_filters)) + skip_filter(i); + } + + idxd_pmu->pmu.name = idxd_pmu->name; + idxd_pmu->pmu.attr_groups = perfmon_attr_groups; + idxd_pmu->pmu.task_ctx_nr = perf_invalid_context; + idxd_pmu->pmu.event_init = perfmon_pmu_event_init; + idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable, + idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable, + idxd_pmu->pmu.add = perfmon_pmu_event_add; + idxd_pmu->pmu.del = perfmon_pmu_event_del; + idxd_pmu->pmu.start = perfmon_pmu_event_start; + idxd_pmu->pmu.stop = perfmon_pmu_event_stop; + idxd_pmu->pmu.read = perfmon_pmu_event_update; + idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; + idxd_pmu->pmu.module = THIS_MODULE; +} + +void perfmon_pmu_remove(struct idxd_device *idxd) +{ + if (!idxd->idxd_pmu) + return; + + cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node); + perf_pmu_unregister(&idxd->idxd_pmu->pmu); + kfree(idxd->idxd_pmu); + idxd->idxd_pmu = NULL; +} + +static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node) +{ + struct idxd_pmu *idxd_pmu; + + idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node); + + /* select the first online CPU as the designated reader */ + if (cpumask_empty(&perfmon_dsa_cpu_mask)) { + cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask); + idxd_pmu->cpu = cpu; + } + + return 0; +} + +static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node) +{ + struct idxd_pmu *idxd_pmu; + unsigned int target; + + idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node); + + if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask)) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + + /* migrate events if there is a valid target */ + if (target < nr_cpu_ids) + cpumask_set_cpu(target, &perfmon_dsa_cpu_mask); + else + target = -1; + + perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target); + + return 0; +} + +int perfmon_pmu_init(struct idxd_device *idxd) +{ + union idxd_perfcap perfcap; + struct idxd_pmu *idxd_pmu; + int rc = -ENODEV; + + /* + * perfmon module initialization failed, nothing to do + */ + if (!cpuhp_set_up) + return -ENODEV; + + /* + * If perfmon_offset or num_counters is 0, it means perfmon is + * not 
supported on this hardware. + */ + if (idxd->perfmon_offset == 0) + return -ENODEV; + + idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL); + if (!idxd_pmu) + return -ENOMEM; + + idxd_pmu->idxd = idxd; + idxd->idxd_pmu = idxd_pmu; + + if (idxd->data->type == IDXD_TYPE_DSA) { + rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id); + if (rc < 0) + goto free; + } else if (idxd->data->type == IDXD_TYPE_IAX) { + rc = sprintf(idxd_pmu->name, "iax%d", idxd->id); + if (rc < 0) + goto free; + } else { + goto free; + } + + perfmon_reset(idxd); + + perfcap.bits = ioread64(PERFCAP_REG(idxd)); + + /* + * If total perf counter is 0, stop further registration. + * This is necessary in order to support driver running on + * guest which does not have pmon support. + */ + if (perfcap.num_perf_counter == 0) + goto free; + + /* A counter width of 0 means it can't count */ + if (perfcap.counter_width == 0) + goto free; + + /* Overflow interrupt and counter freeze support must be available */ + if (!perfcap.overflow_interrupt || !perfcap.counter_freeze) + goto free; + + /* Number of event categories cannot be 0 */ + if (perfcap.num_event_category == 0) + goto free; + + /* + * We don't support per-counter capabilities for now. + */ + if (perfcap.cap_per_counter) + goto free; + + idxd_pmu->n_event_categories = perfcap.num_event_category; + idxd_pmu->supported_event_categories = perfcap.global_event_category; + idxd_pmu->per_counter_caps_supported = perfcap.cap_per_counter; + + /* check filter capability. If 0, then filters are not supported */ + idxd_pmu->supported_filters = perfcap.filter; + if (perfcap.filter) + idxd_pmu->n_filters = hweight8(perfcap.filter); + + /* Store the total number of counters categories, and counter width */ + idxd_pmu->n_counters = perfcap.num_perf_counter; + idxd_pmu->counter_width = perfcap.counter_width; + + idxd_pmu_init(idxd_pmu); + + rc = perf_pmu_register(&idxd_pmu->pmu, idxd_pmu->name, -1); + if (rc) + goto free; + + rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node); + if (rc) { + perf_pmu_unregister(&idxd->idxd_pmu->pmu); + goto free; + } +out: + return rc; +free: + kfree(idxd_pmu); + idxd->idxd_pmu = NULL; + + goto out; +} + +void __init perfmon_init(void) +{ + int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "driver/dma/idxd/perf:online", + perf_event_cpu_online, + perf_event_cpu_offline); + if (WARN_ON(rc < 0)) + return; + + cpuhp_slot = rc; + cpuhp_set_up = true; +} + +void __exit perfmon_exit(void) +{ + if (cpuhp_set_up) + cpuhp_remove_multi_state(cpuhp_slot); +} diff --git a/drivers/dma/idxd/perfmon.h b/drivers/dma/idxd/perfmon.h new file mode 100644 index 000000000000..9a081a1bc605 --- /dev/null +++ b/drivers/dma/idxd/perfmon.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 Intel Corporation. All rights rsvd. 
*/ + +#ifndef _PERFMON_H_ +#define _PERFMON_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "registers.h" + +static inline struct idxd_pmu *event_to_pmu(struct perf_event *event) +{ + struct idxd_pmu *idxd_pmu; + struct pmu *pmu; + + pmu = event->pmu; + idxd_pmu = container_of(pmu, struct idxd_pmu, pmu); + + return idxd_pmu; +} + +static inline struct idxd_device *event_to_idxd(struct perf_event *event) +{ + struct idxd_pmu *idxd_pmu; + struct pmu *pmu; + + pmu = event->pmu; + idxd_pmu = container_of(pmu, struct idxd_pmu, pmu); + + return idxd_pmu->idxd; +} + +static inline struct idxd_device *pmu_to_idxd(struct pmu *pmu) +{ + struct idxd_pmu *idxd_pmu; + + idxd_pmu = container_of(pmu, struct idxd_pmu, pmu); + + return idxd_pmu->idxd; +} + +enum dsa_perf_events { + DSA_PERF_EVENT_WQ = 0, + DSA_PERF_EVENT_ENGINE, + DSA_PERF_EVENT_ADDR_TRANS, + DSA_PERF_EVENT_OP, + DSA_PERF_EVENT_COMPL, + DSA_PERF_EVENT_MAX, +}; + +enum filter_enc { + FLT_WQ = 0, + FLT_TC, + FLT_PG_SZ, + FLT_XFER_SZ, + FLT_ENG, + FLT_MAX, +}; + +#define CONFIG_RESET 0x0000000000000001 +#define CNTR_RESET 0x0000000000000002 +#define CNTR_ENABLE 0x0000000000000001 +#define INTR_OVFL 0x0000000000000002 + +#define COUNTER_FREEZE 0x00000000FFFFFFFF +#define COUNTER_UNFREEZE 0x0000000000000000 +#define OVERFLOW_SIZE 32 + +#define CNTRCFG_ENABLE BIT(0) +#define CNTRCFG_IRQ_OVERFLOW BIT(1) +#define CNTRCFG_CATEGORY_SHIFT 8 +#define CNTRCFG_EVENT_SHIFT 32 + +#define PERFMON_TABLE_OFFSET(_idxd) \ +({ \ + typeof(_idxd) __idxd = (_idxd); \ + ((__idxd)->reg_base + (__idxd)->perfmon_offset); \ +}) +#define PERFMON_REG_OFFSET(idxd, offset) \ + (PERFMON_TABLE_OFFSET(idxd) + (offset)) + +#define PERFCAP_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFCAP_OFFSET)) +#define PERFRST_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFRST_OFFSET)) +#define OVFSTATUS_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_OVFSTATUS_OFFSET)) +#define PERFFRZ_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFFRZ_OFFSET)) + +#define FLTCFG_REG(idxd, cntr, flt) \ + (PERFMON_REG_OFFSET(idxd, IDXD_FLTCFG_OFFSET) + ((cntr) * 32) + ((flt) * 4)) + +#define CNTRCFG_REG(idxd, cntr) \ + (PERFMON_REG_OFFSET(idxd, IDXD_CNTRCFG_OFFSET) + ((cntr) * 8)) +#define CNTRDATA_REG(idxd, cntr) \ + (PERFMON_REG_OFFSET(idxd, IDXD_CNTRDATA_OFFSET) + ((cntr) * 8)) +#define CNTRCAP_REG(idxd, cntr) \ + (PERFMON_REG_OFFSET(idxd, IDXD_CNTRCAP_OFFSET) + ((cntr) * 8)) + +#define EVNTCAP_REG(idxd, category) \ + (PERFMON_REG_OFFSET(idxd, IDXD_EVNTCAP_OFFSET) + ((category) * 8)) + +#define DEFINE_PERFMON_FORMAT_ATTR(_name, _format) \ +static ssize_t __perfmon_idxd_##_name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct kobj_attribute format_attr_idxd_##_name = \ + __ATTR(_name, 0444, __perfmon_idxd_##_name##_show, NULL) + +#endif diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index 6c11375cc56a..c970c3f025f0 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -386,4 +386,112 @@ union wqcfg { #define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32) #define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40) +/* Following is performance monitor registers */ +#define IDXD_PERFCAP_OFFSET 0x0 +union idxd_perfcap { + struct { + u64 num_perf_counter:6; + u64 rsvd1:2; + u64 counter_width:8; + 
u64 num_event_category:4; + u64 global_event_category:16; + u64 filter:8; + u64 rsvd2:8; + u64 cap_per_counter:1; + u64 writeable_counter:1; + u64 counter_freeze:1; + u64 overflow_interrupt:1; + u64 rsvd3:8; + }; + u64 bits; +} __packed; + +#define IDXD_EVNTCAP_OFFSET 0x80 +union idxd_evntcap { + struct { + u64 events:28; + u64 rsvd:36; + }; + u64 bits; +} __packed; + +struct idxd_event { + union { + struct { + u32 event_category:4; + u32 events:28; + }; + u32 val; + }; +} __packed; + +#define IDXD_CNTRCAP_OFFSET 0x800 +struct idxd_cntrcap { + union { + struct { + u32 counter_width:8; + u32 rsvd:20; + u32 num_events:4; + }; + u32 val; + }; + struct idxd_event events[]; +} __packed; + +#define IDXD_PERFRST_OFFSET 0x10 +union idxd_perfrst { + struct { + u32 perfrst_config:1; + u32 perfrst_counter:1; + u32 rsvd:30; + }; + u32 val; +} __packed; + +#define IDXD_OVFSTATUS_OFFSET 0x30 +#define IDXD_PERFFRZ_OFFSET 0x20 +#define IDXD_CNTRCFG_OFFSET 0x100 +union idxd_cntrcfg { + struct { + u64 enable:1; + u64 interrupt_ovf:1; + u64 global_freeze_ovf:1; + u64 rsvd1:5; + u64 event_category:4; + u64 rsvd2:20; + u64 events:28; + u64 rsvd3:4; + }; + u64 val; +} __packed; + +#define IDXD_FLTCFG_OFFSET 0x300 + +#define IDXD_CNTRDATA_OFFSET 0x200 +union idxd_cntrdata { + struct { + u64 event_count_value; + }; + u64 val; +} __packed; + +union event_cfg { + struct { + u64 event_cat:4; + u64 event_enc:28; + }; + u64 val; +} __packed; + +union filter_cfg { + struct { + u64 wq:32; + u64 tc:8; + u64 pg_sz:4; + u64 xfer_sz:8; + u64 eng:8; + }; + u64 val; +} __packed; + #endif diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f14adb882338..264d911424c0 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -167,6 +167,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_PERF_X86_CSTATE_ONLINE, + CPUHP_AP_PERF_X86_IDXD_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_CFD_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, -- cgit v1.2.3-71-gd317 From 527139d738d7f2e9f929c752eebf3cbf0f74c754 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczyński Date: Fri, 16 Apr 2021 20:58:38 +0000 Subject: PCI/sysfs: Convert "rom" to static attribute MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The "rom" sysfs attribute allows access to the PCI Option ROM. Previously it was dynamically created either by pci_bus_add_device() or the pci_sysfs_init() initcall, but since it doesn't need to be created or removed dynamically, we can use a static attribute so the device model takes care of addition and removal automatically. Convert "rom" to a static attribute and use the .is_bin_visible() callback to set the correct object size based on the ROM size. Remove "rom_attr" from the struct pci_dev since it is no longer needed. 
This attribute was added in the pre-git era by https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git/commit/drivers/pci/pci-sysfs.c?id=f6d553444da2 [bhelgaas: commit log] Suggested-by: Oliver O'Halloran Link: https://lore.kernel.org/r/20210416205856.3234481-3-kw@linux.com Signed-off-by: Krzysztof Wilczyński Signed-off-by: Bjorn Helgaas --- drivers/pci/pci-sysfs.c | 67 +++++++++++++++++++++---------------------------- include/linux/pci.h | 1 - 2 files changed, 29 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index dc14daf404f5..fa8373685140 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1306,6 +1306,33 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj, return count; } +static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0); + +static struct bin_attribute *pci_dev_rom_attrs[] = { + &bin_attr_rom, + NULL, +}; + +static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj, + struct bin_attribute *a, int n) +{ + struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); + size_t rom_size; + + /* If the device has a ROM, try to expose it in sysfs. */ + rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); + if (!rom_size) + return 0; + + a->size = rom_size; + + return a->attr.mode; +} + +static const struct attribute_group pci_dev_rom_attr_group = { + .bin_attrs = pci_dev_rom_attrs, + .is_bin_visible = pci_dev_rom_attr_is_visible, +}; static ssize_t reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -1352,8 +1379,6 @@ error: int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev) { int retval; - int rom_size; - struct bin_attribute *attr; if (!sysfs_initialized) return -EACCES; @@ -1362,43 +1387,15 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev) if (retval) goto err; - /* If the device has a ROM, try to expose it in sysfs. 
*/ - rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); - if (rom_size) { - attr = kzalloc(sizeof(*attr), GFP_ATOMIC); - if (!attr) { - retval = -ENOMEM; - goto err_resource_files; - } - sysfs_bin_attr_init(attr); - attr->size = rom_size; - attr->attr.name = "rom"; - attr->attr.mode = 0600; - attr->read = pci_read_rom; - attr->write = pci_write_rom; - retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); - if (retval) { - kfree(attr); - goto err_resource_files; - } - pdev->rom_attr = attr; - } - /* add sysfs entries for various capabilities */ retval = pci_create_capabilities_sysfs(pdev); if (retval) - goto err_rom_file; + goto err_resource_files; pci_create_firmware_label_files(pdev); return 0; -err_rom_file: - if (pdev->rom_attr) { - sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); - kfree(pdev->rom_attr); - pdev->rom_attr = NULL; - } err_resource_files: pci_remove_resource_files(pdev); err: @@ -1427,13 +1424,6 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev) pci_remove_capabilities_sysfs(pdev); pci_remove_resource_files(pdev); - - if (pdev->rom_attr) { - sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); - kfree(pdev->rom_attr); - pdev->rom_attr = NULL; - } - pci_remove_firmware_label_files(pdev); } @@ -1526,6 +1516,7 @@ static const struct attribute_group pci_dev_group = { const struct attribute_group *pci_dev_groups[] = { &pci_dev_group, &pci_dev_config_attr_group, + &pci_dev_rom_attr_group, NULL, }; diff --git a/include/linux/pci.h b/include/linux/pci.h index 86c799c97b77..45f1fef80b50 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -458,7 +458,6 @@ struct pci_dev { u32 saved_config_space[16]; /* Config space saved at suspend time */ struct hlist_head saved_cap_space; - struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */ int rom_attr_enabled; /* Display of ROM attribute enabled? */ struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ -- cgit v1.2.3-71-gd317 From d991bb1c8da842a2a0b9dc83b1005e655783f861 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Thu, 29 Apr 2021 22:53:50 -0700 Subject: include/linux/compiler-gcc.h: sparse can do constant folding of __builtin_bswap*() Sparse can do constant folding of __builtin_bswap*() since 2017. Also, a much recent version of Sparse is needed anyway, see commit 6ec4476ac825 ("Raise gcc version requirement to 4.9"). So, remove the comment about sparse not being yet able to constant fold __builtin_bswap*() and remove the corresponding test of __CHECKER__. Link: https://lkml.kernel.org/r/20210226092236.99369-1-luc.vanoostenryck@gmail.com Signed-off-by: Luc Van Oostenryck Acked-by: Arnd Bergmann Acked-by: Miguel Ojeda Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compiler-gcc.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 48750243db4c..5d97ef738a57 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -90,15 +90,11 @@ */ #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) -/* - * sparse (__CHECKER__) pretends to be gcc, but can't do constant - * folding in __builtin_bswap*() (yet), so don't set these for it. 
- */ -#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) +#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ #define __HAVE_BUILTIN_BSWAP16__ -#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ +#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ #if GCC_VERSION >= 70000 #define KASAN_ABI_VERSION 5 -- cgit v1.2.3-71-gd317 From 8e9b16c47680f6e7d6e5864a37f313f905a91cf5 Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Thu, 29 Apr 2021 22:55:08 -0700 Subject: mm: page_owner: detect page_owner recursion via task_struct Before the change page_owner recursion was detected via fetching backtrace and inspecting it for current instruction pointer. It has a few problems: - it is slightly slow as it requires extra backtrace and a linear stack scan of the result - it is too late to check if backtrace fetching required memory allocation itself (ia64's unwinder requires it). To simplify recursion tracking let's use page_owner recursion flag in 'struct task_struct'. The change make page_owner=on work on ia64 by avoiding infinite recursion in: kmalloc() -> __set_page_owner() -> save_stack() -> unwind() [ia64-specific] -> build_script() -> kmalloc() -> __set_page_owner() [we short-circuit here] -> save_stack() -> unwind() [recursion] Link: https://lkml.kernel.org/r/20210402115342.1463781-1-slyfox@gentoo.org Signed-off-by: Sergei Trofimovich Reviewed-by: Andrew Morton Acked-by: Vlastimil Babka Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Juri Lelli Cc: Vincent Guittot Cc: Dietmar Eggemann Cc: Steven Rostedt Cc: Ben Segall Cc: Mel Gorman Cc: Daniel Bristot de Oliveira Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 4 ++++ mm/page_owner.c | 32 ++++++++++---------------------- 2 files changed, 14 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index d7d07aa0facf..9c25c8e67030 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -841,6 +841,10 @@ struct task_struct { /* Stalled due to lack of memory */ unsigned in_memstall:1; #endif +#ifdef CONFIG_PAGE_OWNER + /* Used by page_owner=on to detect recursion in page tracking. */ + unsigned in_page_owner:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. */ diff --git a/mm/page_owner.c b/mm/page_owner.c index 5c941ca9861d..9661d5320a07 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -98,42 +98,30 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext) return (void *)page_ext + page_owner_ops.offset; } -static inline bool check_recursive_alloc(unsigned long *entries, - unsigned int nr_entries, - unsigned long ip) -{ - unsigned int i; - - for (i = 0; i < nr_entries; i++) { - if (entries[i] == ip) - return true; - } - return false; -} - static noinline depot_stack_handle_t save_stack(gfp_t flags) { unsigned long entries[PAGE_OWNER_STACK_DEPTH]; depot_stack_handle_t handle; unsigned int nr_entries; - nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2); - /* - * We need to check recursion here because our request to - * stackdepot could trigger memory allocation to save new - * entry. New memory allocation would reach here and call - * stack_depot_save_entries() again if we don't catch it. There is - * still not enough memory in stackdepot so it would try to - * allocate memory again and loop forever. + * Avoid recursion. 
+ * + * Sometimes page metadata allocation tracking requires more + * memory to be allocated: + * - when new stack trace is saved to stack depot + * - when backtrace itself is calculated (ia64) */ - if (check_recursive_alloc(entries, nr_entries, _RET_IP_)) + if (current->in_page_owner) return dummy_handle; + current->in_page_owner = 1; + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2); handle = stack_depot_save(entries, nr_entries, flags); if (!handle) handle = failure_handle; + current->in_page_owner = 0; return handle; } -- cgit v1.2.3-71-gd317 From 63135aa3866db99fd923b716c5ff2e468879624a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 29 Apr 2021 22:55:18 -0700 Subject: mm: provide filemap_range_needs_writeback() helper Patch series "Improve IOCB_NOWAIT O_DIRECT reads", v3. An internal workload complained because it was using too much CPU, and when I took a look, we had a lot of io_uring workers going to town. For an async buffered read like workload, I am normally expecting _zero_ offloads to a worker thread, but this one had tons of them. I'd drop caches and things would look good again, but then a minute later we'd regress back to using workers. Turns out that every minute something was reading parts of the device, which would add page cache for that inode. I put patches like these in for our kernel, and the problem was solved. Don't -EAGAIN IOCB_NOWAIT dio reads just because we have page cache entries for the given range. This causes unnecessary work from the callers side, when the IO could have been issued totally fine without blocking on writeback when there is none. This patch (of 3): For O_DIRECT reads/writes, we check if we need to issue a call to filemap_write_and_wait_range() to issue and/or wait for writeback for any page in the given range. The existing mechanism just checks for a page in the range, which is suboptimal for IOCB_NOWAIT as we'll fallback to the slow path (and needing retry) if there's just a clean page cache page in the range. Provide filemap_range_needs_writeback() which tries a little harder to check if we actually need to issue and/or wait for writeback in the range. 
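As a hedged, caller-side sketch (the actual O_DIRECT read-path conversion lands in the follow-up patches of this series, so names such as iocb, mapping, count and retval are assumed context here), the helper is intended to be used roughly like this:

    /*
     * Illustrative only: with the new helper, an IOCB_NOWAIT O_DIRECT read
     * no longer has to return -EAGAIN just because clean page cache exists
     * in the range; it only bails out when writeback would actually need to
     * be issued or waited on.
     */
    if (iocb->ki_flags & IOCB_NOWAIT) {
            if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
                                              iocb->ki_pos + count - 1))
                    return -EAGAIN;
    } else {
            retval = filemap_write_and_wait_range(mapping, iocb->ki_pos,
                                                  iocb->ki_pos + count - 1);
            if (retval < 0)
                    return retval;
    }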
Link: https://lkml.kernel.org/r/20210224164455.1096727-1-axboe@kernel.dk Link: https://lkml.kernel.org/r/20210224164455.1096727-2-axboe@kernel.dk Signed-off-by: Jens Axboe Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/fs.h | 2 ++ mm/filemap.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index bf4e90d3ab18..12766edee81f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2878,6 +2878,8 @@ static inline int filemap_fdatawait(struct address_space *mapping) extern bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend); +extern bool filemap_range_needs_writeback(struct address_space *, + loff_t lstart, loff_t lend); extern int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend); extern int __filemap_fdatawrite_range(struct address_space *mapping, diff --git a/mm/filemap.c b/mm/filemap.c index 151090fdcf29..1750742b5689 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -635,6 +635,49 @@ static bool mapping_needs_writeback(struct address_space *mapping) return mapping->nrpages; } +/** + * filemap_range_needs_writeback - check if range potentially needs writeback + * @mapping: address space within which to check + * @start_byte: offset in bytes where the range starts + * @end_byte: offset in bytes where the range ends (inclusive) + * + * Find at least one page in the range supplied, usually used to check if + * direct writing in this range will trigger a writeback. Used by O_DIRECT + * read/write with IOCB_NOWAIT, to see if the caller needs to do + * filemap_write_and_wait_range() before proceeding. + * + * Return: %true if the caller should do filemap_write_and_wait_range() before + * doing O_DIRECT to a page in this range, %false otherwise. + */ +bool filemap_range_needs_writeback(struct address_space *mapping, + loff_t start_byte, loff_t end_byte) +{ + XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); + pgoff_t max = end_byte >> PAGE_SHIFT; + struct page *page; + + if (!mapping_needs_writeback(mapping)) + return false; + if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && + !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) + return false; + if (end_byte < start_byte) + return false; + + rcu_read_lock(); + xas_for_each(&xas, page, max) { + if (xas_retry(&xas, page)) + continue; + if (xa_is_value(page)) + continue; + if (PageDirty(page) || PageLocked(page) || PageWriteback(page)) + break; + } + rcu_read_unlock(); + return page != NULL; +} +EXPORT_SYMBOL_GPL(filemap_range_needs_writeback); + /** * filemap_write_and_wait_range - write out & wait on a file range * @mapping: the address_space for the pages -- cgit v1.2.3-71-gd317 From 1c824a680b1b67ad43c0908f11a70bcf37af56d5 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 29 Apr 2021 22:55:32 -0700 Subject: mm: page-writeback: simplify memcg handling in test_clear_page_writeback() Page writeback doesn't hold a page reference, which allows truncate to free a page the second PageWriteback is cleared. This used to require special attention in test_clear_page_writeback(), where we had to be careful not to rely on the unstable page->memcg binding and look up all the necessary information before clearing the writeback flag. 
Since commit 073861ed77b6 ("mm: fix VM_BUG_ON(PageTail) and BUG_ON(PageWriteback)") test_clear_page_writeback() is called with an explicit reference on the page, and this dance is no longer needed. Use unlock_page_memcg() and dec_lruvec_page_state() directly. This removes the last user of the lock_page_memcg() return value, change it to void. Touch up the comments in there as well. This also removes the last extern user of __unlock_page_memcg(), make it static. Further, it removes the last user of dec_lruvec_state(), delete it, along with a few other unused helpers. Link: https://lkml.kernel.org/r/YCQbYAWg4nvBFL6h@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Hugh Dickins Reviewed-by: Shakeel Butt Acked-by: Michal Hocko Cc: Roman Gushchin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 10 ++-------- include/linux/vmstat.h | 24 +++--------------------- mm/memcontrol.c | 36 +++++++++++------------------------- mm/page-writeback.c | 9 +++------ 4 files changed, 19 insertions(+), 60 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0c04d39a7967..ae448d955a87 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -867,8 +867,7 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); extern bool cgroup_memory_noswap; #endif -struct mem_cgroup *lock_page_memcg(struct page *page); -void __unlock_page_memcg(struct mem_cgroup *memcg); +void lock_page_memcg(struct page *page); void unlock_page_memcg(struct page *page); /* @@ -1289,12 +1288,7 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) { } -static inline struct mem_cgroup *lock_page_memcg(struct page *page) -{ - return NULL; -} - -static inline void __unlock_page_memcg(struct mem_cgroup *memcg) +static inline void lock_page_memcg(struct page *page) { } diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 506d625163a1..3299cd69e4ca 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -512,16 +512,10 @@ static inline void mod_lruvec_page_state(struct page *page, #endif /* CONFIG_MEMCG */ -static inline void __inc_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) -{ - __mod_lruvec_state(lruvec, idx, 1); -} - -static inline void __dec_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) +static inline void inc_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) { - __mod_lruvec_state(lruvec, idx, -1); + mod_lruvec_state(lruvec, idx, 1); } static inline void __inc_lruvec_page_state(struct page *page, @@ -536,18 +530,6 @@ static inline void __dec_lruvec_page_state(struct page *page, __mod_lruvec_page_state(page, idx, -1); } -static inline void inc_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) -{ - mod_lruvec_state(lruvec, idx, 1); -} - -static inline void dec_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) -{ - mod_lruvec_state(lruvec, idx, -1); -} - static inline void inc_lruvec_page_state(struct page *page, enum node_stat_item idx) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e064ac0d850a..06caac775abb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2118,11 +2118,10 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) * This function protects unlocked LRU pages from being moved to * another cgroup. * - * It ensures lifetime of the returned memcg. 
Caller is responsible - * for the lifetime of the page; __unlock_page_memcg() is available - * when @page might get freed inside the locked section. + * It ensures lifetime of the locked memcg. Caller is responsible + * for the lifetime of the page. */ -struct mem_cgroup *lock_page_memcg(struct page *page) +void lock_page_memcg(struct page *page) { struct page *head = compound_head(page); /* rmap on tail pages */ struct mem_cgroup *memcg; @@ -2132,21 +2131,15 @@ struct mem_cgroup *lock_page_memcg(struct page *page) * The RCU lock is held throughout the transaction. The fast * path can get away without acquiring the memcg->move_lock * because page moving starts with an RCU grace period. - * - * The RCU lock also protects the memcg from being freed when - * the page state that is going to change is the only thing - * preventing the page itself from being freed. E.g. writeback - * doesn't hold a page reference and relies on PG_writeback to - * keep off truncation, migration and so forth. */ rcu_read_lock(); if (mem_cgroup_disabled()) - return NULL; + return; again: memcg = page_memcg(head); if (unlikely(!memcg)) - return NULL; + return; #ifdef CONFIG_PROVE_LOCKING local_irq_save(flags); @@ -2155,7 +2148,7 @@ again: #endif if (atomic_read(&memcg->moving_account) <= 0) - return memcg; + return; spin_lock_irqsave(&memcg->move_lock, flags); if (memcg != page_memcg(head)) { @@ -2164,24 +2157,17 @@ again: } /* - * When charge migration first begins, we can have locked and - * unlocked page stat updates happening concurrently. Track - * the task who has the lock for unlock_page_memcg(). + * When charge migration first begins, we can have multiple + * critical sections holding the fast-path RCU lock and one + * holding the slowpath move_lock. Track the task who has the + * move_lock for unlock_page_memcg(). */ memcg->move_lock_task = current; memcg->move_lock_flags = flags; - - return memcg; } EXPORT_SYMBOL(lock_page_memcg); -/** - * __unlock_page_memcg - unlock and unpin a memcg - * @memcg: the memcg - * - * Unlock and unpin a memcg returned by lock_page_memcg(). 
- */ -void __unlock_page_memcg(struct mem_cgroup *memcg) +static void __unlock_page_memcg(struct mem_cgroup *memcg) { if (memcg && memcg->move_lock_task == current) { unsigned long flags = memcg->move_lock_flags; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 9e35b636a393..5e761fb62800 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2722,12 +2722,9 @@ EXPORT_SYMBOL(clear_page_dirty_for_io); int test_clear_page_writeback(struct page *page) { struct address_space *mapping = page_mapping(page); - struct mem_cgroup *memcg; - struct lruvec *lruvec; int ret; - memcg = lock_page_memcg(page); - lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); + lock_page_memcg(page); if (mapping && mapping_use_writeback_tags(mapping)) { struct inode *inode = mapping->host; struct backing_dev_info *bdi = inode_to_bdi(inode); @@ -2755,11 +2752,11 @@ int test_clear_page_writeback(struct page *page) ret = TestClearPageWriteback(page); } if (ret) { - dec_lruvec_state(lruvec, NR_WRITEBACK); + dec_lruvec_page_state(page, NR_WRITEBACK); dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); inc_node_page_state(page, NR_WRITTEN); } - __unlock_page_memcg(memcg); + unlock_page_memcg(page); return ret; } -- cgit v1.2.3-71-gd317 From 842ca547f706b1e05ccf3026a0ab15d24772a188 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 29 Apr 2021 22:55:35 -0700 Subject: mm: move page_mapping_file to pagemap.h page_mapping_file() is only used by some architectures, and then it is usually only used in one place. Make it a static inline function so other architectures don't have to carry this dead code. Link: https://lkml.kernel.org/r/20210317123011.350118-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Acked-by: Mike Rapoport Cc: Huang Ying Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/copypage-v4mc.c | 1 + arch/arm/mm/copypage-v6.c | 1 + arch/arm/mm/copypage-xscale.c | 1 + arch/csky/abiv1/cacheflush.c | 1 + arch/mips/mm/cache.c | 1 + arch/nios2/mm/cacheflush.c | 1 + arch/sh/mm/cache-sh4.c | 1 + arch/sh/mm/cache-sh7705.c | 1 + arch/sparc/mm/tlb.c | 1 + include/linux/mm.h | 1 - include/linux/pagemap.h | 10 ++++++++++ mm/util.c | 10 ---------- 12 files changed, 19 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 44f7292ec27b..f1da3b439b96 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 6a769a6c314e..d8a115de5507 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index eb5d338657d1..bcb485620a05 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c index 9f1fe80cc847..07ff17ea33de 100644 --- a/arch/csky/abiv1/cacheflush.c +++ b/arch/csky/abiv1/cacheflush.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 7719d632df8d..a7bf0c80371c 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -15,6 +15,7 @@ #include #include #include +#include 
#include #include diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c index 65de1bd6a760..6aa9257c3ede 100644 --- a/arch/nios2/mm/cacheflush.c +++ b/arch/nios2/mm/cacheflush.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index ddfa9685f1ef..72c2e1b46c08 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c index 4c67b3d88775..9b63a53a5e46 100644 --- a/arch/sh/mm/cache-sh7705.c +++ b/arch/sh/mm/cache-sh7705.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 20ee14739333..9a725547578e 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/mm.h b/include/linux/mm.h index 21115933b9b8..2e5c207e702c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1629,7 +1629,6 @@ static inline pgoff_t page_index(struct page *page) bool page_mapped(struct page *page); struct address_space *page_mapping(struct page *page); -struct address_space *page_mapping_file(struct page *page); /* * Return true only if the page has been allocated with diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 4686f9ab0636..469fa7ffcf96 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -157,6 +157,16 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping) void release_pages(struct page **pages, int nr); +/* + * For file cache pages, return the address_space, otherwise return NULL + */ +static inline struct address_space *page_mapping_file(struct page *page) +{ + if (unlikely(PageSwapCache(page))) + return NULL; + return page_mapping(page); +} + /* * speculatively take a reference to a page. * If the page is free (_refcount == 0), then _refcount is untouched, and 0 diff --git a/mm/util.c b/mm/util.c index c37e24d5fa43..083c5c417cfc 100644 --- a/mm/util.c +++ b/mm/util.c @@ -711,16 +711,6 @@ struct address_space *page_mapping(struct page *page) } EXPORT_SYMBOL(page_mapping); -/* - * For file cache pages, return the address_space, otherwise return NULL - */ -struct address_space *page_mapping_file(struct page *page) -{ - if (unlikely(PageSwapCache(page))) - return NULL; - return page_mapping(page); -} - /* Slow path of page_mapcount() for compound pages */ int __page_mapcount(struct page *page) { -- cgit v1.2.3-71-gd317 From 458a4f788f8602e5701b3d8c2fb6b021310a7301 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Thu, 29 Apr 2021 22:55:50 -0700 Subject: mm/gup: add a range variant of unpin_user_pages_dirty_lock() Add an unpin_user_page_range_dirty_lock() API which takes a starting page and how many consecutive pages we want to unpin and optionally dirty. To that end, define another iterator for_each_compound_range() that operates in page ranges as opposed to page array. For users (like RDMA mr_dereg) where each sg represents a contiguous set of pages, we're able to more efficiently unpin pages without having to supply an array of pages much of what happens today with unpin_user_pages(). 
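As an illustrative sketch (not part of this patch; the umem and scatterlist field names are assumptions based on typical RDMA umem code), a caller whose sg entries each cover a physically contiguous set of pages could then unpin per-sg without ever materializing a page array:

    /*
     * Sketch only: release a pinned memory region one contiguous sg range
     * at a time via the new range helper, dirtying pages when requested.
     */
    struct scatterlist *sg;
    unsigned int i;

    for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i)
            unpin_user_page_range_dirty_lock(sg_page(sg),
                            DIV_ROUND_UP(sg->length, PAGE_SIZE),
                            umem->writable && dirty);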
Link: https://lkml.kernel.org/r/20210212130843.13865-4-joao.m.martins@oracle.com Suggested-by: Jason Gunthorpe Signed-off-by: Joao Martins Reviewed-by: Jason Gunthorpe Reviewed-by: John Hubbard Cc: Christoph Hellwig Cc: Doug Ledford Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 ++ mm/gup.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 2e5c207e702c..702c2a7379d6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1265,6 +1265,8 @@ static inline void put_page(struct page *page) void unpin_user_page(struct page *page); void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty); +void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, + bool make_dirty); void unpin_user_pages(struct page **pages, unsigned long npages); /** diff --git a/mm/gup.c b/mm/gup.c index de1b75ef44da..66522ae28d09 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -213,6 +213,32 @@ void unpin_user_page(struct page *page) } EXPORT_SYMBOL(unpin_user_page); +static inline void compound_range_next(unsigned long i, unsigned long npages, + struct page **list, struct page **head, + unsigned int *ntails) +{ + struct page *next, *page; + unsigned int nr = 1; + + if (i >= npages) + return; + + next = *list + i; + page = compound_head(next); + if (PageCompound(page) && compound_order(page) >= 1) + nr = min_t(unsigned int, + page + compound_nr(page) - next, npages - i); + + *head = page; + *ntails = nr; +} + +#define for_each_compound_range(__i, __list, __npages, __head, __ntails) \ + for (__i = 0, \ + compound_range_next(__i, __npages, __list, &(__head), &(__ntails)); \ + __i < __npages; __i += __ntails, \ + compound_range_next(__i, __npages, __list, &(__head), &(__ntails))) + static inline void compound_next(unsigned long i, unsigned long npages, struct page **list, struct page **head, unsigned int *ntails) @@ -301,6 +327,42 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, } EXPORT_SYMBOL(unpin_user_pages_dirty_lock); +/** + * unpin_user_page_range_dirty_lock() - release and optionally dirty + * gup-pinned page range + * + * @page: the starting page of a range maybe marked dirty, and definitely released. + * @npages: number of consecutive pages to release. + * @make_dirty: whether to mark the pages dirty + * + * "gup-pinned page range" refers to a range of pages that has had one of the + * pin_user_pages() variants called on that page. + * + * For the page ranges defined by [page .. page+npages], make that range (or + * its head pages, if a compound page) dirty, if @make_dirty is true, and if the + * page range was previously listed as clean. + * + * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is + * required, then the caller should a) verify that this is really correct, + * because _lock() is usually required, and b) hand code it: + * set_page_dirty_lock(), unpin_user_page(). 
+ * + */ +void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, + bool make_dirty) +{ + unsigned long index; + struct page *head; + unsigned int ntails; + + for_each_compound_range(index, &page, npages, head, ntails) { + if (make_dirty && !PageDirty(head)) + set_page_dirty_lock(head); + put_compound_head(head, ntails, FOLL_PIN); + } +} +EXPORT_SYMBOL(unpin_user_page_range_dirty_lock); + /** * unpin_user_pages() - release an array of gup-pinned pages. * @pages: array of pages to be marked dirty and released. -- cgit v1.2.3-71-gd317 From 4066c119483af8e86a75447fd35be1d2553d370f Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Thu, 29 Apr 2021 22:55:56 -0700 Subject: mm: gup: remove FOLL_SPLIT Since commit 5a52c9df62b4 ("uprobe: use FOLL_SPLIT_PMD instead of FOLL_SPLIT") and commit ba925fa35057 ("s390/gmap: improve THP splitting") FOLL_SPLIT has not been used anymore. Remove the dead code. Link: https://lkml.kernel.org/r/20210330203900.9222-1-shy828301@gmail.com Signed-off-by: Yang Shi Reviewed-by: John Hubbard Reviewed-by: Jason Gunthorpe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/vm/transhuge.rst | 5 ----- include/linux/mm.h | 1 - mm/gup.c | 28 ++-------------------------- 3 files changed, 2 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/Documentation/vm/transhuge.rst b/Documentation/vm/transhuge.rst index 0ed23e59abe5..216db1d67d04 100644 --- a/Documentation/vm/transhuge.rst +++ b/Documentation/vm/transhuge.rst @@ -53,11 +53,6 @@ prevent the page from being split by anyone. of handling GUP on hugetlbfs will also work fine on transparent hugepage backed mappings. -In case you can't handle compound pages if they're returned by -follow_page, the FOLL_SPLIT bit can be specified as a parameter to -follow_page, so that it will split the hugepages before returning -them. - Graceful fallback ================= diff --git a/include/linux/mm.h b/include/linux/mm.h index 702c2a7379d6..64be3baf861a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2791,7 +2791,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO * and return without waiting upon it */ #define FOLL_POPULATE 0x40 /* fault in page */ -#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ diff --git a/mm/gup.c b/mm/gup.c index 66522ae28d09..71e546e279fc 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -516,18 +516,6 @@ retry: } } - if (flags & FOLL_SPLIT && PageTransCompound(page)) { - get_page(page); - pte_unmap_unlock(ptep, ptl); - lock_page(page); - ret = split_huge_page(page); - unlock_page(page); - put_page(page); - if (ret) - return ERR_PTR(ret); - goto retry; - } - /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. 
*/ if (unlikely(!try_grab_page(page, flags))) { page = ERR_PTR(-ENOMEM); @@ -672,7 +660,7 @@ retry_locked: spin_unlock(ptl); return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } - if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) { + if (flags & FOLL_SPLIT_PMD) { int ret; page = pmd_page(*pmd); if (is_huge_zero_page(page)) { @@ -681,19 +669,7 @@ retry_locked: split_huge_pmd(vma, pmd, address); if (pmd_trans_unstable(pmd)) ret = -EBUSY; - } else if (flags & FOLL_SPLIT) { - if (unlikely(!try_get_page(page))) { - spin_unlock(ptl); - return ERR_PTR(-ENOMEM); - } - spin_unlock(ptl); - lock_page(page); - ret = split_huge_page(page); - unlock_page(page); - put_page(page); - if (pmd_none(*pmd)) - return no_page_table(vma, flags); - } else { /* flags & FOLL_SPLIT_PMD */ + } else { spin_unlock(ptl); split_huge_pmd(vma, pmd, address); ret = pte_alloc(mm, pmd) ? -ENOMEM : 0; -- cgit v1.2.3-71-gd317 From a3747b53b1771a787fea71d86a2fc39aea337685 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 29 Apr 2021 22:56:14 -0700 Subject: mm: memcontrol: kill mem_cgroup_nodeinfo() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to encapsulate a simple struct member access. Link: https://lkml.kernel.org/r/20210209163304.77088-3-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Reviewed-by: Shakeel Butt Reviewed-by: Roman Gushchin Acked-by: Michal Hocko Reviewed-by: Michal Koutný Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 8 +------- mm/memcontrol.c | 21 +++++++++++---------- 2 files changed, 12 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ae448d955a87..149dc2fd97a6 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -602,12 +602,6 @@ void mem_cgroup_uncharge_list(struct list_head *page_list); void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); -static struct mem_cgroup_per_node * -mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid) -{ - return memcg->nodeinfo[nid]; -} - /** * mem_cgroup_lruvec - get the lru list vector for a memcg & node * @memcg: memcg of the wanted lruvec @@ -631,7 +625,7 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, if (!memcg) memcg = root_mem_cgroup; - mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); + mz = memcg->nodeinfo[pgdat->node_id]; lruvec = &mz->lruvec; out: /* diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5be0e8834a5a..b4a73c41312e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -414,13 +414,14 @@ static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg, int size, int old_size) { struct memcg_shrinker_map *new, *old; + struct mem_cgroup_per_node *pn; int nid; lockdep_assert_held(&memcg_shrinker_map_mutex); for_each_node(nid) { - old = rcu_dereference_protected( - mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true); + pn = memcg->nodeinfo[nid]; + old = rcu_dereference_protected(pn->shrinker_map, true); /* Not yet online memcg */ if (!old) return 0; @@ -433,7 +434,7 @@ static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg, memset(new->map, (int)0xff, old_size); memset((void *)new->map + old_size, 0, size - old_size); - rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new); + rcu_assign_pointer(pn->shrinker_map, new); call_rcu(&old->rcu, memcg_free_shrinker_map_rcu); } @@ -450,7 +451,7 @@ static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) return; 
for_each_node(nid) { - pn = mem_cgroup_nodeinfo(memcg, nid); + pn = memcg->nodeinfo[nid]; map = rcu_dereference_protected(pn->shrinker_map, true); kvfree(map); rcu_assign_pointer(pn->shrinker_map, NULL); @@ -713,7 +714,7 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) int nid; for_each_node(nid) { - mz = mem_cgroup_nodeinfo(memcg, nid); + mz = memcg->nodeinfo[nid]; mctz = soft_limit_tree_node(nid); if (mctz) mem_cgroup_remove_exceeded(mz, mctz); @@ -796,7 +797,7 @@ parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid) parent = parent_mem_cgroup(pn->memcg); if (!parent) return NULL; - return mem_cgroup_nodeinfo(parent, nid); + return parent->nodeinfo[nid]; } void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, @@ -1136,7 +1137,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, if (reclaim) { struct mem_cgroup_per_node *mz; - mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id); + mz = root->nodeinfo[reclaim->pgdat->node_id]; iter = &mz->iter; if (prev && reclaim->generation != iter->generation) @@ -1238,7 +1239,7 @@ static void __invalidate_reclaim_iterators(struct mem_cgroup *from, int nid; for_each_node(nid) { - mz = mem_cgroup_nodeinfo(from, nid); + mz = from->nodeinfo[nid]; iter = &mz->iter; cmpxchg(&iter->position, dead_memcg, NULL); } @@ -2397,7 +2398,7 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu) struct mem_cgroup_per_node *pn; long x; - pn = mem_cgroup_nodeinfo(memcg, nid); + pn = memcg->nodeinfo[nid]; lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu); x = lstatc->count[i]; @@ -4098,7 +4099,7 @@ static int memcg_stat_show(struct seq_file *m, void *v) unsigned long file_cost = 0; for_each_online_pgdat(pgdat) { - mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); + mz = memcg->nodeinfo[pgdat->node_id]; anon_cost += mz->lruvec.anon_cost; file_cost += mz->lruvec.file_cost; -- cgit v1.2.3-71-gd317 From a18e6e6e150a98b9ce3e9acabeff407e7b6ba0c0 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 29 Apr 2021 22:56:17 -0700 Subject: mm: memcontrol: privatize memcg_page_state query functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are no users outside of the memory controller itself. The rest of the kernel cares either about node or lruvec stats. Link: https://lkml.kernel.org/r/20210209163304.77088-4-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Reviewed-by: Shakeel Butt Reviewed-by: Roman Gushchin Acked-by: Michal Hocko Reviewed-by: Michal Koutný Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 44 -------------------------------------------- mm/memcontrol.c | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 149dc2fd97a6..9a02aabd0000 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -864,39 +864,6 @@ extern bool cgroup_memory_noswap; void lock_page_memcg(struct page *page); void unlock_page_memcg(struct page *page); -/* - * idx can be of type enum memcg_stat_item or node_stat_item. - * Keep in sync with memcg_exact_page_state(). - */ -static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) -{ - long x = atomic_long_read(&memcg->vmstats[idx]); -#ifdef CONFIG_SMP - if (x < 0) - x = 0; -#endif - return x; -} - -/* - * idx can be of type enum memcg_stat_item or node_stat_item. 
- * Keep in sync with memcg_exact_page_state(). - */ -static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg, - int idx) -{ - long x = 0; - int cpu; - - for_each_possible_cpu(cpu) - x += per_cpu(memcg->vmstats_local->stat[idx], cpu); -#ifdef CONFIG_SMP - if (x < 0) - x = 0; -#endif - return x; -} - void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); /* idx can be of type enum memcg_stat_item or node_stat_item */ @@ -1322,17 +1289,6 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) { } -static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) -{ - return 0; -} - -static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg, - int idx) -{ - return 0; -} - static inline void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int nr) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b4a73c41312e..56d1c6e58c3b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -789,6 +789,38 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); } +/* + * idx can be of type enum memcg_stat_item or node_stat_item. + * Keep in sync with memcg_exact_page_state(). + */ +static unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) +{ + long x = atomic_long_read(&memcg->vmstats[idx]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; +} + +/* + * idx can be of type enum memcg_stat_item or node_stat_item. + * Keep in sync with memcg_exact_page_state(). + */ +static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) +{ + long x = 0; + int cpu; + + for_each_possible_cpu(cpu) + x += per_cpu(memcg->vmstats_local->stat[idx], cpu); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; +} + static struct mem_cgroup_per_node * parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid) { -- cgit v1.2.3-71-gd317 From 2d146aa3aa842d7f5065802556b4f9a2c6e8ef12 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 29 Apr 2021 22:56:26 -0700 Subject: mm: memcontrol: switch to rstat MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the memory controller's custom hierarchical stats code with the generic rstat infrastructure provided by the cgroup core. The current implementation does batched upward propagation from the write side (i.e. as stats change). The per-cpu batches introduce an error, which is multiplied by the number of subgroups in a tree. In systems with many CPUs and sizable cgroup trees, the error can be large enough to confuse users (e.g. 32 batch pages * 32 CPUs * 32 subgroups results in an error of up to 128M per stat item). This can entirely swallow allocation bursts inside a workload that the user is expecting to see reflected in the statistics. In the past, we've done read-side aggregation, where a memory.stat read would have to walk the entire subtree and add up per-cpu counts. This became problematic with lazily-freed cgroups: we could have large subtrees where most cgroups were entirely idle. Hence the switch to change-driven upward propagation. Unfortunately, it needed to trade accuracy for speed due to the write side being so hot. Rstat combines the best of both worlds: from the write side, it cheaply maintains a queue of cgroups that have pending changes, so that the read side can do selective tree aggregation. 
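A condensed sketch of that write/read split, mirroring the __mod_memcg_state() and memcg_page_state() changes further down in this patch (the wrapper names here are illustrative; per-cpu batching and the hierarchical pending deltas are handled by the rstat core and the flush callback):

	/* write side: per-cpu add, plus a cheap note that this cgroup has changes */
	static void memcg_stat_add(struct mem_cgroup *memcg, int idx, long val)
	{
		__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
		cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
	}

	/* read side: flush only the subtrees with pending changes, then read */
	static unsigned long memcg_stat_read(struct mem_cgroup *memcg, int idx)
	{
		cgroup_rstat_flush(memcg->css.cgroup);
		return READ_ONCE(memcg->vmstats.state[idx]);
	}
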
This way the reported stats will always be precise and recent as can be, while the aggregation can skip over potentially large numbers of idle cgroups. The way rstat works is that it implements a tree for tracking cgroups with pending local changes, as well as a flush function that walks the tree upwards. The controller then drives this by 1) telling rstat when a local cgroup stat changes (e.g. mod_memcg_state) and 2) when a flush is required to get uptodate hierarchy stats for a given subtree (e.g. when memory.stat is read). The controller also provides a flush callback that is called during the rstat flush walk for each cgroup and aggregates its local per-cpu counters and propagates them upwards. This adds a second vmstats to struct mem_cgroup (MEMCG_NR_STAT + NR_VM_EVENT_ITEMS) to track pending subtree deltas during upward aggregation. It removes 3 words from the per-cpu data. It eliminates memcg_exact_page_state(), since memcg_page_state() is now exact. [akpm@linux-foundation.org: merge fix] [hannes@cmpxchg.org: fix a sleep in atomic section problem] Link: https://lkml.kernel.org/r/20210315234100.64307-1-hannes@cmpxchg.org Link: https://lkml.kernel.org/r/20210209163304.77088-7-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Reviewed-by: Roman Gushchin Acked-by: Michal Hocko Reviewed-by: Shakeel Butt Reviewed-by: Michal Koutný Acked-by: Balbir Singh Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 67 ++++++++------ mm/memcontrol.c | 218 ++++++++++++++++++--------------------------- 2 files changed, 127 insertions(+), 158 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 9a02aabd0000..74910ce9a3f9 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -76,10 +76,27 @@ enum mem_cgroup_events_target { }; struct memcg_vmstats_percpu { - long stat[MEMCG_NR_STAT]; - unsigned long events[NR_VM_EVENT_ITEMS]; - unsigned long nr_page_events; - unsigned long targets[MEM_CGROUP_NTARGETS]; + /* Local (CPU and cgroup) page state & events */ + long state[MEMCG_NR_STAT]; + unsigned long events[NR_VM_EVENT_ITEMS]; + + /* Delta calculation for lockless upward propagation */ + long state_prev[MEMCG_NR_STAT]; + unsigned long events_prev[NR_VM_EVENT_ITEMS]; + + /* Cgroup1: threshold notifications & softlimit tree updates */ + unsigned long nr_page_events; + unsigned long targets[MEM_CGROUP_NTARGETS]; +}; + +struct memcg_vmstats { + /* Aggregated (CPU and subtree) page state & events */ + long state[MEMCG_NR_STAT]; + unsigned long events[NR_VM_EVENT_ITEMS]; + + /* Pending child counts during tree propagation */ + long state_pending[MEMCG_NR_STAT]; + unsigned long events_pending[NR_VM_EVENT_ITEMS]; }; struct mem_cgroup_reclaim_iter { @@ -287,8 +304,8 @@ struct mem_cgroup { MEMCG_PADDING(_pad1_); - atomic_long_t vmstats[MEMCG_NR_STAT]; - atomic_long_t vmevents[NR_VM_EVENT_ITEMS]; + /* memory.stat */ + struct memcg_vmstats vmstats; /* memory.events */ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; @@ -315,10 +332,6 @@ struct mem_cgroup { atomic_t moving_account; struct task_struct *move_lock_task; - /* Legacy local VM stats and events */ - struct memcg_vmstats_percpu __percpu *vmstats_local; - - /* Subtree VM stats and events (batched updates) */ struct memcg_vmstats_percpu __percpu *vmstats_percpu; #ifdef CONFIG_CGROUP_WRITEBACK @@ -939,10 +952,6 @@ static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, local_irq_restore(flags); } -unsigned long 
mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, - gfp_t gfp_mask, - unsigned long *total_scanned); - void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count); @@ -1023,6 +1032,10 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, void split_page_memcg(struct page *head, unsigned int nr); +unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, + gfp_t gfp_mask, + unsigned long *total_scanned); + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 @@ -1131,6 +1144,10 @@ static inline bool lruvec_holds_page_lru_lock(struct page *page, return lruvec == &pgdat->__lruvec; } +static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) +{ +} + static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { return NULL; @@ -1334,18 +1351,6 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, mod_node_page_state(page_pgdat(page), idx, val); } -static inline -unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, - gfp_t gfp_mask, - unsigned long *total_scanned) -{ - return 0; -} - -static inline void split_page_memcg(struct page *head, unsigned int nr) -{ -} - static inline void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count) @@ -1368,8 +1373,16 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { } -static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) +static inline void split_page_memcg(struct page *head, unsigned int nr) +{ +} + +static inline +unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, + gfp_t gfp_mask, + unsigned long *total_scanned) { + return 0; } #endif /* CONFIG_MEMCG */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 56d1c6e58c3b..b323588223ac 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -765,37 +765,17 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) */ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) { - long x, threshold = MEMCG_CHARGE_BATCH; - if (mem_cgroup_disabled()) return; - if (memcg_stat_item_in_bytes(idx)) - threshold <<= PAGE_SHIFT; - - x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); - if (unlikely(abs(x) > threshold)) { - struct mem_cgroup *mi; - - /* - * Batch local counters to keep them in sync with - * the hierarchical ones. - */ - __this_cpu_add(memcg->vmstats_local->stat[idx], x); - for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) - atomic_long_add(x, &mi->vmstats[idx]); - x = 0; - } - __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); + __this_cpu_add(memcg->vmstats_percpu->state[idx], val); + cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); } -/* - * idx can be of type enum memcg_stat_item or node_stat_item. - * Keep in sync with memcg_exact_page_state(). - */ +/* idx can be of type enum memcg_stat_item or node_stat_item. */ static unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) { - long x = atomic_long_read(&memcg->vmstats[idx]); + long x = READ_ONCE(memcg->vmstats.state[idx]); #ifdef CONFIG_SMP if (x < 0) x = 0; @@ -803,17 +783,14 @@ static unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) return x; } -/* - * idx can be of type enum memcg_stat_item or node_stat_item. - * Keep in sync with memcg_exact_page_state(). - */ +/* idx can be of type enum memcg_stat_item or node_stat_item. 
*/ static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) { long x = 0; int cpu; for_each_possible_cpu(cpu) - x += per_cpu(memcg->vmstats_local->stat[idx], cpu); + x += per_cpu(memcg->vmstats_percpu->state[idx], cpu); #ifdef CONFIG_SMP if (x < 0) x = 0; @@ -936,30 +913,16 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count) { - unsigned long x; - if (mem_cgroup_disabled()) return; - x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); - if (unlikely(x > MEMCG_CHARGE_BATCH)) { - struct mem_cgroup *mi; - - /* - * Batch local counters to keep them in sync with - * the hierarchical ones. - */ - __this_cpu_add(memcg->vmstats_local->events[idx], x); - for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) - atomic_long_add(x, &mi->vmevents[idx]); - x = 0; - } - __this_cpu_write(memcg->vmstats_percpu->events[idx], x); + __this_cpu_add(memcg->vmstats_percpu->events[idx], count); + cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); } static unsigned long memcg_events(struct mem_cgroup *memcg, int event) { - return atomic_long_read(&memcg->vmevents[event]); + return READ_ONCE(memcg->vmstats.events[event]); } static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) @@ -968,7 +931,7 @@ static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) int cpu; for_each_possible_cpu(cpu) - x += per_cpu(memcg->vmstats_local->events[event], cpu); + x += per_cpu(memcg->vmstats_percpu->events[event], cpu); return x; } @@ -1604,6 +1567,7 @@ static char *memory_stat_format(struct mem_cgroup *memcg) * * Current memory state: */ + cgroup_rstat_flush(memcg->css.cgroup); for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { u64 size; @@ -2409,22 +2373,11 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu) drain_stock(stock); for_each_mem_cgroup(memcg) { - struct memcg_vmstats_percpu *statc; int i; - statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); - - for (i = 0; i < MEMCG_NR_STAT; i++) { + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { int nid; - if (statc->stat[i]) { - mod_memcg_state(memcg, i, statc->stat[i]); - statc->stat[i] = 0; - } - - if (i >= NR_VM_NODE_STAT_ITEMS) - continue; - for_each_node(nid) { struct batched_lruvec_stat *lstatc; struct mem_cgroup_per_node *pn; @@ -2443,13 +2396,6 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu) } } } - - for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { - if (statc->events[i]) { - count_memcg_events(memcg, i, statc->events[i]); - statc->events[i] = 0; - } - } } return 0; @@ -3572,6 +3518,7 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) unsigned long val; if (mem_cgroup_is_root(memcg)) { + cgroup_rstat_flush(memcg->css.cgroup); val = memcg_page_state(memcg, NR_FILE_PAGES) + memcg_page_state(memcg, NR_ANON_MAPPED); if (swap) @@ -3636,26 +3583,15 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, } } -static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) +static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg) { - unsigned long stat[MEMCG_NR_STAT] = {0}; - struct mem_cgroup *mi; - int node, cpu, i; - - for_each_online_cpu(cpu) - for (i = 0; i < MEMCG_NR_STAT; i++) - stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); - - for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) - for (i = 0; i < MEMCG_NR_STAT; i++) - atomic_long_add(stat[i], &mi->vmstats[i]); + int node; for_each_node(node) { struct mem_cgroup_per_node 
*pn = memcg->nodeinfo[node]; + unsigned long stat[NR_VM_NODE_STAT_ITEMS] = { 0 }; struct mem_cgroup_per_node *pi; - - for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) - stat[i] = 0; + int cpu, i; for_each_online_cpu(cpu) for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) @@ -3668,25 +3604,6 @@ static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) } } -static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) -{ - unsigned long events[NR_VM_EVENT_ITEMS]; - struct mem_cgroup *mi; - int cpu, i; - - for (i = 0; i < NR_VM_EVENT_ITEMS; i++) - events[i] = 0; - - for_each_online_cpu(cpu) - for (i = 0; i < NR_VM_EVENT_ITEMS; i++) - events[i] += per_cpu(memcg->vmstats_percpu->events[i], - cpu); - - for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) - for (i = 0; i < NR_VM_EVENT_ITEMS; i++) - atomic_long_add(events[i], &mi->vmevents[i]); -} - #ifdef CONFIG_MEMCG_KMEM static int memcg_online_kmem(struct mem_cgroup *memcg) { @@ -4003,6 +3920,8 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v) int nid; struct mem_cgroup *memcg = mem_cgroup_from_seq(m); + cgroup_rstat_flush(memcg->css.cgroup); + for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { seq_printf(m, "%s=%lu", stat->name, mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, @@ -4073,6 +3992,8 @@ static int memcg_stat_show(struct seq_file *m, void *v) BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); + cgroup_rstat_flush(memcg->css.cgroup); + for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { unsigned long nr; @@ -4549,22 +4470,6 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) return &memcg->cgwb_domain; } -/* - * idx can be of type enum memcg_stat_item or node_stat_item. - * Keep in sync with memcg_exact_page(). - */ -static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) -{ - long x = atomic_long_read(&memcg->vmstats[idx]); - int cpu; - - for_each_online_cpu(cpu) - x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; - if (x < 0) - x = 0; - return x; -} - /** * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg * @wb: bdi_writeback in question @@ -4590,13 +4495,14 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); struct mem_cgroup *parent; - *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); + cgroup_rstat_flush_irqsafe(memcg->css.cgroup); - *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); - *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) + - memcg_exact_page_state(memcg, NR_ACTIVE_FILE); - *pheadroom = PAGE_COUNTER_MAX; + *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); + *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); + *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + + memcg_page_state(memcg, NR_ACTIVE_FILE); + *pheadroom = PAGE_COUNTER_MAX; while ((parent = parent_mem_cgroup(memcg))) { unsigned long ceiling = min(READ_ONCE(memcg->memory.max), READ_ONCE(memcg->memory.high)); @@ -5228,7 +5134,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) for_each_node(node) free_mem_cgroup_per_node_info(memcg, node); free_percpu(memcg->vmstats_percpu); - free_percpu(memcg->vmstats_local); kfree(memcg); } @@ -5236,11 +5141,10 @@ static void mem_cgroup_free(struct mem_cgroup *memcg) { memcg_wb_domain_exit(memcg); /* - * Flush percpu vmstats and vmevents to guarantee the value correctness - * on parent's and all ancestor levels. 
+ * Flush percpu lruvec stats to guarantee the value + * correctness on parent's and all ancestor levels. */ - memcg_flush_percpu_vmstats(memcg); - memcg_flush_percpu_vmevents(memcg); + memcg_flush_lruvec_page_state(memcg); __mem_cgroup_free(memcg); } @@ -5267,11 +5171,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void) goto fail; } - memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu, - GFP_KERNEL_ACCOUNT); - if (!memcg->vmstats_local) - goto fail; - memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, GFP_KERNEL_ACCOUNT); if (!memcg->vmstats_percpu) @@ -5471,6 +5370,62 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) memcg_wb_domain_size_changed(memcg); } +static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct mem_cgroup *parent = parent_mem_cgroup(memcg); + struct memcg_vmstats_percpu *statc; + long delta, v; + int i; + + statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); + + for (i = 0; i < MEMCG_NR_STAT; i++) { + /* + * Collect the aggregated propagation counts of groups + * below us. We're in a per-cpu loop here and this is + * a global counter, so the first cycle will get them. + */ + delta = memcg->vmstats.state_pending[i]; + if (delta) + memcg->vmstats.state_pending[i] = 0; + + /* Add CPU changes on this level since the last flush */ + v = READ_ONCE(statc->state[i]); + if (v != statc->state_prev[i]) { + delta += v - statc->state_prev[i]; + statc->state_prev[i] = v; + } + + if (!delta) + continue; + + /* Aggregate counts on this level and propagate upwards */ + memcg->vmstats.state[i] += delta; + if (parent) + parent->vmstats.state_pending[i] += delta; + } + + for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { + delta = memcg->vmstats.events_pending[i]; + if (delta) + memcg->vmstats.events_pending[i] = 0; + + v = READ_ONCE(statc->events[i]); + if (v != statc->events_prev[i]) { + delta += v - statc->events_prev[i]; + statc->events_prev[i] = v; + } + + if (!delta) + continue; + + memcg->vmstats.events[i] += delta; + if (parent) + parent->vmstats.events_pending[i] += delta; + } +} + #ifdef CONFIG_MMU /* Handlers for move charge at task migration. */ static int mem_cgroup_do_precharge(unsigned long count) @@ -6524,6 +6479,7 @@ struct cgroup_subsys memory_cgrp_subsys = { .css_released = mem_cgroup_css_released, .css_free = mem_cgroup_css_free, .css_reset = mem_cgroup_css_reset, + .css_rstat_flush = mem_cgroup_css_rstat_flush, .can_attach = mem_cgroup_can_attach, .cancel_attach = mem_cgroup_cancel_attach, .post_attach = mem_cgroup_move_task, -- cgit v1.2.3-71-gd317 From 0add0c77a9bd0ce7cd3b53894fb08154881402a4 Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Thu, 29 Apr 2021 22:56:36 -0700 Subject: memcg: charge before adding to swapcache on swapin Currently the kernel adds the page, allocated for swapin, to the swapcache before charging the page. This is fine but now we want a per-memcg swapcache stat which is essential for folks who wants to transparently migrate from cgroup v1's memsw to cgroup v2's memory and swap counters. In addition charging a page before exposing it to other parts of the kernel is a step in the right direction. To correctly maintain the per-memcg swapcache stat, this patch has adopted to charge the page before adding it to swapcache. One challenge in this option is the failure case of add_to_swap_cache() on which we need to undo the mem_cgroup_charge(). Specifically undoing mem_cgroup_uncharge_swap() is not simple. 
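A condensed sketch of the resulting call order described below, modeled on the __read_swap_cache_async() change later in this patch (the function name is illustrative; page locking, the workingset shadow entry and put_swap_page() on failure are omitted):

	static struct page *swapin_page_charged(swp_entry_t entry, gfp_t gfp,
						struct mm_struct *mm)
	{
		struct page *page = alloc_page(gfp);

		if (!page)
			return NULL;
		/* real callers also lock the page and mark it swap-backed first */

		/* charge before the page is visible through the swapcache */
		if (mem_cgroup_swapin_charge_page(page, mm, gfp, entry))
			goto fail;
		if (add_to_swap_cache(page, entry, gfp, NULL))
			goto fail;
		/* swapcache insertion succeeded: drop the duplicate swap charge */
		mem_cgroup_swapin_uncharge_swap(entry);
		return page;
	fail:
		put_page(page);
		return NULL;
	}
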
To resolve the issue, this patch decouples the charging for swapin pages from mem_cgroup_charge(). Two new functions are introduced, mem_cgroup_swapin_charge_page() for just charging the swapin page and mem_cgroup_swapin_uncharge_swap() for uncharging the swap slot once the page has been successfully added to the swapcache. [shakeelb@google.com: set page->private before calling swap_readpage] Link: https://lkml.kernel.org/r/20210318015959.2986837-1-shakeelb@google.com Link: https://lkml.kernel.org/r/20210305212639.775498-1-shakeelb@google.com Signed-off-by: Shakeel Butt Acked-by: Roman Gushchin Acked-by: Johannes Weiner Acked-by: Hugh Dickins Tested-by: Heiko Carstens Cc: Michal Hocko Cc: Stephen Rothwell Cc: Christian Borntraeger Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 13 +++++ mm/memcontrol.c | 117 ++++++++++++++++++++++++++++----------------- mm/memory.c | 16 +++---- mm/swap_state.c | 13 +++-- 4 files changed, 100 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 74910ce9a3f9..e946c96daa32 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -609,6 +609,9 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) } int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); +int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, + gfp_t gfp, swp_entry_t entry); +void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); void mem_cgroup_uncharge(struct page *page); void mem_cgroup_uncharge_list(struct list_head *page_list); @@ -1112,6 +1115,16 @@ static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, return 0; } +static inline int mem_cgroup_swapin_charge_page(struct page *page, + struct mm_struct *mm, gfp_t gfp, swp_entry_t entry) +{ + return 0; +} + +static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) +{ +} + static inline void mem_cgroup_uncharge(struct page *page) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5582e1531b43..6d0863db3e41 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6644,6 +6644,27 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root, atomic_long_read(&parent->memory.children_low_usage))); } +static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg, + gfp_t gfp) +{ + unsigned int nr_pages = thp_nr_pages(page); + int ret; + + ret = try_charge(memcg, gfp, nr_pages); + if (ret) + goto out; + + css_get(&memcg->css); + commit_charge(page, memcg); + + local_irq_disable(); + mem_cgroup_charge_statistics(memcg, page, nr_pages); + memcg_check_events(memcg, page); + local_irq_enable(); +out: + return ret; +} + /** * mem_cgroup_charge - charge a newly allocated page to a cgroup * @page: page to charge @@ -6653,55 +6674,71 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root, * Try to charge @page to the memcg that @mm belongs to, reclaiming * pages according to @gfp_mask if necessary. * + * Do not use this for pages allocated for swapin. + * * Returns 0 on success. Otherwise, an error code is returned. 
*/ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { - unsigned int nr_pages = thp_nr_pages(page); - struct mem_cgroup *memcg = NULL; - int ret = 0; + struct mem_cgroup *memcg; + int ret; if (mem_cgroup_disabled()) - goto out; + return 0; - if (PageSwapCache(page)) { - swp_entry_t ent = { .val = page_private(page), }; - unsigned short id; + memcg = get_mem_cgroup_from_mm(mm); + ret = __mem_cgroup_charge(page, memcg, gfp_mask); + css_put(&memcg->css); - /* - * Every swap fault against a single page tries to charge the - * page, bail as early as possible. shmem_unuse() encounters - * already charged pages, too. page and memcg binding is - * protected by the page lock, which serializes swap cache - * removal, which in turn serializes uncharging. - */ - VM_BUG_ON_PAGE(!PageLocked(page), page); - if (page_memcg(compound_head(page))) - goto out; + return ret; +} - id = lookup_swap_cgroup_id(ent); - rcu_read_lock(); - memcg = mem_cgroup_from_id(id); - if (memcg && !css_tryget_online(&memcg->css)) - memcg = NULL; - rcu_read_unlock(); - } +/** + * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin + * @page: page to charge + * @mm: mm context of the victim + * @gfp: reclaim mode + * @entry: swap entry for which the page is allocated + * + * This function charges a page allocated for swapin. Please call this before + * adding the page to the swapcache. + * + * Returns 0 on success. Otherwise, an error code is returned. + */ +int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, + gfp_t gfp, swp_entry_t entry) +{ + struct mem_cgroup *memcg; + unsigned short id; + int ret; - if (!memcg) + if (mem_cgroup_disabled()) + return 0; + + id = lookup_swap_cgroup_id(entry); + rcu_read_lock(); + memcg = mem_cgroup_from_id(id); + if (!memcg || !css_tryget_online(&memcg->css)) memcg = get_mem_cgroup_from_mm(mm); + rcu_read_unlock(); - ret = try_charge(memcg, gfp_mask, nr_pages); - if (ret) - goto out_put; + ret = __mem_cgroup_charge(page, memcg, gfp); - css_get(&memcg->css); - commit_charge(page, memcg); - - local_irq_disable(); - mem_cgroup_charge_statistics(memcg, page, nr_pages); - memcg_check_events(memcg, page); - local_irq_enable(); + css_put(&memcg->css); + return ret; +} +/* + * mem_cgroup_swapin_uncharge_swap - uncharge swap slot + * @entry: swap entry for which the page is charged + * + * Call this function after successfully adding the charged page to swapcache. + * + * Note: This function assumes the page for which swap slot is being uncharged + * is order 0 page. + */ +void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) +{ /* * Cgroup1's unified memory+swap counter has been charged with the * new swapcache page, finish the transfer by uncharging the swap @@ -6714,20 +6751,14 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) * correspond 1:1 to page and swap slot lifetimes: we charge the * page to memory here, and uncharge swap when the slot is freed. */ - if (do_memsw_account() && PageSwapCache(page)) { - swp_entry_t entry = { .val = page_private(page) }; + if (!mem_cgroup_disabled() && do_memsw_account()) { /* * The swap entry might not get freed for a long time, * let's not wait for it. The page already received a * memory+swap charge, drop the swap entry duplicate. 
*/ - mem_cgroup_uncharge_swap(entry, nr_pages); + mem_cgroup_uncharge_swap(entry, 1); } - -out_put: - css_put(&memcg->css); -out: - return ret; } struct uncharge_gather { diff --git a/mm/memory.c b/mm/memory.c index 550405fc3b5e..3196fa5e7f7d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3309,28 +3309,26 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); if (page) { - int err; - __SetPageLocked(page); __SetPageSwapBacked(page); - set_page_private(page, entry.val); - /* Tell memcg to use swap ownership records */ - SetPageSwapCache(page); - err = mem_cgroup_charge(page, vma->vm_mm, - GFP_KERNEL); - ClearPageSwapCache(page); - if (err) { + if (mem_cgroup_swapin_charge_page(page, + vma->vm_mm, GFP_KERNEL, entry)) { ret = VM_FAULT_OOM; goto out_page; } + mem_cgroup_swapin_uncharge_swap(entry); shadow = get_shadow_from_swap_cache(entry); if (shadow) workingset_refault(page, shadow); lru_cache_add(page); + + /* To provide entry to swap_readpage() */ + set_page_private(page, entry.val); swap_readpage(page, true); + set_page_private(page, 0); } } else { page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, diff --git a/mm/swap_state.c b/mm/swap_state.c index 3cdee7b11da9..fb7efa08fe57 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -497,16 +497,14 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, __SetPageLocked(page); __SetPageSwapBacked(page); - /* May fail (-ENOMEM) if XArray node allocation failed. */ - if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) { - put_swap_page(page, entry); + if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry)) goto fail_unlock; - } - if (mem_cgroup_charge(page, NULL, gfp_mask)) { - delete_from_swap_cache(page); + /* May fail (-ENOMEM) if XArray node allocation failed. */ + if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) goto fail_unlock; - } + + mem_cgroup_swapin_uncharge_swap(entry); if (shadow) workingset_refault(page, shadow); @@ -517,6 +515,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, return page; fail_unlock: + put_swap_page(page, entry); unlock_page(page); put_page(page); return NULL; -- cgit v1.2.3-71-gd317 From b4e0b68fbd9d1fd7e31cbe8adca3ad6cf556e2ee Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Thu, 29 Apr 2021 22:56:52 -0700 Subject: mm: memcontrol: use obj_cgroup APIs to charge kmem pages Since Roman's series "The new cgroup slab memory controller" applied. All slab objects are charged via the new APIs of obj_cgroup. The new APIs introduce a struct obj_cgroup to charge slab objects. It prevents long-living objects from pinning the original memory cgroup in the memory. But there are still some corner objects (e.g. allocations larger than order-1 page on SLUB) which are not charged via the new APIs. Those objects (include the pages which are allocated from buddy allocator directly) are charged as kmem pages which still hold a reference to the memory cgroup. We want to reuse the obj_cgroup APIs to charge the kmem pages. If we do that, we should store an object cgroup pointer to page->memcg_data for the kmem pages. Finally, page->memcg_data will have 3 different meanings. 1) For the slab pages, page->memcg_data points to an object cgroups vector. 2) For the kmem pages (exclude the slab pages), page->memcg_data points to an object cgroup. 3) For the user pages (e.g. the LRU pages), page->memcg_data points to a memory cgroup. 
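For illustration, the three cases are distinguished by flag bits stored in the low bits of page->memcg_data; the sketch below mirrors the page_memcg_check() helper in the diff further down (the standalone function name is illustrative):

	static struct mem_cgroup *memcg_data_to_memcg(unsigned long memcg_data)
	{
		/* 1) slab page: points to an object-cgroups vector, no single memcg */
		if (memcg_data & MEMCG_DATA_OBJCGS)
			return NULL;

		/* 2) kmem page: points to an obj_cgroup, follow it to its memcg */
		if (memcg_data & MEMCG_DATA_KMEM) {
			struct obj_cgroup *objcg;

			objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
			return obj_cgroup_memcg(objcg);
		}

		/* 3) LRU/user page: points directly to the memory cgroup */
		return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
	}
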
We do not change the behavior of page_memcg() and page_memcg_rcu(). They are also suitable for LRU pages and kmem pages. Why? Because memory allocations pinning memcgs for a long time - it exists at a larger scale and is causing recurring problems in the real world: page cache doesn't get reclaimed for a long time, or is used by the second, third, fourth, ... instance of the same job that was restarted into a new cgroup every time. Unreclaimable dying cgroups pile up, waste memory, and make page reclaim very inefficient. We can convert LRU pages and most other raw memcg pins to the objcg direction to fix this problem, and then the page->memcg will always point to an object cgroup pointer. At that time, LRU pages and kmem pages will be treated the same. The implementation of page_memcg() will remove the kmem page check. This patch aims to charge the kmem pages by using the new APIs of obj_cgroup. Finally, the page->memcg_data of the kmem page points to an object cgroup. We can use the __page_objcg() to get the object cgroup associated with a kmem page. Or we can use page_memcg() to get the memory cgroup associated with a kmem page, but caller must ensure that the returned memcg won't be released (e.g. acquire the rcu_read_lock or css_set_lock). Link: https://lkml.kernel.org/r/20210401030141.37061-1-songmuchun@bytedance.com Link: https://lkml.kernel.org/r/20210319163821.20704-6-songmuchun@bytedance.com Signed-off-by: Muchun Song Acked-by: Johannes Weiner Reviewed-by: Shakeel Butt Acked-by: Roman Gushchin Reviewed-by: Miaohe Lin Cc: Michal Hocko Cc: Vladimir Davydov Cc: Xiongchun Duan Cc: Christian Borntraeger [songmuchun@bytedance.com: fix forget to obtain the ref to objcg in split_page_memcg] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 120 ++++++++++++++++++++++++++++++++++++--------- mm/memcontrol.c | 116 +++++++++++++++++++++---------------------- 2 files changed, 155 insertions(+), 81 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e946c96daa32..78ca34c935ab 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -371,6 +371,62 @@ enum page_memcg_data_flags { #define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1) +static inline bool PageMemcgKmem(struct page *page); + +/* + * After the initialization objcg->memcg is always pointing at + * a valid memcg, but can be atomically swapped to the parent memcg. + * + * The caller must ensure that the returned memcg won't be released: + * e.g. acquire the rcu_read_lock or css_set_lock. + */ +static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) +{ + return READ_ONCE(objcg->memcg); +} + +/* + * __page_memcg - get the memory cgroup associated with a non-kmem page + * @page: a pointer to the page struct + * + * Returns a pointer to the memory cgroup associated with the page, + * or NULL. This function assumes that the page is known to have a + * proper memory cgroup pointer. It's not safe to call this function + * against some type of pages, e.g. slab pages or ex-slab pages or + * kmem pages. 
+ */ +static inline struct mem_cgroup *__page_memcg(struct page *page) +{ + unsigned long memcg_data = page->memcg_data; + + VM_BUG_ON_PAGE(PageSlab(page), page); + VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); + VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page); + + return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); +} + +/* + * __page_objcg - get the object cgroup associated with a kmem page + * @page: a pointer to the page struct + * + * Returns a pointer to the object cgroup associated with the page, + * or NULL. This function assumes that the page is known to have a + * proper object cgroup pointer. It's not safe to call this function + * against some type of pages, e.g. slab pages or ex-slab pages or + * LRU pages. + */ +static inline struct obj_cgroup *__page_objcg(struct page *page) +{ + unsigned long memcg_data = page->memcg_data; + + VM_BUG_ON_PAGE(PageSlab(page), page); + VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); + VM_BUG_ON_PAGE(!(memcg_data & MEMCG_DATA_KMEM), page); + + return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); +} + /* * page_memcg - get the memory cgroup associated with a page * @page: a pointer to the page struct @@ -380,20 +436,23 @@ enum page_memcg_data_flags { * proper memory cgroup pointer. It's not safe to call this function * against some type of pages, e.g. slab pages or ex-slab pages. * - * Any of the following ensures page and memcg binding stability: + * For a non-kmem page any of the following ensures page and memcg binding + * stability: + * * - the page lock * - LRU isolation * - lock_page_memcg() * - exclusive reference + * + * For a kmem page a caller should hold an rcu read lock to protect memcg + * associated with a kmem page from being released. */ static inline struct mem_cgroup *page_memcg(struct page *page) { - unsigned long memcg_data = page->memcg_data; - - VM_BUG_ON_PAGE(PageSlab(page), page); - VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); - - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + if (PageMemcgKmem(page)) + return obj_cgroup_memcg(__page_objcg(page)); + else + return __page_memcg(page); } /* @@ -407,11 +466,19 @@ static inline struct mem_cgroup *page_memcg(struct page *page) */ static inline struct mem_cgroup *page_memcg_rcu(struct page *page) { + unsigned long memcg_data = READ_ONCE(page->memcg_data); + VM_BUG_ON_PAGE(PageSlab(page), page); WARN_ON_ONCE(!rcu_read_lock_held()); - return (struct mem_cgroup *)(READ_ONCE(page->memcg_data) & - ~MEMCG_DATA_FLAGS_MASK); + if (memcg_data & MEMCG_DATA_KMEM) { + struct obj_cgroup *objcg; + + objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return obj_cgroup_memcg(objcg); + } + + return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); } /* @@ -419,15 +486,21 @@ static inline struct mem_cgroup *page_memcg_rcu(struct page *page) * @page: a pointer to the page struct * * Returns a pointer to the memory cgroup associated with the page, - * or NULL. This function unlike page_memcg() can take any page + * or NULL. This function unlike page_memcg() can take any page * as an argument. It has to be used in cases when it's not known if a page - * has an associated memory cgroup pointer or an object cgroups vector. + * has an associated memory cgroup pointer or an object cgroups vector or + * an object cgroup. 
+ * + * For a non-kmem page any of the following ensures page and memcg binding + * stability: * - * Any of the following ensures page and memcg binding stability: * - the page lock * - LRU isolation * - lock_page_memcg() * - exclusive reference + * + * For a kmem page a caller should hold an rcu read lock to protect memcg + * associated with a kmem page from being released. */ static inline struct mem_cgroup *page_memcg_check(struct page *page) { @@ -440,6 +513,13 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page) if (memcg_data & MEMCG_DATA_OBJCGS) return NULL; + if (memcg_data & MEMCG_DATA_KMEM) { + struct obj_cgroup *objcg; + + objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return obj_cgroup_memcg(objcg); + } + return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); } @@ -718,21 +798,15 @@ static inline void obj_cgroup_get(struct obj_cgroup *objcg) percpu_ref_get(&objcg->refcnt); } -static inline void obj_cgroup_put(struct obj_cgroup *objcg) +static inline void obj_cgroup_get_many(struct obj_cgroup *objcg, + unsigned long nr) { - percpu_ref_put(&objcg->refcnt); + percpu_ref_get_many(&objcg->refcnt, nr); } -/* - * After the initialization objcg->memcg is always pointing at - * a valid memcg, but can be atomically swapped to the parent memcg. - * - * The caller must ensure that the returned memcg won't be released: - * e.g. acquire the rcu_read_lock or css_set_lock. - */ -static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) +static inline void obj_cgroup_put(struct obj_cgroup *objcg) { - return READ_ONCE(objcg->memcg); + percpu_ref_put(&objcg->refcnt); } static inline void mem_cgroup_put(struct mem_cgroup *memcg) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 85fd2d86d23f..f3ddf94c9485 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -865,18 +865,22 @@ void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) { struct page *head = compound_head(page); /* rmap on tail pages */ - struct mem_cgroup *memcg = page_memcg(head); + struct mem_cgroup *memcg; pg_data_t *pgdat = page_pgdat(page); struct lruvec *lruvec; + rcu_read_lock(); + memcg = page_memcg(head); /* Untracked pages have no memcg, no lruvec. Update only the node */ if (!memcg) { + rcu_read_unlock(); __mod_node_page_state(pgdat, idx, val); return; } lruvec = mem_cgroup_lruvec(memcg, pgdat); __mod_lruvec_state(lruvec, idx, val); + rcu_read_unlock(); } EXPORT_SYMBOL(__mod_lruvec_page_state); @@ -1051,20 +1055,6 @@ static __always_inline struct mem_cgroup *active_memcg(void) return current->active_memcg; } -static __always_inline struct mem_cgroup *get_active_memcg(void) -{ - struct mem_cgroup *memcg; - - rcu_read_lock(); - memcg = active_memcg(); - /* remote memcg must hold a ref. */ - if (memcg && WARN_ON_ONCE(!css_tryget(&memcg->css))) - memcg = root_mem_cgroup; - rcu_read_unlock(); - - return memcg; -} - static __always_inline bool memcg_kmem_bypass(void) { /* Allow remote memcg charging from any context. */ @@ -1078,20 +1068,6 @@ static __always_inline bool memcg_kmem_bypass(void) return false; } -/** - * If active memcg is set, do not fallback to current->mm->memcg. 
- */ -static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void) -{ - if (memcg_kmem_bypass()) - return NULL; - - if (unlikely(active_memcg())) - return get_active_memcg(); - - return get_mem_cgroup_from_mm(current->mm); -} - /** * mem_cgroup_iter - iterate over memory cgroup hierarchy * @root: hierarchy root @@ -3121,18 +3097,18 @@ static void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_page */ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) { - struct mem_cgroup *memcg; + struct obj_cgroup *objcg; int ret = 0; - memcg = get_mem_cgroup_from_current(); - if (memcg && !mem_cgroup_is_root(memcg)) { - ret = __memcg_kmem_charge(memcg, gfp, 1 << order); + objcg = get_obj_cgroup_from_current(); + if (objcg) { + ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); if (!ret) { - page->memcg_data = (unsigned long)memcg | + page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM; return 0; } - css_put(&memcg->css); + obj_cgroup_put(objcg); } return ret; } @@ -3144,16 +3120,16 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) */ void __memcg_kmem_uncharge_page(struct page *page, int order) { - struct mem_cgroup *memcg = page_memcg(page); + struct obj_cgroup *objcg; unsigned int nr_pages = 1 << order; - if (!memcg) + if (!PageMemcgKmem(page)) return; - VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); - __memcg_kmem_uncharge(memcg, nr_pages); + objcg = __page_objcg(page); + obj_cgroup_uncharge_pages(objcg, nr_pages); page->memcg_data = 0; - css_put(&memcg->css); + obj_cgroup_put(objcg); } static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) @@ -3294,7 +3270,11 @@ void split_page_memcg(struct page *head, unsigned int nr) for (i = 1; i < nr; i++) head[i].memcg_data = head->memcg_data; - css_get_many(&memcg->css, nr - 1); + + if (PageMemcgKmem(head)) + obj_cgroup_get_many(__page_objcg(head), nr - 1); + else + css_get_many(&memcg->css, nr - 1); } #ifdef CONFIG_MEMCG_SWAP @@ -6788,7 +6768,7 @@ void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) struct uncharge_gather { struct mem_cgroup *memcg; - unsigned long nr_pages; + unsigned long nr_memory; unsigned long pgpgout; unsigned long nr_kmem; struct page *dummy_page; @@ -6803,10 +6783,10 @@ static void uncharge_batch(const struct uncharge_gather *ug) { unsigned long flags; - if (!mem_cgroup_is_root(ug->memcg)) { - page_counter_uncharge(&ug->memcg->memory, ug->nr_pages); + if (ug->nr_memory) { + page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); if (do_memsw_account()) - page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages); + page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); memcg_oom_recover(ug->memcg); @@ -6814,7 +6794,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) local_irq_save(flags); __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); - __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages); + __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); memcg_check_events(ug->memcg, ug->dummy_page); local_irq_restore(flags); @@ -6825,40 +6805,60 @@ static void uncharge_batch(const struct uncharge_gather *ug) static void uncharge_page(struct page *page, struct uncharge_gather *ug) { unsigned long nr_pages; + struct mem_cgroup *memcg; + struct obj_cgroup *objcg; VM_BUG_ON_PAGE(PageLRU(page), page); - if (!page_memcg(page)) - return; - /* * Nobody should be 
changing or seriously looking at - * page_memcg(page) at this point, we have fully + * page memcg or objcg at this point, we have fully * exclusive access to the page. */ + if (PageMemcgKmem(page)) { + objcg = __page_objcg(page); + /* + * This get matches the put at the end of the function and + * kmem pages do not hold memcg references anymore. + */ + memcg = get_mem_cgroup_from_objcg(objcg); + } else { + memcg = __page_memcg(page); + } - if (ug->memcg != page_memcg(page)) { + if (!memcg) + return; + + if (ug->memcg != memcg) { if (ug->memcg) { uncharge_batch(ug); uncharge_gather_clear(ug); } - ug->memcg = page_memcg(page); + ug->memcg = memcg; ug->dummy_page = page; /* pairs with css_put in uncharge_batch */ - css_get(&ug->memcg->css); + css_get(&memcg->css); } nr_pages = compound_nr(page); - ug->nr_pages += nr_pages; - if (PageMemcgKmem(page)) + if (PageMemcgKmem(page)) { + ug->nr_memory += nr_pages; ug->nr_kmem += nr_pages; - else + + page->memcg_data = 0; + obj_cgroup_put(objcg); + } else { + /* LRU pages aren't accounted at the root level */ + if (!mem_cgroup_is_root(memcg)) + ug->nr_memory += nr_pages; ug->pgpgout++; - page->memcg_data = 0; - css_put(&ug->memcg->css); + page->memcg_data = 0; + } + + css_put(&memcg->css); } /** -- cgit v1.2.3-71-gd317 From bd290e1e75d8a8b2d87031b63db56ae165677870 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Thu, 29 Apr 2021 22:56:58 -0700 Subject: mm: memcontrol: move PageMemcgKmem to the scope of CONFIG_MEMCG_KMEM The page only can be marked as kmem when CONFIG_MEMCG_KMEM is enabled. So move PageMemcgKmem() to the scope of the CONFIG_MEMCG_KMEM. As a bonus, on !CONFIG_MEMCG_KMEM build some code can be compiled out. Link: https://lkml.kernel.org/r/20210319163821.20704-8-songmuchun@bytedance.com Signed-off-by: Muchun Song Acked-by: Roman Gushchin Reviewed-by: Shakeel Butt Acked-by: Johannes Weiner Cc: Michal Hocko Cc: Vladimir Davydov Cc: Xiongchun Duan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 78ca34c935ab..bcf92b99f001 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -523,6 +523,7 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page) return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); } +#ifdef CONFIG_MEMCG_KMEM /* * PageMemcgKmem - check if the page has MemcgKmem flag set * @page: a pointer to the page struct @@ -537,7 +538,6 @@ static inline bool PageMemcgKmem(struct page *page) return page->memcg_data & MEMCG_DATA_KMEM; } -#ifdef CONFIG_MEMCG_KMEM /* * page_objcgs - get the object cgroups vector associated with a page * @page: a pointer to the page struct @@ -579,6 +579,11 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page) } #else +static inline bool PageMemcgKmem(struct page *page) +{ + return false; +} + static inline struct obj_cgroup **page_objcgs(struct page *page) { return NULL; -- cgit v1.2.3-71-gd317 From a10e995749a6c65833edd201c55665e5d44d14fc Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Thu, 29 Apr 2021 22:57:01 -0700 Subject: linux/memcontrol.h: remove duplicate struct declaration struct mem_cgroup is declared twice. One has been declared at forward struct declaration. Remove the duplicate. 
Link: https://lkml.kernel.org/r/20210330020246.2265371-1-wanjiabing@vivo.com Signed-off-by: Wan Jiabing Reviewed-by: Muchun Song Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index bcf92b99f001..5904716f29ba 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1123,8 +1123,6 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, #define MEM_CGROUP_ID_SHIFT 0 #define MEM_CGROUP_ID_MAX 0 -struct mem_cgroup; - static inline struct mem_cgroup *page_memcg(struct page *page) { return NULL; -- cgit v1.2.3-71-gd317 From 74ffa5a3e68504dd289135b1cf0422c19ffb3f2e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 29 Apr 2021 22:57:29 -0700 Subject: mm: add remap_pfn_range_notrack Patch series "add remap_pfn_range_notrack instead of reinventing it in i915", v2. i915 has some reason to want to avoid the track_pfn_remap overhead in remap_pfn_range. Add a function to the core VM to do just that rather than reinventing the functionality poorly in the driver. Note that the remap_io_sg path does get exercises when using Xorg on my Thinkpad X1, so this should be considered lightly tested, I've not managed to hit the remap_io_mapping path at all. This patch (of 4): Add a version of remap_pfn_range that does not call track_pfn_range. This will be used to fix horrible abuses of VM internals in the i915 driver. Link: https://lkml.kernel.org/r/20210326055505.1424432-1-hch@lst.de Link: https://lkml.kernel.org/r/20210326055505.1424432-2-hch@lst.de Signed-off-by: Christoph Hellwig Acked-by: Daniel Vetter Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Chris Wilson Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 ++ mm/memory.c | 51 +++++++++++++++++++++++++++++++-------------------- 2 files changed, 33 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 64be3baf861a..a8335cecf706 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2732,6 +2732,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); +int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t prot); int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num); diff --git a/mm/memory.c b/mm/memory.c index f95d12032b34..8dc69471be6c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2260,26 +2260,17 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, return 0; } -/** - * remap_pfn_range - remap kernel memory to userspace - * @vma: user vma to map to - * @addr: target page aligned user address to start at - * @pfn: page frame number of kernel physical memory address - * @size: size of mapping area - * @prot: page protection flags for this mapping - * - * Note: this is only safe if the mm semaphore is held when called. - * - * Return: %0 on success, negative error code otherwise. +/* + * Variant of remap_pfn_range that does not call track_pfn_remap. 
The caller + * must have pre-validated the caching bits of the pgprot_t. */ -int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, - unsigned long pfn, unsigned long size, pgprot_t prot) +int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t prot) { pgd_t *pgd; unsigned long next; unsigned long end = addr + PAGE_ALIGN(size); struct mm_struct *mm = vma->vm_mm; - unsigned long remap_pfn = pfn; int err; if (WARN_ON_ONCE(!PAGE_ALIGNED(addr))) @@ -2309,10 +2300,6 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, vma->vm_pgoff = pfn; } - err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size)); - if (err) - return -EINVAL; - vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; BUG_ON(addr >= end); @@ -2324,12 +2311,36 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, err = remap_p4d_range(mm, pgd, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) - break; + return err; } while (pgd++, addr = next, addr != end); + return 0; +} + +/** + * remap_pfn_range - remap kernel memory to userspace + * @vma: user vma to map to + * @addr: target page aligned user address to start at + * @pfn: page frame number of kernel physical memory address + * @size: size of mapping area + * @prot: page protection flags for this mapping + * + * Note: this is only safe if the mm semaphore is held when called. + * + * Return: %0 on success, negative error code otherwise. + */ +int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t prot) +{ + int err; + + err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); if (err) - untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size)); + return -EINVAL; + err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); + if (err) + untrack_pfn(vma, pfn, PAGE_ALIGN(size)); return err; } EXPORT_SYMBOL(remap_pfn_range); -- cgit v1.2.3-71-gd317 From 1fbaf8fc12a0136c7e62e7ad6fe886fe1749912c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 29 Apr 2021 22:57:32 -0700 Subject: mm: add a io_mapping_map_user helper Add a helper that calls remap_pfn_range for an struct io_mapping, relying on the pgprot pre-validation done when creating the mapping instead of doing it at runtime. Link: https://lkml.kernel.org/r/20210326055505.1424432-3-hch@lst.de Signed-off-by: Christoph Hellwig Cc: Chris Wilson Cc: Daniel Vetter Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Peter Zijlstra Cc: Rodrigo Vivi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/io-mapping.h | 3 +++ mm/Kconfig | 3 +++ mm/Makefile | 1 + mm/io-mapping.c | 29 +++++++++++++++++++++++++++++ 4 files changed, 36 insertions(+) create mode 100644 mm/io-mapping.c (limited to 'include/linux') diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index c093e81310a9..e9743cfd8585 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -220,3 +220,6 @@ io_mapping_free(struct io_mapping *iomap) } #endif /* _LINUX_IO_MAPPING_H */ + +int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size); diff --git a/mm/Kconfig b/mm/Kconfig index d0808a23e54b..8a27c7c91428 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -871,4 +871,7 @@ config MAPPING_DIRTY_HELPERS config KMAP_LOCAL bool +# struct io_mapping based helper. 
Selected by drivers that need them +config IO_MAPPING + bool endmenu diff --git a/mm/Makefile b/mm/Makefile index 72227b24a616..c0135e385984 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -120,3 +120,4 @@ obj-$(CONFIG_MEMFD_CREATE) += memfd.o obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o obj-$(CONFIG_PTDUMP_CORE) += ptdump.o obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o +obj-$(CONFIG_IO_MAPPING) += io-mapping.o diff --git a/mm/io-mapping.c b/mm/io-mapping.c new file mode 100644 index 000000000000..01b362799930 --- /dev/null +++ b/mm/io-mapping.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +/** + * io_mapping_map_user - remap an I/O mapping to userspace + * @iomap: the source io_mapping + * @vma: user vma to map to + * @addr: target user address to start at + * @pfn: physical address of kernel memory + * @size: size of map area + * + * Note: this is only safe if the mm semaphore is held when called. + */ +int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size) +{ + vm_flags_t expected_flags = VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + + if (WARN_ON_ONCE((vma->vm_flags & expected_flags) != expected_flags)) + return -EINVAL; + + /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + return remap_pfn_range_notrack(vma, addr, pfn, size, + __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | + (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK))); +} +EXPORT_SYMBOL_GPL(io_mapping_map_user); -- cgit v1.2.3-71-gd317 From 14d071134c740cfe61c09fc506fd3ab052beea10 Mon Sep 17 00:00:00 2001 From: Brian Geffon Date: Thu, 29 Apr 2021 22:57:48 -0700 Subject: Revert "mremap: don't allow MREMAP_DONTUNMAP on special_mappings and aio" This reverts commit cd544fd1dc9293c6702fab6effa63dac1cc67e99. As discussed in [1] this commit was a no-op because the mapping type was checked in vma_to_resize before move_vma is ever called. This meant that vm_ops->mremap() would never be called on such mappings. Furthermore, we've since expanded support of MREMAP_DONTUNMAP to non-anonymous mappings, and these special mappings are still protected by the existing check of !VM_DONTEXPAND and !VM_PFNMAP which will result in a -EINVAL. 1. https://lkml.org/lkml/2020/12/28/2340 Link: https://lkml.kernel.org/r/20210323182520.2712101-2-bgeffon@google.com Signed-off-by: Brian Geffon Acked-by: Hugh Dickins Reviewed-by: Dmitry Safonov <0x7f454c46@gmail.com> Cc: Alejandro Colomar Cc: Andrea Arcangeli Cc: Andy Lutomirski Cc: Axel Rasmussen Cc: "Kirill A . Shutemov" Cc: Lokesh Gidra Cc: Michael Kerrisk Cc: "Michael S . 
Tsirkin" Cc: Mike Rapoport Cc: Minchan Kim Cc: Peter Xu Cc: Sonny Rao Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 2 +- fs/aio.c | 5 +---- include/linux/mm.h | 2 +- mm/mmap.c | 6 +----- mm/mremap.c | 2 +- 5 files changed, 5 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 935af2ac6b1a..05a89e33fde2 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -1458,7 +1458,7 @@ static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) return 0; } -static int pseudo_lock_dev_mremap(struct vm_area_struct *area, unsigned long flags) +static int pseudo_lock_dev_mremap(struct vm_area_struct *area) { /* Not supported */ return -EINVAL; diff --git a/fs/aio.c b/fs/aio.c index 1f32da13d39e..76ce0cc3ee4e 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -323,16 +323,13 @@ static void aio_free_ring(struct kioctx *ctx) } } -static int aio_ring_mremap(struct vm_area_struct *vma, unsigned long flags) +static int aio_ring_mremap(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct mm_struct *mm = vma->vm_mm; struct kioctx_table *table; int i, res = -EINVAL; - if (flags & MREMAP_DONTUNMAP) - return -EINVAL; - spin_lock(&mm->ioctx_lock); rcu_read_lock(); table = rcu_dereference(mm->ioctx_table); diff --git a/include/linux/mm.h b/include/linux/mm.h index a8335cecf706..93097dbd9604 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -580,7 +580,7 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); /* Called any time before splitting to check if it's allowed */ int (*may_split)(struct vm_area_struct *area, unsigned long addr); - int (*mremap)(struct vm_area_struct *area, unsigned long flags); + int (*mremap)(struct vm_area_struct *area); /* * Called by mprotect() to make driver-specific permission * checks before mprotect() is finalised. The VMA must not diff --git a/mm/mmap.c b/mm/mmap.c index 1d96a21acb2f..347ef9b83bb5 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3409,14 +3409,10 @@ static const char *special_mapping_name(struct vm_area_struct *vma) return ((struct vm_special_mapping *)vma->vm_private_data)->name; } -static int special_mapping_mremap(struct vm_area_struct *new_vma, - unsigned long flags) +static int special_mapping_mremap(struct vm_area_struct *new_vma) { struct vm_special_mapping *sm = new_vma->vm_private_data; - if (flags & MREMAP_DONTUNMAP) - return -EINVAL; - if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) return -EFAULT; diff --git a/mm/mremap.c b/mm/mremap.c index db5b8b28c2dd..d22629ff8f3c 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -545,7 +545,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (moved_len < old_len) { err = -ENOMEM; } else if (vma->vm_ops && vma->vm_ops->mremap) { - err = vma->vm_ops->mremap(new_vma, flags); + err = vma->vm_ops->mremap(new_vma); } if (unlikely(err)) { -- cgit v1.2.3-71-gd317 From bbc180a5adb05ee8053fab7a0c0bd56c5964240e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:58:26 -0700 Subject: mm: HUGE_VMAP arch support cleanup This changes the awkward approach where architectures provide init functions to determine which levels they can provide large mappings for, to one where the arch is queried for each call. This removes code and indirection, and allows constant-folding of dead code for unsupported levels. 
This also adds a prot argument to the arch query. This is unused currently but could help with some architectures (e.g., some powerpc processors can't map uncacheable memory with large pages). Link: https://lkml.kernel.org/r/20210317062402.533919-7-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Ding Tianhong Acked-by: Catalin Marinas [arm64] Cc: Will Deacon Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: "H. Peter Anvin" Cc: Christoph Hellwig Cc: Miaohe Lin Cc: Michael Ellerman Cc: Russell King Cc: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/vmalloc.h | 8 +++ arch/arm64/mm/mmu.c | 10 ++-- arch/powerpc/include/asm/vmalloc.h | 8 +++ arch/powerpc/mm/book3s64/radix_pgtable.c | 8 +-- arch/x86/include/asm/vmalloc.h | 7 +++ arch/x86/mm/ioremap.c | 12 ++-- include/linux/io.h | 9 --- include/linux/vmalloc.h | 6 ++ init/main.c | 1 - mm/debug_vm_pgtable.c | 4 +- mm/ioremap.c | 94 ++++++++++++++------------------ 11 files changed, 87 insertions(+), 80 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h index 2ca708ab9b20..597b40405319 100644 --- a/arch/arm64/include/asm/vmalloc.h +++ b/arch/arm64/include/asm/vmalloc.h @@ -1,4 +1,12 @@ #ifndef _ASM_ARM64_VMALLOC_H #define _ASM_ARM64_VMALLOC_H +#include + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot); +bool arch_vmap_pud_supported(pgprot_t prot); +bool arch_vmap_pmd_supported(pgprot_t prot); +#endif + #endif /* _ASM_ARM64_VMALLOC_H */ diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index d563335ad43f..8436e0755361 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1339,12 +1339,12 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) return dt_virt; } -int __init arch_ioremap_p4d_supported(void) +bool arch_vmap_p4d_supported(pgprot_t prot) { - return 0; + return false; } -int __init arch_ioremap_pud_supported(void) +bool arch_vmap_pud_supported(pgprot_t prot) { /* * Only 4k granule supports level 1 block mappings. 
@@ -1354,9 +1354,9 @@ int __init arch_ioremap_pud_supported(void) !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); } -int __init arch_ioremap_pmd_supported(void) +bool arch_vmap_pmd_supported(pgprot_t prot) { - /* See arch_ioremap_pud_supported() */ + /* See arch_vmap_pud_supported() */ return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); } diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h index b992dfaaa161..105abb73f075 100644 --- a/arch/powerpc/include/asm/vmalloc.h +++ b/arch/powerpc/include/asm/vmalloc.h @@ -1,4 +1,12 @@ #ifndef _ASM_POWERPC_VMALLOC_H #define _ASM_POWERPC_VMALLOC_H +#include + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot); +bool arch_vmap_pud_supported(pgprot_t prot); +bool arch_vmap_pmd_supported(pgprot_t prot); +#endif + #endif /* _ASM_POWERPC_VMALLOC_H */ diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 98f0b243c1ab..743807fc210f 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -1082,13 +1082,13 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, set_pte_at(mm, addr, ptep, pte); } -int __init arch_ioremap_pud_supported(void) +bool arch_vmap_pud_supported(pgprot_t prot) { /* HPT does not cope with large pages in the vmalloc area */ return radix_enabled(); } -int __init arch_ioremap_pmd_supported(void) +bool arch_vmap_pmd_supported(pgprot_t prot) { return radix_enabled(); } @@ -1182,7 +1182,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) return 1; } -int __init arch_ioremap_p4d_supported(void) +bool arch_vmap_p4d_supported(pgprot_t prot) { - return 0; + return false; } diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h index 29837740b520..094ea2b565f3 100644 --- a/arch/x86/include/asm/vmalloc.h +++ b/arch/x86/include/asm/vmalloc.h @@ -1,6 +1,13 @@ #ifndef _ASM_X86_VMALLOC_H #define _ASM_X86_VMALLOC_H +#include #include +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot); +bool arch_vmap_pud_supported(pgprot_t prot); +bool arch_vmap_pmd_supported(pgprot_t prot); +#endif + #endif /* _ASM_X86_VMALLOC_H */ diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 9e5ccc56f8e0..fbaf0c447986 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -481,24 +481,26 @@ void iounmap(volatile void __iomem *addr) } EXPORT_SYMBOL(iounmap); -int __init arch_ioremap_p4d_supported(void) +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot) { - return 0; + return false; } -int __init arch_ioremap_pud_supported(void) +bool arch_vmap_pud_supported(pgprot_t prot) { #ifdef CONFIG_X86_64 return boot_cpu_has(X86_FEATURE_GBPAGES); #else - return 0; + return false; #endif } -int __init arch_ioremap_pmd_supported(void) +bool arch_vmap_pmd_supported(pgprot_t prot) { return boot_cpu_has(X86_FEATURE_PSE); } +#endif /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem diff --git a/include/linux/io.h b/include/linux/io.h index 61ff7d6278b6..9595151d800d 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -31,15 +31,6 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end, } #endif -#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -void __init ioremap_huge_init(void); -int arch_ioremap_p4d_supported(void); -int arch_ioremap_pud_supported(void); -int arch_ioremap_pmd_supported(void); -#else -static inline void ioremap_huge_init(void) { } -#endif - /* * Managed iomap interface */ diff --git 
a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 3de7be6dd17c..358c51c702c0 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -78,6 +78,12 @@ struct vmap_area { }; }; +#ifndef CONFIG_HAVE_ARCH_HUGE_VMAP +static inline bool arch_vmap_p4d_supported(pgprot_t prot) { return false; } +static inline bool arch_vmap_pud_supported(pgprot_t prot) { return false; } +static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return false; } +#endif + /* * Highlevel APIs for driver use */ diff --git a/init/main.c b/init/main.c index f498aac26e8c..ae96c79ad2d3 100644 --- a/init/main.c +++ b/init/main.c @@ -837,7 +837,6 @@ static void __init mm_init(void) pgtable_init(); debug_objects_mem_init(); vmalloc_init(); - ioremap_huge_init(); /* Should be run before the first non-init thread is created */ init_espfix_bsp(); /* Should be run after espfix64 is set up. */ diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index a9bd6ce1ba02..05efe98a9ac2 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -247,7 +247,7 @@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { pmd_t pmd; - if (!arch_ioremap_pmd_supported()) + if (!arch_vmap_pmd_supported(prot)) return; pr_debug("Validating PMD huge\n"); @@ -385,7 +385,7 @@ static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { pud_t pud; - if (!arch_ioremap_pud_supported()) + if (!arch_vmap_pud_supported(prot)) return; pr_debug("Validating PUD huge\n"); diff --git a/mm/ioremap.c b/mm/ioremap.c index 3f4d36f9745a..3264d0203785 100644 --- a/mm/ioremap.c +++ b/mm/ioremap.c @@ -16,49 +16,16 @@ #include "pgalloc-track.h" #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -static int __read_mostly ioremap_p4d_capable; -static int __read_mostly ioremap_pud_capable; -static int __read_mostly ioremap_pmd_capable; -static int __read_mostly ioremap_huge_disabled; +static bool __ro_after_init iomap_max_page_shift = PAGE_SHIFT; static int __init set_nohugeiomap(char *str) { - ioremap_huge_disabled = 1; + iomap_max_page_shift = P4D_SHIFT; return 0; } early_param("nohugeiomap", set_nohugeiomap); - -void __init ioremap_huge_init(void) -{ - if (!ioremap_huge_disabled) { - if (arch_ioremap_p4d_supported()) - ioremap_p4d_capable = 1; - if (arch_ioremap_pud_supported()) - ioremap_pud_capable = 1; - if (arch_ioremap_pmd_supported()) - ioremap_pmd_capable = 1; - } -} - -static inline int ioremap_p4d_enabled(void) -{ - return ioremap_p4d_capable; -} - -static inline int ioremap_pud_enabled(void) -{ - return ioremap_pud_capable; -} - -static inline int ioremap_pmd_enabled(void) -{ - return ioremap_pmd_capable; -} - -#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ -static inline int ioremap_p4d_enabled(void) { return 0; } -static inline int ioremap_pud_enabled(void) { return 0; } -static inline int ioremap_pmd_enabled(void) { return 0; } +#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */ +static const bool iomap_max_page_shift = PAGE_SHIFT; #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, @@ -82,9 +49,13 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, } static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot) + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) { - if (!ioremap_pmd_enabled()) + if (max_page_shift < PMD_SHIFT) + return 0; + + if (!arch_vmap_pmd_supported(prot)) return 0; if ((end - addr) != PMD_SIZE) @@ -104,7 +75,7 @@ static 
int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, - pgtbl_mod_mask *mask) + unsigned int max_page_shift, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; @@ -115,7 +86,8 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, do { next = pmd_addr_end(addr, end); - if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) { + if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, + max_page_shift)) { *mask |= PGTBL_PMD_MODIFIED; continue; } @@ -127,9 +99,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, } static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot) + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) { - if (!ioremap_pud_enabled()) + if (max_page_shift < PUD_SHIFT) + return 0; + + if (!arch_vmap_pud_supported(prot)) return 0; if ((end - addr) != PUD_SIZE) @@ -149,7 +125,7 @@ static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, - pgtbl_mod_mask *mask) + unsigned int max_page_shift, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; @@ -160,21 +136,27 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, do { next = pud_addr_end(addr, end); - if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot)) { + if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, + max_page_shift)) { *mask |= PGTBL_PUD_MODIFIED; continue; } - if (vmap_pmd_range(pud, addr, next, phys_addr, prot, mask)) + if (vmap_pmd_range(pud, addr, next, phys_addr, prot, + max_page_shift, mask)) return -ENOMEM; } while (pud++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot) + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) { - if (!ioremap_p4d_enabled()) + if (max_page_shift < P4D_SHIFT) + return 0; + + if (!arch_vmap_p4d_supported(prot)) return 0; if ((end - addr) != P4D_SIZE) @@ -194,7 +176,7 @@ static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, - pgtbl_mod_mask *mask) + unsigned int max_page_shift, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; @@ -205,19 +187,22 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, do { next = p4d_addr_end(addr, end); - if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) { + if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, + max_page_shift)) { *mask |= PGTBL_P4D_MODIFIED; continue; } - if (vmap_pud_range(p4d, addr, next, phys_addr, prot, mask)) + if (vmap_pud_range(p4d, addr, next, phys_addr, prot, + max_page_shift, mask)) return -ENOMEM; } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_range(unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot) + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) { pgd_t *pgd; unsigned long start; @@ -232,7 +217,8 @@ static int vmap_range(unsigned long addr, unsigned long end, pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); - err = 
vmap_p4d_range(pgd, addr, next, phys_addr, prot, &mask); + err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, + max_page_shift, &mask); if (err) break; } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); @@ -248,7 +234,7 @@ static int vmap_range(unsigned long addr, unsigned long end, int ioremap_page_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { - return vmap_range(addr, end, phys_addr, prot); + return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift); } #ifdef CONFIG_GENERIC_IOREMAP -- cgit v1.2.3-71-gd317 From 6f680e70b6ff58c9670769534196800233685d55 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:58:39 -0700 Subject: mm/vmalloc: provide fallback arch huge vmap support functions If an architecture doesn't support a particular page table level as a huge vmap page size then allow it to skip defining the support query function. Link: https://lkml.kernel.org/r/20210317062402.533919-11-npiggin@gmail.com Signed-off-by: Nicholas Piggin Suggested-by: Christoph Hellwig Cc: Borislav Petkov Cc: Catalin Marinas Cc: Ding Tianhong Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Miaohe Lin Cc: Michael Ellerman Cc: Russell King Cc: Thomas Gleixner Cc: Uladzislau Rezki (Sony) Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/vmalloc.h | 7 +++---- arch/powerpc/include/asm/vmalloc.h | 7 +++---- arch/x86/include/asm/vmalloc.h | 13 +++++-------- include/linux/vmalloc.h | 24 ++++++++++++++++++++---- 4 files changed, 31 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h index fc9a12d6cc1a..7a22aeea9bb5 100644 --- a/arch/arm64/include/asm/vmalloc.h +++ b/arch/arm64/include/asm/vmalloc.h @@ -4,11 +4,8 @@ #include #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -static inline bool arch_vmap_p4d_supported(pgprot_t prot) -{ - return false; -} +#define arch_vmap_pud_supported arch_vmap_pud_supported static inline bool arch_vmap_pud_supported(pgprot_t prot) { /* @@ -19,11 +16,13 @@ static inline bool arch_vmap_pud_supported(pgprot_t prot) !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); } +#define arch_vmap_pmd_supported arch_vmap_pmd_supported static inline bool arch_vmap_pmd_supported(pgprot_t prot) { /* See arch_vmap_pud_supported() */ return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); } + #endif #endif /* _ASM_ARM64_VMALLOC_H */ diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h index 3f0c153befb0..4c69ece52a31 100644 --- a/arch/powerpc/include/asm/vmalloc.h +++ b/arch/powerpc/include/asm/vmalloc.h @@ -5,21 +5,20 @@ #include #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -static inline bool arch_vmap_p4d_supported(pgprot_t prot) -{ - return false; -} +#define arch_vmap_pud_supported arch_vmap_pud_supported static inline bool arch_vmap_pud_supported(pgprot_t prot) { /* HPT does not cope with large pages in the vmalloc area */ return radix_enabled(); } +#define arch_vmap_pmd_supported arch_vmap_pmd_supported static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return radix_enabled(); } + #endif #endif /* _ASM_POWERPC_VMALLOC_H */ diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h index e714b00fc0ca..49ce331f3ac6 100644 --- a/arch/x86/include/asm/vmalloc.h +++ b/arch/x86/include/asm/vmalloc.h @@ -6,24 +6,21 @@ #include #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -static inline bool arch_vmap_p4d_supported(pgprot_t prot) -{ - return false; -} +#ifdef CONFIG_X86_64 +#define 
arch_vmap_pud_supported arch_vmap_pud_supported static inline bool arch_vmap_pud_supported(pgprot_t prot) { -#ifdef CONFIG_X86_64 return boot_cpu_has(X86_FEATURE_GBPAGES); -#else - return false; -#endif } +#endif +#define arch_vmap_pmd_supported arch_vmap_pmd_supported static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return boot_cpu_has(X86_FEATURE_PSE); } + #endif #endif /* _ASM_X86_VMALLOC_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 358c51c702c0..eb5630be6783 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -78,10 +78,26 @@ struct vmap_area { }; }; -#ifndef CONFIG_HAVE_ARCH_HUGE_VMAP -static inline bool arch_vmap_p4d_supported(pgprot_t prot) { return false; } -static inline bool arch_vmap_pud_supported(pgprot_t prot) { return false; } -static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return false; } +/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */ +#ifndef arch_vmap_p4d_supported +static inline bool arch_vmap_p4d_supported(pgprot_t prot) +{ + return false; +} +#endif + +#ifndef arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + return false; +} +#endif + +#ifndef arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + return false; +} #endif /* -- cgit v1.2.3-71-gd317 From 5e9e3d777b99aabe2f91f793a52e870a02642160 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:58:43 -0700 Subject: mm: move vmap_range from mm/ioremap.c to mm/vmalloc.c This is a generic kernel virtual memory mapper, not specific to ioremap. Code is unchanged other than making vmap_range non-static. Link: https://lkml.kernel.org/r/20210317062402.533919-12-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Christoph Hellwig Cc: Borislav Petkov Cc: Catalin Marinas Cc: Ding Tianhong Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Miaohe Lin Cc: Michael Ellerman Cc: Russell King Cc: Thomas Gleixner Cc: Uladzislau Rezki (Sony) Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmalloc.h | 3 + mm/ioremap.c | 203 ------------------------------------------------ mm/vmalloc.c | 202 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 205 insertions(+), 203 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index eb5630be6783..ae9eb07d30d4 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -189,6 +189,9 @@ extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); #ifdef CONFIG_MMU +int vmap_range(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift); extern int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, diff --git a/mm/ioremap.c b/mm/ioremap.c index 3264d0203785..d1dcc7e744ac 100644 --- a/mm/ioremap.c +++ b/mm/ioremap.c @@ -28,209 +28,6 @@ early_param("nohugeiomap", set_nohugeiomap); static const bool iomap_max_page_shift = PAGE_SHIFT; #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ -static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - pgtbl_mod_mask *mask) -{ - pte_t *pte; - u64 pfn; - - pfn = phys_addr >> PAGE_SHIFT; - pte = pte_alloc_kernel_track(pmd, addr, mask); - if (!pte) - return -ENOMEM; - do { - BUG_ON(!pte_none(*pte)); - set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); - pfn++; - } while (pte++, addr += PAGE_SIZE, addr != end); - *mask |= PGTBL_PTE_MODIFIED; - return 0; -} - -static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift) -{ - if (max_page_shift < PMD_SHIFT) - return 0; - - if (!arch_vmap_pmd_supported(prot)) - return 0; - - if ((end - addr) != PMD_SIZE) - return 0; - - if (!IS_ALIGNED(addr, PMD_SIZE)) - return 0; - - if (!IS_ALIGNED(phys_addr, PMD_SIZE)) - return 0; - - if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) - return 0; - - return pmd_set_huge(pmd, phys_addr, prot); -} - -static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift, pgtbl_mod_mask *mask) -{ - pmd_t *pmd; - unsigned long next; - - pmd = pmd_alloc_track(&init_mm, pud, addr, mask); - if (!pmd) - return -ENOMEM; - do { - next = pmd_addr_end(addr, end); - - if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, - max_page_shift)) { - *mask |= PGTBL_PMD_MODIFIED; - continue; - } - - if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask)) - return -ENOMEM; - } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); - return 0; -} - -static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift) -{ - if (max_page_shift < PUD_SHIFT) - return 0; - - if (!arch_vmap_pud_supported(prot)) - return 0; - - if ((end - addr) != PUD_SIZE) - return 0; - - if (!IS_ALIGNED(addr, PUD_SIZE)) - return 0; - - if (!IS_ALIGNED(phys_addr, PUD_SIZE)) - return 0; - - if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) - return 0; - - return pud_set_huge(pud, phys_addr, prot); -} - -static int 
vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift, pgtbl_mod_mask *mask) -{ - pud_t *pud; - unsigned long next; - - pud = pud_alloc_track(&init_mm, p4d, addr, mask); - if (!pud) - return -ENOMEM; - do { - next = pud_addr_end(addr, end); - - if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, - max_page_shift)) { - *mask |= PGTBL_PUD_MODIFIED; - continue; - } - - if (vmap_pmd_range(pud, addr, next, phys_addr, prot, - max_page_shift, mask)) - return -ENOMEM; - } while (pud++, phys_addr += (next - addr), addr = next, addr != end); - return 0; -} - -static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift) -{ - if (max_page_shift < P4D_SHIFT) - return 0; - - if (!arch_vmap_p4d_supported(prot)) - return 0; - - if ((end - addr) != P4D_SIZE) - return 0; - - if (!IS_ALIGNED(addr, P4D_SIZE)) - return 0; - - if (!IS_ALIGNED(phys_addr, P4D_SIZE)) - return 0; - - if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) - return 0; - - return p4d_set_huge(p4d, phys_addr, prot); -} - -static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift, pgtbl_mod_mask *mask) -{ - p4d_t *p4d; - unsigned long next; - - p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); - if (!p4d) - return -ENOMEM; - do { - next = p4d_addr_end(addr, end); - - if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, - max_page_shift)) { - *mask |= PGTBL_P4D_MODIFIED; - continue; - } - - if (vmap_pud_range(p4d, addr, next, phys_addr, prot, - max_page_shift, mask)) - return -ENOMEM; - } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); - return 0; -} - -static int vmap_range(unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift) -{ - pgd_t *pgd; - unsigned long start; - unsigned long next; - int err; - pgtbl_mod_mask mask = 0; - - might_sleep(); - BUG_ON(addr >= end); - - start = addr; - pgd = pgd_offset_k(addr); - do { - next = pgd_addr_end(addr, end); - err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, - max_page_shift, &mask); - if (err) - break; - } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); - - flush_cache_vmap(start, end); - - if (mask & ARCH_PAGE_TABLE_SYNC_MASK) - arch_sync_kernel_mappings(start, end); - - return err; -} - int ioremap_page_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ce4066b46955..5c81717f7e0e 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -68,6 +68,208 @@ static void free_work(struct work_struct *w) } /*** Page table manipulation functions ***/ +static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + pgtbl_mod_mask *mask) +{ + pte_t *pte; + u64 pfn; + + pfn = phys_addr >> PAGE_SHIFT; + pte = pte_alloc_kernel_track(pmd, addr, mask); + if (!pte) + return -ENOMEM; + do { + BUG_ON(!pte_none(*pte)); + set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); + pfn++; + } while (pte++, addr += PAGE_SIZE, addr != end); + *mask |= PGTBL_PTE_MODIFIED; + return 0; +} + +static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) +{ + if (max_page_shift < PMD_SHIFT) + return 0; + + if (!arch_vmap_pmd_supported(prot)) + 
return 0; + + if ((end - addr) != PMD_SIZE) + return 0; + + if (!IS_ALIGNED(addr, PMD_SIZE)) + return 0; + + if (!IS_ALIGNED(phys_addr, PMD_SIZE)) + return 0; + + if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) + return 0; + + return pmd_set_huge(pmd, phys_addr, prot); +} + +static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift, pgtbl_mod_mask *mask) +{ + pmd_t *pmd; + unsigned long next; + + pmd = pmd_alloc_track(&init_mm, pud, addr, mask); + if (!pmd) + return -ENOMEM; + do { + next = pmd_addr_end(addr, end); + + if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, + max_page_shift)) { + *mask |= PGTBL_PMD_MODIFIED; + continue; + } + + if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask)) + return -ENOMEM; + } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); + return 0; +} + +static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) +{ + if (max_page_shift < PUD_SHIFT) + return 0; + + if (!arch_vmap_pud_supported(prot)) + return 0; + + if ((end - addr) != PUD_SIZE) + return 0; + + if (!IS_ALIGNED(addr, PUD_SIZE)) + return 0; + + if (!IS_ALIGNED(phys_addr, PUD_SIZE)) + return 0; + + if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) + return 0; + + return pud_set_huge(pud, phys_addr, prot); +} + +static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift, pgtbl_mod_mask *mask) +{ + pud_t *pud; + unsigned long next; + + pud = pud_alloc_track(&init_mm, p4d, addr, mask); + if (!pud) + return -ENOMEM; + do { + next = pud_addr_end(addr, end); + + if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, + max_page_shift)) { + *mask |= PGTBL_PUD_MODIFIED; + continue; + } + + if (vmap_pmd_range(pud, addr, next, phys_addr, prot, + max_page_shift, mask)) + return -ENOMEM; + } while (pud++, phys_addr += (next - addr), addr = next, addr != end); + return 0; +} + +static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) +{ + if (max_page_shift < P4D_SHIFT) + return 0; + + if (!arch_vmap_p4d_supported(prot)) + return 0; + + if ((end - addr) != P4D_SIZE) + return 0; + + if (!IS_ALIGNED(addr, P4D_SIZE)) + return 0; + + if (!IS_ALIGNED(phys_addr, P4D_SIZE)) + return 0; + + if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) + return 0; + + return p4d_set_huge(p4d, phys_addr, prot); +} + +static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift, pgtbl_mod_mask *mask) +{ + p4d_t *p4d; + unsigned long next; + + p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); + if (!p4d) + return -ENOMEM; + do { + next = p4d_addr_end(addr, end); + + if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, + max_page_shift)) { + *mask |= PGTBL_P4D_MODIFIED; + continue; + } + + if (vmap_pud_range(p4d, addr, next, phys_addr, prot, + max_page_shift, mask)) + return -ENOMEM; + } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); + return 0; +} + +int vmap_range(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) +{ + pgd_t *pgd; + unsigned long start; + unsigned long next; + int err; + pgtbl_mod_mask mask = 0; + + might_sleep(); + BUG_ON(addr >= end); + + start 
= addr; + pgd = pgd_offset_k(addr); + do { + next = pgd_addr_end(addr, end); + err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, + max_page_shift, &mask); + if (err) + break; + } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); + + flush_cache_vmap(start, end); + + if (mask & ARCH_PAGE_TABLE_SYNC_MASK) + arch_sync_kernel_mappings(start, end); + + return err; +} static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) -- cgit v1.2.3-71-gd317 From 121e6f3258fe393e22c36f61a319be8a4f2c05ae Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:58:49 -0700 Subject: mm/vmalloc: hugepage vmalloc mappings Support huge page vmalloc mappings. Config option HAVE_ARCH_HUGE_VMALLOC enables support on architectures that define HAVE_ARCH_HUGE_VMAP and supports PMD sized vmap mappings. vmalloc will attempt to allocate PMD-sized pages if allocating PMD size or larger, and fall back to small pages if that was unsuccessful. Architectures must ensure that any arch specific vmalloc allocations that require PAGE_SIZE mappings (e.g., module allocations vs strict module rwx) use the VM_NOHUGE flag to inhibit larger mappings. This can result in more internal fragmentation and memory overhead for a given allocation, an option nohugevmalloc is added to disable at boot. [colin.king@canonical.com: fix read of uninitialized pointer area] Link: https://lkml.kernel.org/r/20210318155955.18220-1-colin.king@canonical.com Link: https://lkml.kernel.org/r/20210317062402.533919-14-npiggin@gmail.com Signed-off-by: Nicholas Piggin Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christoph Hellwig Cc: Ding Tianhong Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Miaohe Lin Cc: Michael Ellerman Cc: Russell King Cc: Thomas Gleixner Cc: Uladzislau Rezki (Sony) Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/Kconfig | 11 +++ include/linux/vmalloc.h | 21 +++++ mm/page_alloc.c | 5 +- mm/vmalloc.c | 220 +++++++++++++++++++++++++++++++++++++----------- 4 files changed, 209 insertions(+), 48 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 5e8f6680d4bf..bf27159be4d9 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -829,6 +829,17 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD config HAVE_ARCH_HUGE_VMAP bool +# +# Archs that select this would be capable of PMD-sized vmaps (i.e., +# arch_vmap_pmd_supported() returns true), and they must make no assumptions +# that vmalloc memory is mapped with PAGE_SIZE ptes. The VM_NO_HUGE_VMAP flag +# can be used to prohibit arch-specific allocations from using hugepages to +# help with this (e.g., modules may require it). +# +config HAVE_ARCH_HUGE_VMALLOC + depends on HAVE_ARCH_HUGE_VMAP + bool + config ARCH_WANT_HUGE_PMD_SHARE bool diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index ae9eb07d30d4..b4c82f2d40dc 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -26,6 +26,7 @@ struct notifier_block; /* in notifier.h */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ #define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */ #define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */ +#define VM_NO_HUGE_VMAP 0x00000400 /* force PAGE_SIZE pte mapping */ /* * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC. 
@@ -54,6 +55,9 @@ struct vm_struct { unsigned long size; unsigned long flags; struct page **pages; +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC + unsigned int page_order; +#endif unsigned int nr_pages; phys_addr_t phys_addr; const void *caller; @@ -188,6 +192,22 @@ void free_vm_area(struct vm_struct *area); extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); +static inline bool is_vm_area_hugepages(const void *addr) +{ + /* + * This may not 100% tell if the area is mapped with > PAGE_SIZE + * page table entries, if for some reason the architecture indicates + * larger sizes are available but decides not to use them, nothing + * prevents that. This only indicates the size of the physical page + * allocated in the vmalloc layer. + */ +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC + return find_vm_area(addr)->page_order > 0; +#else + return false; +#endif +} + #ifdef CONFIG_MMU int vmap_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, @@ -205,6 +225,7 @@ static inline void set_vm_flush_reset_perms(void *addr) if (vm) vm->flags |= VM_FLUSH_RESET_PERMS; } + #else static inline int map_kernel_range_noflush(unsigned long start, unsigned long size, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 56a8103580d6..39ff5c604cef 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -72,6 +72,7 @@ #include #include #include +#include #include #include @@ -8222,6 +8223,7 @@ void *__init alloc_large_system_hash(const char *tablename, void *table = NULL; gfp_t gfp_flags; bool virt; + bool huge; /* allow the kernel cmdline to have a say */ if (!numentries) { @@ -8289,6 +8291,7 @@ void *__init alloc_large_system_hash(const char *tablename, } else if (get_order(size) >= MAX_ORDER || hashdist) { table = __vmalloc(size, gfp_flags); virt = true; + huge = is_vm_area_hugepages(table); } else { /* * If bucketsize is not a power-of-two, we may free @@ -8305,7 +8308,7 @@ void *__init alloc_large_system_hash(const char *tablename, pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, - virt ? "vmalloc" : "linear"); + virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); if (_hash_shift) *_hash_shift = log2qty; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 41c1dbdd2677..59c815eb7e74 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -42,6 +42,19 @@ #include "internal.h" #include "pgalloc-track.h" +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC +static bool __ro_after_init vmap_allow_huge = true; + +static int __init set_nohugevmalloc(char *str) +{ + vmap_allow_huge = false; + return 0; +} +early_param("nohugevmalloc", set_nohugevmalloc); +#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ +static const bool vmap_allow_huge = false; +#endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ + bool is_vmalloc_addr(const void *x) { unsigned long addr = (unsigned long)x; @@ -483,31 +496,12 @@ static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr, return 0; } -/** - * map_kernel_range_noflush - map kernel VM area with the specified pages - * @addr: start of the VM area to map - * @size: size of the VM area to map - * @prot: page protection flags to use - * @pages: pages to map - * - * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should - * have been allocated using get_vm_area() and its friends. - * - * NOTE: - * This function does NOT do any cache flushing. 
The caller is responsible for - * calling flush_cache_vmap() on to-be-mapped areas before calling this - * function. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int map_kernel_range_noflush(unsigned long addr, unsigned long size, - pgprot_t prot, struct page **pages) +static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages) { unsigned long start = addr; - unsigned long end = addr + size; - unsigned long next; pgd_t *pgd; + unsigned long next; int err = 0; int nr = 0; pgtbl_mod_mask mask = 0; @@ -529,6 +523,66 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size, return 0; } +static int vmap_pages_range_noflush(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift) +{ + unsigned int i, nr = (end - addr) >> PAGE_SHIFT; + + WARN_ON(page_shift < PAGE_SHIFT); + + if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || + page_shift == PAGE_SHIFT) + return vmap_small_pages_range_noflush(addr, end, prot, pages); + + for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) { + int err; + + err = vmap_range_noflush(addr, addr + (1UL << page_shift), + __pa(page_address(pages[i])), prot, + page_shift); + if (err) + return err; + + addr += 1UL << page_shift; + } + + return 0; +} + +static int vmap_pages_range(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift) +{ + int err; + + err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); + flush_cache_vmap(addr, end); + return err; +} + +/** + * map_kernel_range_noflush - map kernel VM area with the specified pages + * @addr: start of the VM area to map + * @size: size of the VM area to map + * @prot: page protection flags to use + * @pages: pages to map + * + * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should + * have been allocated using get_vm_area() and its friends. + * + * NOTE: + * This function does NOT do any cache flushing. The caller is responsible for + * calling flush_cache_vmap() on to-be-mapped areas before calling this + * function. + * + * RETURNS: + * 0 on success, -errno on failure. 
+ */ +int map_kernel_range_noflush(unsigned long addr, unsigned long size, + pgprot_t prot, struct page **pages) +{ + return vmap_pages_range_noflush(addr, addr + size, prot, pages, PAGE_SHIFT); +} + int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) { @@ -2112,6 +2166,24 @@ EXPORT_SYMBOL(vm_map_ram); static struct vm_struct *vmlist __initdata; +static inline unsigned int vm_area_page_order(struct vm_struct *vm) +{ +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC + return vm->page_order; +#else + return 0; +#endif +} + +static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) +{ +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC + vm->page_order = order; +#else + BUG_ON(order != 0); +#endif +} + /** * vm_area_add_early - add vmap area early during boot * @vm: vm_struct to add @@ -2422,6 +2494,7 @@ static inline void set_area_direct_map(const struct vm_struct *area, { int i; + /* HUGE_VMALLOC passes small pages to set_direct_map */ for (i = 0; i < area->nr_pages; i++) if (page_address(area->pages[i])) set_direct_map(area->pages[i]); @@ -2431,6 +2504,7 @@ static inline void set_area_direct_map(const struct vm_struct *area, static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) { unsigned long start = ULONG_MAX, end = 0; + unsigned int page_order = vm_area_page_order(area); int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; int flush_dmap = 0; int i; @@ -2455,11 +2529,14 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) * map. Find the start and end range of the direct mappings to make sure * the vm_unmap_aliases() flush includes the direct map. */ - for (i = 0; i < area->nr_pages; i++) { + for (i = 0; i < area->nr_pages; i += 1U << page_order) { unsigned long addr = (unsigned long)page_address(area->pages[i]); if (addr) { + unsigned long page_size; + + page_size = PAGE_SIZE << page_order; start = min(addr, start); - end = max(addr + PAGE_SIZE, end); + end = max(addr + page_size, end); flush_dmap = 1; } } @@ -2500,13 +2577,14 @@ static void __vunmap(const void *addr, int deallocate_pages) vm_remove_mappings(area, deallocate_pages); if (deallocate_pages) { + unsigned int page_order = vm_area_page_order(area); int i; - for (i = 0; i < area->nr_pages; i++) { + for (i = 0; i < area->nr_pages; i += 1U << page_order) { struct page *page = area->pages[i]; BUG_ON(!page); - __free_pages(page, 0); + __free_pages(page, page_order); } atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); @@ -2697,15 +2775,19 @@ EXPORT_SYMBOL_GPL(vmap_pfn); #endif /* CONFIG_VMAP_PFN */ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, - pgprot_t prot, int node) + pgprot_t prot, unsigned int page_shift, + int node) { const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; - unsigned int nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; + unsigned long addr = (unsigned long)area->addr; + unsigned long size = get_vm_area_size(area); unsigned long array_size; - unsigned int i; + unsigned int nr_small_pages = size >> PAGE_SHIFT; + unsigned int page_order; struct page **pages; + unsigned int i; - array_size = (unsigned long)nr_pages * sizeof(struct page *); + array_size = (unsigned long)nr_small_pages * sizeof(struct page *); gfp_mask |= __GFP_NOWARN; if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) gfp_mask |= __GFP_HIGHMEM; @@ -2724,30 +2806,38 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, } area->pages = pages; - area->nr_pages = nr_pages; + area->nr_pages = 
nr_small_pages; + set_vm_area_page_order(area, page_shift - PAGE_SHIFT); - for (i = 0; i < area->nr_pages; i++) { - struct page *page; + page_order = vm_area_page_order(area); - if (node == NUMA_NO_NODE) - page = alloc_page(gfp_mask); - else - page = alloc_pages_node(node, gfp_mask, 0); + /* + * Careful, we allocate and map page_order pages, but tracking is done + * per PAGE_SIZE page so as to keep the vm_struct APIs independent of + * the physical/mapped size. + */ + for (i = 0; i < area->nr_pages; i += 1U << page_order) { + struct page *page; + int p; + /* Compound pages required for remap_vmalloc_page */ + page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order); if (unlikely(!page)) { /* Successfully allocated i pages, free them in __vfree() */ area->nr_pages = i; atomic_long_add(area->nr_pages, &nr_vmalloc_pages); goto fail; } - area->pages[i] = page; + + for (p = 0; p < (1U << page_order); p++) + area->pages[i + p] = page + p; + if (gfpflags_allow_blocking(gfp_mask)) cond_resched(); } atomic_long_add(area->nr_pages, &nr_vmalloc_pages); - if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), - prot, pages) < 0) + if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0) goto fail; return area->addr; @@ -2755,7 +2845,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, fail: warn_alloc(gfp_mask, NULL, "vmalloc: allocation failure, allocated %ld of %ld bytes", - (area->nr_pages*PAGE_SIZE), area->size); + (area->nr_pages*PAGE_SIZE), size); __vfree(area->addr); return NULL; } @@ -2786,19 +2876,45 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, struct vm_struct *area; void *addr; unsigned long real_size = size; + unsigned long real_align = align; + unsigned int shift = PAGE_SHIFT; - size = PAGE_ALIGN(size); - if (!size || (size >> PAGE_SHIFT) > totalram_pages()) + if (!size || (size >> PAGE_SHIFT) > totalram_pages()) { + area = NULL; goto fail; + } + + if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) && + arch_vmap_pmd_supported(prot)) { + unsigned long size_per_node; - area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED | + /* + * Try huge pages. Only try for PAGE_KERNEL allocations, + * others like modules don't yet expect huge pages in + * their allocations due to apply_to_page_range not + * supporting them. 
+ */ + + size_per_node = size; + if (node == NUMA_NO_NODE) + size_per_node /= num_online_nodes(); + if (size_per_node >= PMD_SIZE) { + shift = PMD_SHIFT; + align = max(real_align, 1UL << shift); + size = ALIGN(real_size, 1UL << shift); + } + } + +again: + size = PAGE_ALIGN(size); + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | vm_flags, start, end, node, gfp_mask, caller); if (!area) goto fail; - addr = __vmalloc_area_node(area, gfp_mask, prot, node); + addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node); if (!addr) - return NULL; + goto fail; /* * In this function, newly allocated vm_struct has VM_UNINITIALIZED @@ -2812,8 +2928,18 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, return addr; fail: - warn_alloc(gfp_mask, NULL, + if (shift > PAGE_SHIFT) { + shift = PAGE_SHIFT; + align = real_align; + size = real_size; + goto again; + } + + if (!area) { + /* Warn for area allocation, page allocations already warn */ + warn_alloc(gfp_mask, NULL, "vmalloc: allocation failure: %lu bytes", real_size); + } return NULL; } -- cgit v1.2.3-71-gd317 From b67177ecd956333029dbc1a4971a857fee0ccbb1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:58:53 -0700 Subject: mm/vmalloc: remove map_kernel_range MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm/vmalloc: cleanup after hugepage series", v2. Christoph pointed out some overdue cleanups required after the huge vmalloc series, and I had another failure error message improvement as well. This patch (of 5): This is a shim around vmap_pages_range, get rid of it. Move the main API comment from the _noflush variant to the normal variant, and make _noflush internal to mm/. Link: https://lkml.kernel.org/r/20210322021806.892164-1-npiggin@gmail.com Link: https://lkml.kernel.org/r/20210322021806.892164-2-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Christoph Hellwig Cc: Uladzislau Rezki Cc: Cédric Le Goater Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/core-api/cachetlb.rst | 2 +- include/linux/vmalloc.h | 11 ------- mm/internal.h | 6 ++++ mm/percpu-vm.c | 5 +-- mm/vmalloc.c | 65 ++++++++++++++++--------------------- 5 files changed, 38 insertions(+), 51 deletions(-) (limited to 'include/linux') diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst index a1582cc79f0f..756f7bcf8191 100644 --- a/Documentation/core-api/cachetlb.rst +++ b/Documentation/core-api/cachetlb.rst @@ -213,7 +213,7 @@ Here are the routines, one by one: there will be no entries in the cache for the kernel address space for virtual addresses in the range 'start' to 'end-1'. - The first of these two routines is invoked after map_kernel_range() + The first of these two routines is invoked after vmap_range() has installed the page table entries. The second is invoked before unmap_kernel_range() deletes the page table entries. 
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index b4c82f2d40dc..fb3b9989a4c5 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -212,10 +212,6 @@ static inline bool is_vm_area_hugepages(const void *addr) int vmap_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift); -extern int map_kernel_range_noflush(unsigned long start, unsigned long size, - pgprot_t prot, struct page **pages); -int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, - struct page **pages); extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size); static inline void set_vm_flush_reset_perms(void *addr) @@ -227,13 +223,6 @@ static inline void set_vm_flush_reset_perms(void *addr) } #else -static inline int -map_kernel_range_noflush(unsigned long start, unsigned long size, - pgprot_t prot, struct page **pages) -{ - return size >> PAGE_SHIFT; -} -#define map_kernel_range map_kernel_range_noflush static inline void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) { diff --git a/mm/internal.h b/mm/internal.h index bbe900f9f095..58c3757c52d9 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -637,4 +637,10 @@ struct migration_target_control { gfp_t gfp_mask; }; +/* + * mm/vmalloc.c + */ +int vmap_pages_range_noflush(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift); + #endif /* __MM_INTERNAL_H */ diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c index e46f7a6917f9..88a53eb68a94 100644 --- a/mm/percpu-vm.c +++ b/mm/percpu-vm.c @@ -8,6 +8,7 @@ * Chunks are mapped into vmalloc areas and populated page by page. * This is the default chunk allocator. */ +#include "internal.h" static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, unsigned int cpu, int page_idx) @@ -192,8 +193,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, static int __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages) { - return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT, - PAGE_KERNEL, pages); + return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT), + PAGE_KERNEL, pages, PAGE_SHIFT); } /** diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 59c815eb7e74..527781a3a0fe 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -523,7 +523,16 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, return 0; } -static int vmap_pages_range_noflush(unsigned long addr, unsigned long end, +/* + * vmap_pages_range_noflush is similar to vmap_pages_range, but does not + * flush caches. + * + * The caller is responsible for calling flush_cache_vmap() after this + * function returns successfully and before the addresses are accessed. + * + * This is an internal function only. Do not use outside mm/. 
+ */ +int vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { unsigned int i, nr = (end - addr) >> PAGE_SHIFT; @@ -549,48 +558,26 @@ static int vmap_pages_range_noflush(unsigned long addr, unsigned long end, return 0; } -static int vmap_pages_range(unsigned long addr, unsigned long end, - pgprot_t prot, struct page **pages, unsigned int page_shift) -{ - int err; - - err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); - flush_cache_vmap(addr, end); - return err; -} - /** - * map_kernel_range_noflush - map kernel VM area with the specified pages + * vmap_pages_range - map pages to a kernel virtual address * @addr: start of the VM area to map - * @size: size of the VM area to map + * @end: end of the VM area to map (non-inclusive) * @prot: page protection flags to use - * @pages: pages to map - * - * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should - * have been allocated using get_vm_area() and its friends. - * - * NOTE: - * This function does NOT do any cache flushing. The caller is responsible for - * calling flush_cache_vmap() on to-be-mapped areas before calling this - * function. + * @pages: pages to map (always PAGE_SIZE pages) + * @page_shift: maximum shift that the pages may be mapped with, @pages must + * be aligned and contiguous up to at least this shift. * * RETURNS: * 0 on success, -errno on failure. */ -int map_kernel_range_noflush(unsigned long addr, unsigned long size, - pgprot_t prot, struct page **pages) -{ - return vmap_pages_range_noflush(addr, addr + size, prot, pages, PAGE_SHIFT); -} - -int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, - struct page **pages) +static int vmap_pages_range(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift) { - int ret; + int err; - ret = map_kernel_range_noflush(start, size, prot, pages); - flush_cache_vmap(start, start + size); - return ret; + err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); + flush_cache_vmap(addr, end); + return err; } int is_vmalloc_or_module_addr(const void *x) @@ -2156,10 +2143,12 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node) kasan_unpoison_vmalloc(mem, size); - if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) { + if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, + pages, PAGE_SHIFT) < 0) { vm_unmap_ram(mem, count); return NULL; } + return mem; } EXPORT_SYMBOL(vm_map_ram); @@ -2703,6 +2692,7 @@ void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) { struct vm_struct *area; + unsigned long addr; unsigned long size; /* In bytes */ might_sleep(); @@ -2715,8 +2705,9 @@ void *vmap(struct page **pages, unsigned int count, if (!area) return NULL; - if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot), - pages) < 0) { + addr = (unsigned long)area->addr; + if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), + pages, PAGE_SHIFT) < 0) { vunmap(area->addr); return NULL; } -- cgit v1.2.3-71-gd317 From 4ad0ae8c64ac8f81a3651bca11be7c3cb086df80 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:59:01 -0700 Subject: mm/vmalloc: remove unmap_kernel_range MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a shim around vunmap_range, get rid of it. Move the main API comment from the _noflush variant to the normal variant, and make _noflush internal to mm/. 
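For illustration only (not part of the patch below), a minimal sketch of the caller-side conversion this change performs: the removed helpers took a (start, size) pair, while vunmap_range() takes an exclusive (start, end) range and does the cache and TLB flushing itself. The wrapper name here is invented; the calls mirror the arm64, powerpc and PCI hunks that follow.

#include <linux/vmalloc.h>

/* Hypothetical caller, shown only to illustrate the API change. */
static void example_teardown(unsigned long addr, unsigned long size)
{
	/* before: unmap_kernel_range(addr, size); */
	vunmap_range(addr, addr + size);	/* flushes caches and TLBs */
}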
[npiggin@gmail.com: fix nommu builds and a comment bug per sfr] Link: https://lkml.kernel.org/r/1617292598.m6g0knx24s.astroid@bobo.none [akpm@linux-foundation.org: move vunmap_range_noflush() stub inside !CONFIG_MMU, not !CONFIG_NUMA] [npiggin@gmail.com: fix nommu builds] Link: https://lkml.kernel.org/r/1617292497.o1uhq5ipxp.astroid@bobo.none Link: https://lkml.kernel.org/r/20210322021806.892164-5-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Christoph Hellwig Cc: Cédric Le Goater Cc: Uladzislau Rezki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/core-api/cachetlb.rst | 2 +- arch/arm64/mm/init.c | 2 +- arch/powerpc/kernel/isa-bridge.c | 4 +-- arch/powerpc/kernel/pci_64.c | 2 +- arch/powerpc/mm/ioremap.c | 2 +- drivers/pci/pci.c | 2 +- include/linux/vmalloc.h | 8 +---- mm/internal.h | 15 +++++++++- mm/percpu-vm.c | 2 +- mm/vmalloc.c | 59 ++++++++++++++++++------------------- 10 files changed, 51 insertions(+), 47 deletions(-) (limited to 'include/linux') diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst index 756f7bcf8191..fe4290e26729 100644 --- a/Documentation/core-api/cachetlb.rst +++ b/Documentation/core-api/cachetlb.rst @@ -215,7 +215,7 @@ Here are the routines, one by one: The first of these two routines is invoked after vmap_range() has installed the page table entries. The second is invoked - before unmap_kernel_range() deletes the page table entries. + before vunmap_range() deletes the page table entries. There exists another whole class of cpu cache issues which currently require a whole different set of interfaces to handle properly. diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 3685e12aba9b..470f92e6a542 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -521,7 +521,7 @@ void free_initmem(void) * prevents the region from being reused for kernel modules, which * is not supported by kallsyms. 
*/ - unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin)); + vunmap_range((u64)__init_begin, (u64)__init_end); } void dump_mem_limit(void) diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index 2257d24e6a26..39c625737c09 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -48,7 +48,7 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size) if (slab_is_available()) { if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa, pgprot_noncached(PAGE_KERNEL))) - unmap_kernel_range(ISA_IO_BASE, size); + vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size); } else { early_ioremap_range(ISA_IO_BASE, pa, size, pgprot_noncached(PAGE_KERNEL)); @@ -311,7 +311,7 @@ static void isa_bridge_remove(void) isa_bridge_pcidev = NULL; /* Unmap the ISA area */ - unmap_kernel_range(ISA_IO_BASE, 0x10000); + vunmap_range(ISA_IO_BASE, ISA_IO_BASE + 0x10000); } /** diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 9312e6eda7ff..3fb7e572abed 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -140,7 +140,7 @@ void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size) addr = (unsigned long)area->addr; if (ioremap_page_range(addr, addr + size, paddr, pgprot_noncached(PAGE_KERNEL))) { - unmap_kernel_range(addr, size); + vunmap_range(addr, addr + size); return NULL; } diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c index b1a0aebe8c48..57342154d2b0 100644 --- a/arch/powerpc/mm/ioremap.c +++ b/arch/powerpc/mm/ioremap.c @@ -93,7 +93,7 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size, if (!ret) return (void __iomem *)area->addr + offset; - unmap_kernel_range(va, size); + vunmap_range(va, va + size); free_vm_area(area); return NULL; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e4d4e399004b..f4c26e6118ea 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4102,7 +4102,7 @@ void pci_unmap_iospace(struct resource *res) #if defined(PCI_IOBASE) && defined(CONFIG_MMU) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; - unmap_kernel_range(vaddr, resource_size(res)); + vunmap_range(vaddr, vaddr + resource_size(res)); #endif } EXPORT_SYMBOL(pci_unmap_iospace); diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index fb3b9989a4c5..394d03cc0e92 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -212,8 +212,7 @@ static inline bool is_vm_area_hugepages(const void *addr) int vmap_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift); -extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); -extern void unmap_kernel_range(unsigned long addr, unsigned long size); +void vunmap_range(unsigned long addr, unsigned long end); static inline void set_vm_flush_reset_perms(void *addr) { struct vm_struct *vm = find_vm_area(addr); @@ -223,11 +222,6 @@ static inline void set_vm_flush_reset_perms(void *addr) } #else -static inline void -unmap_kernel_range_noflush(unsigned long addr, unsigned long size) -{ -} -#define unmap_kernel_range unmap_kernel_range_noflush static inline void set_vm_flush_reset_perms(void *addr) { } diff --git a/mm/internal.h b/mm/internal.h index 58c3757c52d9..42e30e71554a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -446,7 +446,9 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, static inline void clear_page_mlock(struct page *page) { } static inline void 
mlock_vma_page(struct page *page) { } static inline void mlock_migrate_page(struct page *new, struct page *old) { } - +static inline void vunmap_range_noflush(unsigned long start, unsigned long end) +{ +} #endif /* !CONFIG_MMU */ /* @@ -640,7 +642,18 @@ struct migration_target_control { /* * mm/vmalloc.c */ +#ifdef CONFIG_MMU int vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift); +#else +static inline +int vmap_pages_range_noflush(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift) +{ + return -EINVAL; +} +#endif + +void vunmap_range_noflush(unsigned long start, unsigned long end); #endif /* __MM_INTERNAL_H */ diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c index 88a53eb68a94..8d3844bc0c7c 100644 --- a/mm/percpu-vm.c +++ b/mm/percpu-vm.c @@ -134,7 +134,7 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) { - unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT); + vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT)); } /** diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 527781a3a0fe..f7a53c19e84b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -378,22 +378,20 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, } while (p4d++, addr = next, addr != end); } -/** - * unmap_kernel_range_noflush - unmap kernel VM area - * @start: start of the VM area to unmap - * @size: size of the VM area to unmap +/* + * vunmap_range_noflush is similar to vunmap_range, but does not + * flush caches or TLBs. * - * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size specify - * should have been allocated using get_vm_area() and its friends. + * The caller is responsible for calling flush_cache_vmap() before calling + * this function, and flush_tlb_kernel_range after it has returned + * successfully (and before the addresses are expected to cause a page fault + * or be re-mapped for something else, if TLB flushes are being delayed or + * coalesced). * - * NOTE: - * This function does NOT do any cache flushing. The caller is responsible - * for calling flush_cache_vunmap() on to-be-mapped areas before calling this - * function and flush_tlb_kernel_range() after. + * This is an internal function only. Do not use outside mm/. */ -void unmap_kernel_range_noflush(unsigned long start, unsigned long size) +void vunmap_range_noflush(unsigned long start, unsigned long end) { - unsigned long end = start + size; unsigned long next; pgd_t *pgd; unsigned long addr = start; @@ -414,6 +412,22 @@ void unmap_kernel_range_noflush(unsigned long start, unsigned long size) arch_sync_kernel_mappings(start, end); } +/** + * vunmap_range - unmap kernel virtual addresses + * @addr: start of the VM area to unmap + * @end: end of the VM area to unmap (non-inclusive) + * + * Clears any present PTEs in the virtual address range, flushes TLBs and + * caches. Any subsequent access to the address before it has been re-mapped + * is a kernel bug. 
+ */ +void vunmap_range(unsigned long addr, unsigned long end) +{ + flush_cache_vunmap(addr, end); + vunmap_range_noflush(addr, end); + flush_tlb_kernel_range(addr, end); +} + static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) @@ -1712,7 +1726,7 @@ static void free_vmap_area_noflush(struct vmap_area *va) static void free_unmap_vmap_area(struct vmap_area *va) { flush_cache_vunmap(va->va_start, va->va_end); - unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start); + vunmap_range_noflush(va->va_start, va->va_end); if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(va->va_start, va->va_end); @@ -1990,7 +2004,7 @@ static void vb_free(unsigned long addr, unsigned long size) offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr)); - unmap_kernel_range_noflush(addr, size); + vunmap_range_noflush(addr, addr + size); if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(addr, addr + size); @@ -2307,23 +2321,6 @@ void __init vmalloc_init(void) vmap_initialized = true; } -/** - * unmap_kernel_range - unmap kernel VM area and flush cache and TLB - * @addr: start of the VM area to unmap - * @size: size of the VM area to unmap - * - * Similar to unmap_kernel_range_noflush() but flushes vcache before - * the unmapping and tlb after. - */ -void unmap_kernel_range(unsigned long addr, unsigned long size) -{ - unsigned long end = addr + size; - - flush_cache_vunmap(addr, end); - unmap_kernel_range_noflush(addr, size); - flush_tlb_kernel_range(addr, end); -} - static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, struct vmap_area *va, unsigned long flags, const void *caller) { -- cgit v1.2.3-71-gd317 From 78f4841e34763079be0661744c1ca997be64eb56 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 29 Apr 2021 22:59:25 -0700 Subject: mm/doc: fix fault_flag_allow_retry_first kerneldoc make htmldocs reports: include/linux/mm.h:496: warning: Function parameter or member 'flags' not described in 'fault_flag_allow_retry_first' Add a description. Link: https://lkml.kernel.org/r/20210322195022.2143603-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Cc: John Hubbard Cc: Mike Rapoport Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 93097dbd9604..382c33e7d906 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -485,6 +485,7 @@ extern pgprot_t protection_map[16]; /** * fault_flag_allow_retry_first - check ALLOW_RETRY the first time + * @flags: Fault flags. * * This is mostly used for places where we want to try to avoid taking * the mmap_lock for too long a time when waiting for another condition -- cgit v1.2.3-71-gd317 From 136dfc9949f84089217f84e6478471dabbf14ba7 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 29 Apr 2021 22:59:28 -0700 Subject: mm/doc: fix page_maybe_dma_pinned kerneldoc make htmldocs reports: include/linux/mm.h:1341: warning: Excess function parameter 'Return' description in 'page_maybe_dma_pinned' Fix a few other formatting nits while I'm editing this description. 
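As a hedged illustration of the kernel-doc layout this fix converges on (the helper and its body are invented, not taken from the patch): parameters are documented with "@name:" lines directly under the one-line summary, and the return value belongs in a "Return:" section rather than a bogus "@Return" parameter.

/**
 * my_page_helper - Report something about a page.
 * @page: The page.
 *
 * Free-form description goes here, between the parameter block and the
 * Return: section.
 *
 * Return: True if the condition likely holds, false if it definitely
 * does not.
 */
static inline bool my_page_helper(struct page *page)
{
	return false;
}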
Link: https://lkml.kernel.org/r/20210322195022.2143603-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Mike Rapoport Reviewed-by: John Hubbard Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 382c33e7d906..5acda8761935 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1271,10 +1271,11 @@ void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, void unpin_user_pages(struct page **pages, unsigned long npages); /** - * page_maybe_dma_pinned() - report if a page is pinned for DMA. + * page_maybe_dma_pinned - Report if a page is pinned for DMA. + * @page: The page. * * This function checks if a page has been pinned via a call to - * pin_user_pages*(). + * a function in the pin_user_pages() family. * * For non-huge pages, the return value is partially fuzzy: false is not fuzzy, * because it means "definitely not pinned for DMA", but true means "probably @@ -1292,9 +1293,8 @@ void unpin_user_pages(struct page **pages, unsigned long npages); * * For more information, please see Documentation/core-api/pin_user_pages.rst. * - * @page: pointer to page to be queried. - * @Return: True, if it is likely that the page has been "dma-pinned". - * False, if the page is definitely not dma-pinned. + * Return: True, if it is likely that the page has been "dma-pinned". + * False, if the page is definitely not dma-pinned. */ static inline bool page_maybe_dma_pinned(struct page *page) { -- cgit v1.2.3-71-gd317 From da2f5eb3d344503c4d851bdf1ae2379167074413 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 29 Apr 2021 22:59:31 -0700 Subject: mm/doc: turn fault flags into an enum The kernel-doc script complains about include/linux/mm.h:425: warning: wrong kernel-doc identifier on line: * Fault flag definitions. I don't know how to document a series of #defines, so turn these definitions into an enum and document that instead. Link: https://lkml.kernel.org/r/20210322195022.2143603-3-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Mike Rapoport Cc: John Hubbard Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 5acda8761935..64b2e3a0b94d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -432,8 +432,7 @@ extern unsigned int kobjsize(const void *objp); extern pgprot_t protection_map[16]; /** - * Fault flag definitions. - * + * enum fault_flag - Fault flag definitions. * @FAULT_FLAG_WRITE: Fault was a write fault. * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE. * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked. @@ -464,16 +463,18 @@ extern pgprot_t protection_map[16]; * signals before a retry to make sure the continuous page faults can still be * interrupted if necessary. 
*/ -#define FAULT_FLAG_WRITE 0x01 -#define FAULT_FLAG_MKWRITE 0x02 -#define FAULT_FLAG_ALLOW_RETRY 0x04 -#define FAULT_FLAG_RETRY_NOWAIT 0x08 -#define FAULT_FLAG_KILLABLE 0x10 -#define FAULT_FLAG_TRIED 0x20 -#define FAULT_FLAG_USER 0x40 -#define FAULT_FLAG_REMOTE 0x80 -#define FAULT_FLAG_INSTRUCTION 0x100 -#define FAULT_FLAG_INTERRUPTIBLE 0x200 +enum fault_flag { + FAULT_FLAG_WRITE = 1 << 0, + FAULT_FLAG_MKWRITE = 1 << 1, + FAULT_FLAG_ALLOW_RETRY = 1 << 2, + FAULT_FLAG_RETRY_NOWAIT = 1 << 3, + FAULT_FLAG_KILLABLE = 1 << 4, + FAULT_FLAG_TRIED = 1 << 5, + FAULT_FLAG_USER = 1 << 6, + FAULT_FLAG_REMOTE = 1 << 7, + FAULT_FLAG_INSTRUCTION = 1 << 8, + FAULT_FLAG_INTERRUPTIBLE = 1 << 9, +}; /* * The default fault flags that should be used by most of the @@ -496,7 +497,7 @@ extern pgprot_t protection_map[16]; * Return: true if the page fault allows retry and this is the first * attempt of the fault handling; false otherwise. */ -static inline bool fault_flag_allow_retry_first(unsigned int flags) +static inline bool fault_flag_allow_retry_first(enum fault_flag flags) { return (flags & FAULT_FLAG_ALLOW_RETRY) && (!(flags & FAULT_FLAG_TRIED)); @@ -531,7 +532,7 @@ struct vm_fault { pgoff_t pgoff; /* Logical page offset based on vma */ unsigned long address; /* Faulting virtual address */ }; - unsigned int flags; /* FAULT_FLAG_xxx flags + enum fault_flag flags; /* FAULT_FLAG_xxx flags * XXX: should really be 'const' */ pmd_t *pmd; /* Pointer to pmd entry matching * the 'address' */ -- cgit v1.2.3-71-gd317 From 91ab1a41191ef2d4c6e123951a0f0c3876bd9376 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Thu, 29 Apr 2021 22:59:40 -0700 Subject: pagewalk: prefix struct kernel-doc descriptions The script './scripts/kernel-doc -none ./include/linux/pagewalk.h' reports: include/linux/pagewalk.h:37: warning: cannot understand function prototype: 'struct mm_walk_ops ' include/linux/pagewalk.h:85: warning: cannot understand function prototype: 'struct mm_walk ' A kernel-doc description for a structure requires to prefix the struct name with the keyword 'struct'. So, do that such that no further kernel-doc warnings are reported for this file. 
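For illustration, a minimal sketch of the convention being applied (the structure and its members are invented, not from pagewalk.h): kernel-doc only recognizes a structure description when the name is prefixed with the "struct" keyword; without it, the comment is parsed as a malformed function prototype.

/**
 * struct my_walk_ops - callbacks invoked while walking something
 * @setup:    optional, called once before the walk starts
 * @teardown: optional, called once after the walk finishes
 */
struct my_walk_ops {
	int  (*setup)(void *priv);
	void (*teardown)(void *priv);
};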
Link: https://lkml.kernel.org/r/20210322122542.15072-3-lukas.bulwahn@gmail.com Signed-off-by: Lukas Bulwahn Cc: Joe Perches Cc: Jonathan Corbet Cc: Ralf Ramsauer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagewalk.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h index b1cb6b753abb..ac7b38ad5903 100644 --- a/include/linux/pagewalk.h +++ b/include/linux/pagewalk.h @@ -7,7 +7,7 @@ struct mm_walk; /** - * mm_walk_ops - callbacks for walk_page_range + * struct mm_walk_ops - callbacks for walk_page_range * @pgd_entry: if set, called for each non-empty PGD (top-level) entry * @p4d_entry: if set, called for each non-empty P4D entry * @pud_entry: if set, called for each non-empty PUD entry @@ -71,7 +71,7 @@ enum page_walk_action { }; /** - * mm_walk - walk_page_range data + * struct mm_walk - walk_page_range data * @ops: operation to call during the walk * @mm: mm_struct representing the target process of page table walk * @pgd: pointer to PGD; only valid with no_vma (otherwise set to NULL) -- cgit v1.2.3-71-gd317 From a064cb00d359bc464df6fd2ab6dfb8dc4b31e361 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 29 Apr 2021 22:59:49 -0700 Subject: kasan: initialize shadow to TAG_INVALID for SW_TAGS Currently, KASAN_SW_TAGS uses 0xFF as the default tag value for unallocated memory. The underlying idea is that since that memory hasn't been allocated yet, it's only supposed to be dereferenced through a pointer with the native 0xFF tag. While this is a good idea in terms on consistency, practically it doesn't bring any benefit. Since the 0xFF pointer tag is a match-all tag, it doesn't matter what tag the accessed memory has. No accesses through 0xFF-tagged pointers are considered buggy by KASAN. This patch changes the default tag value for unallocated memory to 0xFE, which is the tag KASAN uses for inaccessible memory. This doesn't affect accesses through 0xFF-tagged pointer to this memory, but this allows KASAN to detect wild and large out-of-bounds invalid memory accesses through otherwise-tagged pointers. This is a prepatory patch for the next one, which changes the tag-based KASAN modes to not poison the boot memory. Link: https://lkml.kernel.org/r/c8e93571c18b3528aac5eb33ade213bf133d10ad.1613692950.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Christoph Hellwig Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Marco Elver Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index d53ea3c047bc..9f5faefd1744 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -30,7 +30,8 @@ struct kunit_kasan_expectation { /* Software KASAN implementations use shadow memory. */ #ifdef CONFIG_KASAN_SW_TAGS -#define KASAN_SHADOW_INIT 0xFF +/* This matches KASAN_TAG_INVALID. 
*/ +#define KASAN_SHADOW_INIT 0xFE #else #define KASAN_SHADOW_INIT 0 #endif -- cgit v1.2.3-71-gd317 From 1bb5eab30d68c1a3d9dbc822e1895e6c06dbe748 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 29 Apr 2021 23:00:02 -0700 Subject: kasan, mm: integrate page_alloc init with HW_TAGS This change uses the previously added memory initialization feature of HW_TAGS KASAN routines for page_alloc memory when init_on_alloc/free is enabled. With this change, kernel_init_free_pages() is no longer called when both HW_TAGS KASAN and init_on_alloc/free are enabled. Instead, memory is initialized in KASAN runtime. To avoid discrepancies with which memory gets initialized that can be caused by future changes, both KASAN and kernel_init_free_pages() hooks are put together and a warning comment is added. This patch changes the order in which memory initialization and page poisoning hooks are called. This doesn't lead to any side-effects, as whenever page poisoning is enabled, memory initialization gets disabled. Combining setting allocation tags with memory initialization improves HW_TAGS KASAN performance when init_on_alloc/free is enabled. [andreyknvl@google.com: fix for "integrate page_alloc init with HW_TAGS"] Link: https://lkml.kernel.org/r/65b6028dea2e9a6e8e2cb779b5115c09457363fc.1617122211.git.andreyknvl@google.com Link: https://lkml.kernel.org/r/e77f0d5b1b20658ef0b8288625c74c2b3690e725.1615296150.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Tested-by: Vlastimil Babka Reviewed-by: Sergei Trofimovich Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Christoph Lameter Cc: David Rientjes Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Joonsoo Kim Cc: Kevin Brodsky Cc: Pekka Enberg Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 30 ++++++++++++++++++++++-------- mm/kasan/common.c | 8 ++++---- mm/mempool.c | 4 ++-- mm/page_alloc.c | 41 +++++++++++++++++++++++++++++++---------- 4 files changed, 59 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 9f5faefd1744..30aa2bee8400 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -96,6 +96,11 @@ static __always_inline bool kasan_enabled(void) return static_branch_likely(&kasan_flag_enabled); } +static inline bool kasan_has_integrated_init(void) +{ + return kasan_enabled(); +} + #else /* CONFIG_KASAN_HW_TAGS */ static inline bool kasan_enabled(void) @@ -103,6 +108,11 @@ static inline bool kasan_enabled(void) return true; } +static inline bool kasan_has_integrated_init(void) +{ + return false; +} + #endif /* CONFIG_KASAN_HW_TAGS */ slab_flags_t __kasan_never_merge(void); @@ -120,20 +130,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size) __kasan_unpoison_range(addr, size); } -void __kasan_alloc_pages(struct page *page, unsigned int order); +void __kasan_alloc_pages(struct page *page, unsigned int order, bool init); static __always_inline void kasan_alloc_pages(struct page *page, - unsigned int order) + unsigned int order, bool init) { if (kasan_enabled()) - __kasan_alloc_pages(page, order); + __kasan_alloc_pages(page, order, init); } -void __kasan_free_pages(struct page *page, unsigned int order); +void __kasan_free_pages(struct page *page, unsigned int order, bool init); static __always_inline void kasan_free_pages(struct page *page, - unsigned int order) + 
unsigned int order, bool init) { if (kasan_enabled()) - __kasan_free_pages(page, order); + __kasan_free_pages(page, order, init); } void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, @@ -277,13 +287,17 @@ static inline bool kasan_enabled(void) { return false; } +static inline bool kasan_has_integrated_init(void) +{ + return false; +} static inline slab_flags_t kasan_never_merge(void) { return 0; } static inline void kasan_unpoison_range(const void *address, size_t size) {} -static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} -static inline void kasan_free_pages(struct page *page, unsigned int order) {} +static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {} +static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {} static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} diff --git a/mm/kasan/common.c b/mm/kasan/common.c index b3d8eb154f59..efe58e58cc93 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void) return 0; } -void __kasan_alloc_pages(struct page *page, unsigned int order) +void __kasan_alloc_pages(struct page *page, unsigned int order, bool init) { u8 tag; unsigned long i; @@ -108,14 +108,14 @@ void __kasan_alloc_pages(struct page *page, unsigned int order) tag = kasan_random_tag(); for (i = 0; i < (1 << order); i++) page_kasan_tag_set(page + i, tag); - kasan_unpoison(page_address(page), PAGE_SIZE << order, false); + kasan_unpoison(page_address(page), PAGE_SIZE << order, init); } -void __kasan_free_pages(struct page *page, unsigned int order) +void __kasan_free_pages(struct page *page, unsigned int order, bool init) { if (likely(!PageHighMem(page))) kasan_poison(page_address(page), PAGE_SIZE << order, - KASAN_FREE_PAGE, false); + KASAN_FREE_PAGE, init); } /* diff --git a/mm/mempool.c b/mm/mempool.c index 79959fac27d7..fe19d290a301 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -106,7 +106,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element) if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) kasan_slab_free_mempool(element); else if (pool->alloc == mempool_alloc_pages) - kasan_free_pages(element, (unsigned long)pool->pool_data); + kasan_free_pages(element, (unsigned long)pool->pool_data, false); } static void kasan_unpoison_element(mempool_t *pool, void *element) @@ -114,7 +114,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element) if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) kasan_unpoison_range(element, __ksize(element)); else if (pool->alloc == mempool_alloc_pages) - kasan_alloc_pages(element, (unsigned long)pool->pool_data); + kasan_alloc_pages(element, (unsigned long)pool->pool_data, false); } static __always_inline void add_element(mempool_t *pool, void *element) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 342cabf912a7..6314de5387f5 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -397,14 +397,14 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages); * initialization is done, but this is not likely to happen. 
*/ static inline void kasan_free_nondeferred_pages(struct page *page, int order, - fpi_t fpi_flags) + bool init, fpi_t fpi_flags) { if (static_branch_unlikely(&deferred_pages)) return; if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && (fpi_flags & FPI_SKIP_KASAN_POISON)) return; - kasan_free_pages(page, order); + kasan_free_pages(page, order, init); } /* Returns true if the struct page for the pfn is uninitialised */ @@ -456,12 +456,12 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn) } #else static inline void kasan_free_nondeferred_pages(struct page *page, int order, - fpi_t fpi_flags) + bool init, fpi_t fpi_flags) { if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && (fpi_flags & FPI_SKIP_KASAN_POISON)) return; - kasan_free_pages(page, order); + kasan_free_pages(page, order, init); } static inline bool early_page_uninitialised(unsigned long pfn) @@ -1243,6 +1243,7 @@ static __always_inline bool free_pages_prepare(struct page *page, unsigned int order, bool check_free, fpi_t fpi_flags) { int bad = 0; + bool init; VM_BUG_ON_PAGE(PageTail(page), page); @@ -1300,16 +1301,21 @@ static __always_inline bool free_pages_prepare(struct page *page, debug_check_no_obj_freed(page_address(page), PAGE_SIZE << order); } - if (want_init_on_free()) - kernel_init_free_pages(page, 1 << order); kernel_poison_pages(page, 1 << order); /* + * As memory initialization might be integrated into KASAN, + * kasan_free_pages and kernel_init_free_pages must be + * kept together to avoid discrepancies in behavior. + * * With hardware tag-based KASAN, memory tags must be set before the * page becomes unavailable via debug_pagealloc or arch_free_page. */ - kasan_free_nondeferred_pages(page, order, fpi_flags); + init = want_init_on_free(); + if (init && !kasan_has_integrated_init()) + kernel_init_free_pages(page, 1 << order); + kasan_free_nondeferred_pages(page, order, init, fpi_flags); /* * arch_free_page() can make the page's contents inaccessible. s390 @@ -2316,17 +2322,32 @@ static bool check_new_pages(struct page *page, unsigned int order) inline void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags) { + bool init; + set_page_private(page, 0); set_page_refcounted(page); arch_alloc_page(page, order); debug_pagealloc_map_pages(page, 1 << order); - kasan_alloc_pages(page, order); + + /* + * Page unpoisoning must happen before memory initialization. + * Otherwise, the poison pattern will be overwritten for __GFP_ZERO + * allocations and the page unpoisoning code will complain. + */ kernel_unpoison_pages(page, 1 << order); - set_page_owner(page, order, gfp_flags); - if (!want_init_on_free() && want_init_on_alloc(gfp_flags)) + /* + * As memory initialization might be integrated into KASAN, + * kasan_alloc_pages and kernel_init_free_pages must be + * kept together to avoid discrepancies in behavior. + */ + init = !want_init_on_free() && want_init_on_alloc(gfp_flags); + kasan_alloc_pages(page, order, init); + if (init && !kasan_has_integrated_init()) kernel_init_free_pages(page, 1 << order); + + set_page_owner(page, order, gfp_flags); } static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, -- cgit v1.2.3-71-gd317 From da844b787245194cfd69f0f1d2fb1dd3640a8a6d Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 29 Apr 2021 23:00:06 -0700 Subject: kasan, mm: integrate slab init_on_alloc with HW_TAGS This change uses the previously added memory initialization feature of HW_TAGS KASAN routines for slab memory when init_on_alloc is enabled. 
With this change, memory initialization memset() is no longer called when both HW_TAGS KASAN and init_on_alloc are enabled. Instead, memory is initialized in KASAN runtime. The memory initialization memset() is moved into slab_post_alloc_hook() that currently directly follows the initialization loop. A new argument is added to slab_post_alloc_hook() that indicates whether to initialize the memory or not. To avoid discrepancies with which memory gets initialized that can be caused by future changes, both KASAN hook and initialization memset() are put together and a warning comment is added. Combining setting allocation tags with memory initialization improves HW_TAGS KASAN performance when init_on_alloc is enabled. Link: https://lkml.kernel.org/r/c1292aeb5d519da221ec74a0684a949b027d7720.1615296150.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Christoph Lameter Cc: David Rientjes Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Joonsoo Kim Cc: Kevin Brodsky Cc: Pekka Enberg Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Vlastimil Babka Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 8 ++++---- mm/kasan/common.c | 4 ++-- mm/slab.c | 28 +++++++++++++--------------- mm/slab.h | 17 +++++++++++++---- mm/slub.c | 27 +++++++++++---------------- 5 files changed, 43 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 30aa2bee8400..629aee484b6c 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -226,12 +226,12 @@ static __always_inline void kasan_slab_free_mempool(void *ptr) } void * __must_check __kasan_slab_alloc(struct kmem_cache *s, - void *object, gfp_t flags); + void *object, gfp_t flags, bool init); static __always_inline void * __must_check kasan_slab_alloc( - struct kmem_cache *s, void *object, gfp_t flags) + struct kmem_cache *s, void *object, gfp_t flags, bool init) { if (kasan_enabled()) - return __kasan_slab_alloc(s, object, flags); + return __kasan_slab_alloc(s, object, flags, init); return object; } @@ -320,7 +320,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object) static inline void kasan_kfree_large(void *ptr) {} static inline void kasan_slab_free_mempool(void *ptr) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, - gfp_t flags) + gfp_t flags, bool init) { return object; } diff --git a/mm/kasan/common.c b/mm/kasan/common.c index efe58e58cc93..ac0d4ed9c921 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -428,7 +428,7 @@ static void set_alloc_info(struct kmem_cache *cache, void *object, } void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, - void *object, gfp_t flags) + void *object, gfp_t flags, bool init) { u8 tag; void *tagged_object; @@ -453,7 +453,7 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, * Unpoison the whole object. * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning. */ - kasan_unpoison(tagged_object, cache->object_size, false); + kasan_unpoison(tagged_object, cache->object_size, init); /* Save alloc info (if possible) for non-kmalloc() allocations. 
*/ if (kasan_stack_collection_enabled()) diff --git a/mm/slab.c b/mm/slab.c index 4e212cda8693..84f183e9b31a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3216,6 +3216,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_ void *ptr; int slab_node = numa_mem_id(); struct obj_cgroup *objcg = NULL; + bool init = false; flags &= gfp_allowed_mask; cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags); @@ -3254,12 +3255,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_ out: local_irq_restore(save_flags); ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); - - if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr) - memset(ptr, 0, cachep->object_size); + init = slab_want_init_on_alloc(flags, cachep); out_hooks: - slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr); + slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init); return ptr; } @@ -3301,6 +3300,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned lo unsigned long save_flags; void *objp; struct obj_cgroup *objcg = NULL; + bool init = false; flags &= gfp_allowed_mask; cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags); @@ -3317,12 +3317,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned lo local_irq_restore(save_flags); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); prefetchw(objp); - - if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp) - memset(objp, 0, cachep->object_size); + init = slab_want_init_on_alloc(flags, cachep); out: - slab_post_alloc_hook(cachep, objcg, flags, 1, &objp); + slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init); return objp; } @@ -3542,18 +3540,18 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_); - /* Clear memory outside IRQ disabled section */ - if (unlikely(slab_want_init_on_alloc(flags, s))) - for (i = 0; i < size; i++) - memset(p[i], 0, s->object_size); - - slab_post_alloc_hook(s, objcg, flags, size, p); + /* + * memcg and kmem_cache debug support and memory initialization. + * Done outside of the IRQ disabled section. + */ + slab_post_alloc_hook(s, objcg, flags, size, p, + slab_want_init_on_alloc(flags, s)); /* FIXME: Trace call missing. Christoph would like a bulk variant */ return size; error: local_irq_enable(); cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_); - slab_post_alloc_hook(s, objcg, flags, i, p); + slab_post_alloc_hook(s, objcg, flags, i, p, false); __kmem_cache_free_bulk(s, i, p); return 0; } diff --git a/mm/slab.h b/mm/slab.h index c30ed35b3d5d..18c1927cd196 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -506,15 +506,24 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, } static inline void slab_post_alloc_hook(struct kmem_cache *s, - struct obj_cgroup *objcg, - gfp_t flags, size_t size, void **p) + struct obj_cgroup *objcg, gfp_t flags, + size_t size, void **p, bool init) { size_t i; flags &= gfp_allowed_mask; + + /* + * As memory initialization might be integrated into KASAN, + * kasan_slab_alloc and initialization memset must be + * kept together to avoid discrepancies in behavior. + * + * As p[i] might get tagged, memset and kmemleak hook come after KASAN. + */ for (i = 0; i < size; i++) { - p[i] = kasan_slab_alloc(s, p[i], flags); - /* As p[i] might get tagged, call kmemleak hook after KASAN. 
*/ + p[i] = kasan_slab_alloc(s, p[i], flags, init); + if (p[i] && init && !kasan_has_integrated_init()) + memset(p[i], 0, s->object_size); kmemleak_alloc_recursive(p[i], s->object_size, 1, s->flags, flags); } diff --git a/mm/slub.c b/mm/slub.c index a178c738fc92..5cf35250f20c 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2823,6 +2823,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct page *page; unsigned long tid; struct obj_cgroup *objcg = NULL; + bool init = false; s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags); if (!s) @@ -2900,12 +2901,10 @@ redo: } maybe_wipe_obj_freeptr(s, object); - - if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object) - memset(kasan_reset_tag(object), 0, s->object_size); + init = slab_want_init_on_alloc(gfpflags, s); out: - slab_post_alloc_hook(s, objcg, gfpflags, 1, &object); + slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init); return object; } @@ -3357,20 +3356,16 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, c->tid = next_tid(c->tid); local_irq_enable(); - /* Clear memory outside IRQ disabled fastpath loop */ - if (unlikely(slab_want_init_on_alloc(flags, s))) { - int j; - - for (j = 0; j < i; j++) - memset(kasan_reset_tag(p[j]), 0, s->object_size); - } - - /* memcg and kmem_cache debug support */ - slab_post_alloc_hook(s, objcg, flags, size, p); + /* + * memcg and kmem_cache debug support and memory initialization. + * Done outside of the IRQ disabled fastpath loop. + */ + slab_post_alloc_hook(s, objcg, flags, size, p, + slab_want_init_on_alloc(flags, s)); return i; error: local_irq_enable(); - slab_post_alloc_hook(s, objcg, flags, i, p); + slab_post_alloc_hook(s, objcg, flags, i, p, false); __kmem_cache_free_bulk(s, i, p); return 0; } @@ -3580,7 +3575,7 @@ static void early_kmem_cache_node_alloc(int node) init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); init_tracking(kmem_cache_node, n); #endif - n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL); + n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); page->freelist = get_freepointer(kmem_cache_node, n); page->inuse = 1; page->frozen = 0; -- cgit v1.2.3-71-gd317 From d57a964e09c22441e9fb497d1d7a5c1983a5d1fb Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 29 Apr 2021 23:00:09 -0700 Subject: kasan, mm: integrate slab init_on_free with HW_TAGS This change uses the previously added memory initialization feature of HW_TAGS KASAN routines for slab memory when init_on_free is enabled. With this change, memory initialization memset() is no longer called when both HW_TAGS KASAN and init_on_free are enabled. Instead, memory is initialized in KASAN runtime. For SLUB, the memory initialization memset() is moved into slab_free_hook() that currently directly follows the initialization loop. A new argument is added to slab_free_hook() that indicates whether to initialize the memory or not. To avoid discrepancies with which memory gets initialized that can be caused by future changes, both KASAN hook and initialization memset() are put together and a warning comment is added. Combining setting allocation tags with memory initialization improves HW_TAGS KASAN performance when init_on_free is enabled. 
Link: https://lkml.kernel.org/r/190fd15c1886654afdec0d19ebebd5ade665b601.1615296150.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Christoph Lameter Cc: David Rientjes Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Joonsoo Kim Cc: Kevin Brodsky Cc: Pekka Enberg Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Vlastimil Babka Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 10 ++++++---- mm/kasan/common.c | 13 +++++++------ mm/slab.c | 15 +++++++++++---- mm/slub.c | 43 ++++++++++++++++++++++++------------------- 4 files changed, 48 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 629aee484b6c..b1678a61e6a7 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -203,11 +203,13 @@ static __always_inline void * __must_check kasan_init_slab_obj( return (void *)object; } -bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); -static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object) +bool __kasan_slab_free(struct kmem_cache *s, void *object, + unsigned long ip, bool init); +static __always_inline bool kasan_slab_free(struct kmem_cache *s, + void *object, bool init) { if (kasan_enabled()) - return __kasan_slab_free(s, object, _RET_IP_); + return __kasan_slab_free(s, object, _RET_IP_, init); return false; } @@ -313,7 +315,7 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache, { return (void *)object; } -static inline bool kasan_slab_free(struct kmem_cache *s, void *object) +static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init) { return false; } diff --git a/mm/kasan/common.c b/mm/kasan/common.c index ac0d4ed9c921..6bb87f2acd4e 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -322,8 +322,8 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, return (void *)object; } -static inline bool ____kasan_slab_free(struct kmem_cache *cache, - void *object, unsigned long ip, bool quarantine) +static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object, + unsigned long ip, bool quarantine, bool init) { u8 tag; void *tagged_object; @@ -351,7 +351,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, } kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE), - KASAN_KMALLOC_FREE, false); + KASAN_KMALLOC_FREE, init); if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)) return false; @@ -362,9 +362,10 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, return kasan_quarantine_put(cache, object); } -bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) +bool __kasan_slab_free(struct kmem_cache *cache, void *object, + unsigned long ip, bool init) { - return ____kasan_slab_free(cache, object, ip, true); + return ____kasan_slab_free(cache, object, ip, true, init); } static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip) @@ -409,7 +410,7 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip) return; kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false); } else { - ____kasan_slab_free(page->slab_cache, ptr, ip, false); + ____kasan_slab_free(page->slab_cache, ptr, ip, false, false); } } diff --git a/mm/slab.c b/mm/slab.c index 84f183e9b31a..df45c437b394 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3425,17 +3425,24 @@ free_done: 
static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, unsigned long caller) { + bool init; + if (is_kfence_address(objp)) { kmemleak_free_recursive(objp, cachep->flags); __kfence_free(objp); return; } - if (unlikely(slab_want_init_on_free(cachep))) + /* + * As memory initialization might be integrated into KASAN, + * kasan_slab_free and initialization memset must be + * kept together to avoid discrepancies in behavior. + */ + init = slab_want_init_on_free(cachep); + if (init && !kasan_has_integrated_init()) memset(objp, 0, cachep->object_size); - - /* Put the object into the quarantine, don't touch it for now. */ - if (kasan_slab_free(cachep, objp)) + /* KASAN might put objp into memory quarantine, delaying its reuse. */ + if (kasan_slab_free(cachep, objp, init)) return; /* Use KCSAN to help debug racy use-after-free. */ diff --git a/mm/slub.c b/mm/slub.c index 5cf35250f20c..68123b21e65f 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1533,7 +1533,8 @@ static __always_inline void kfree_hook(void *x) kasan_kfree_large(x); } -static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x) +static __always_inline bool slab_free_hook(struct kmem_cache *s, + void *x, bool init) { kmemleak_free_recursive(x, s->flags); @@ -1559,8 +1560,25 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x) __kcsan_check_access(x, s->object_size, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); - /* KASAN might put x into memory quarantine, delaying its reuse */ - return kasan_slab_free(s, x); + /* + * As memory initialization might be integrated into KASAN, + * kasan_slab_free and initialization memset's must be + * kept together to avoid discrepancies in behavior. + * + * The initialization memset's clear the object and the metadata, + * but don't touch the SLAB redzone. + */ + if (init) { + int rsize; + + if (!kasan_has_integrated_init()) + memset(kasan_reset_tag(x), 0, s->object_size); + rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; + memset((char *)kasan_reset_tag(x) + s->inuse, 0, + s->size - s->inuse - rsize); + } + /* KASAN might put x into memory quarantine, delaying its reuse. */ + return kasan_slab_free(s, x, init); } static inline bool slab_free_freelist_hook(struct kmem_cache *s, @@ -1570,10 +1588,9 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, void *object; void *next = *head; void *old_tail = *tail ? *tail : *head; - int rsize; if (is_kfence_address(next)) { - slab_free_hook(s, next); + slab_free_hook(s, next, false); return true; } @@ -1585,20 +1602,8 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, object = next; next = get_freepointer(s, object); - if (slab_want_init_on_free(s)) { - /* - * Clear the object and the metadata, but don't touch - * the redzone. - */ - memset(kasan_reset_tag(object), 0, s->object_size); - rsize = (s->flags & SLAB_RED_ZONE) ? 
s->red_left_pad - : 0; - memset((char *)kasan_reset_tag(object) + s->inuse, 0, - s->size - s->inuse - rsize); - - } /* If object's reuse doesn't have to be delayed */ - if (!slab_free_hook(s, object)) { + if (!slab_free_hook(s, object, slab_want_init_on_free(s))) { /* Move object to the new freelist */ set_freepointer(s, object, *head); *head = object; @@ -3236,7 +3241,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size, } if (is_kfence_address(object)) { - slab_free_hook(df->s, object); + slab_free_hook(df->s, object, false); __kfence_free(object); p[size] = NULL; /* mark object processed */ return size; -- cgit v1.2.3-71-gd317 From 1f9d03c5e999ed5a57fa4d8aec9fdf67a6234b80 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 29 Apr 2021 23:00:55 -0700 Subject: mm: move mem_init_print_info() into mm_init() mem_init_print_info() is called in mem_init() on each architecture, and pass NULL argument, so using void argument and move it into mm_init(). Link: https://lkml.kernel.org/r/20210317015210.33641-1-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Acked-by: Dave Hansen [x86] Reviewed-by: Christophe Leroy [powerpc] Acked-by: David Hildenbrand Tested-by: Anatoly Pugachev [sparc64] Acked-by: Russell King [arm] Acked-by: Mike Rapoport Cc: Catalin Marinas Cc: Richard Henderson Cc: Guo Ren Cc: Yoshinori Sato Cc: Huacai Chen Cc: Jonas Bonn Cc: Palmer Dabbelt Cc: Heiko Carstens Cc: "David S. Miller" Cc: "Peter Zijlstra" Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/mm/init.c | 1 - arch/arc/mm/init.c | 1 - arch/arm/mm/init.c | 2 -- arch/arm64/mm/init.c | 2 -- arch/csky/mm/init.c | 1 - arch/h8300/mm/init.c | 2 -- arch/hexagon/mm/init.c | 1 - arch/ia64/mm/init.c | 1 - arch/m68k/mm/init.c | 1 - arch/microblaze/mm/init.c | 1 - arch/mips/loongson64/numa.c | 1 - arch/mips/mm/init.c | 1 - arch/mips/sgi-ip27/ip27-memory.c | 1 - arch/nds32/mm/init.c | 1 - arch/nios2/mm/init.c | 1 - arch/openrisc/mm/init.c | 2 -- arch/parisc/mm/init.c | 2 -- arch/powerpc/mm/mem.c | 1 - arch/riscv/mm/init.c | 1 - arch/s390/mm/init.c | 2 -- arch/sh/mm/init.c | 1 - arch/sparc/mm/init_32.c | 2 -- arch/sparc/mm/init_64.c | 1 - arch/um/kernel/mem.c | 1 - arch/x86/mm/init_32.c | 2 -- arch/x86/mm/init_64.c | 2 -- arch/xtensa/mm/init.c | 1 - include/linux/mm.h | 2 +- init/main.c | 1 + mm/page_alloc.c | 10 +++++----- 30 files changed, 7 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 3c42b3147fd6..a97650a618f1 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -282,5 +282,4 @@ mem_init(void) set_max_mapnr(max_low_pfn); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); - mem_init_print_info(NULL); } diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index ce07e697916c..33832e36bdb7 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -194,7 +194,6 @@ void __init mem_init(void) { memblock_free_all(); highmem_init(); - mem_init_print_info(NULL); } #ifdef CONFIG_HIGHMEM diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 828a2561b229..7022b7b5c400 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -316,8 +316,6 @@ void __init mem_init(void) free_highpages(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 470f92e6a542..ef031511ce29 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -491,8 +491,6 @@ void __init mem_init(void) /* this will put all unused low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can be * detected at build time already. diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index 894050a8ce09..bf2004aa811a 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -107,7 +107,6 @@ void __init mem_init(void) free_highmem_page(page); } #endif - mem_init_print_info(NULL); } void free_initmem(void) diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index 1f3b345d68b9..f7bf4693e3b2 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -98,6 +98,4 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - - mem_init_print_info(NULL); } diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index f2e6c868e477..f01e91e10d95 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -55,7 +55,6 @@ void __init mem_init(void) { /* No idea where this is actually declared. Seems to evade LXR. */ memblock_free_all(); - mem_init_print_info(NULL); /* * To-Do: someone somewhere should wipe out the bootmem map diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 97a13eda81bf..064a967a7b6e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -449,7 +449,6 @@ mem_init (void) set_max_mapnr(max_low_pfn); high_memory = __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); - mem_init_print_info(NULL); /* * For fsyscall entrpoints with no light-weight handler, use the ordinary diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 14c1e541451c..1759ab875d47 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -153,5 +153,4 @@ void __init mem_init(void) /* this will put all memory onto the freelists */ memblock_free_all(); init_pointer_tables(); - mem_init_print_info(NULL); } diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 05cf1fb3f5ff..ab55c70380a5 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -131,7 +131,6 @@ void __init mem_init(void) highmem_setup(); #endif - mem_init_print_info(NULL); mem_init_done = 1; } diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index 8315c871c435..fa9b4a487a47 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -178,7 +178,6 @@ void __init mem_init(void) high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); memblock_free_all(); setup_zero_pages(); /* This comes from node 0 */ - mem_init_print_info(NULL); } /* All PCI device belongs to logical Node-0 */ diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 5cb73bf74a8b..c36358758969 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -467,7 +467,6 @@ void __init mem_init(void) memblock_free_all(); setup_zero_pages(); /* Setup zeroed pages. 
*/ mem_init_free_highmem(); - mem_init_print_info(NULL); #ifdef CONFIG_64BIT if ((unsigned long) &_text > (unsigned long) CKSEG0) diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 87bb6945ec25..6173684b5aaa 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -420,5 +420,4 @@ void __init mem_init(void) high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); memblock_free_all(); setup_zero_pages(); /* This comes from node 0 */ - mem_init_print_info(NULL); } diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c index fa86f7b2f416..f63f839738c4 100644 --- a/arch/nds32/mm/init.c +++ b/arch/nds32/mm/init.c @@ -191,7 +191,6 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 61862dbb0e32..613fcaa5988a 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -71,7 +71,6 @@ void __init mem_init(void) /* this will put all memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); } void __init mmu_init(void) diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index bf9b2310fc93..d5641198b90c 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -211,8 +211,6 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); - printk("mem_init_done ...........................................\n"); mem_init_done = 1; return; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 9ca4e4ff6895..591a4e939415 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -573,8 +573,6 @@ void __init mem_init(void) #endif parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); - mem_init_print_info(NULL); - #if 0 /* * Do not expose the virtual kernel memory layout to userspace. diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 4e8ce6d85232..7e11c4cb08b8 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -312,7 +312,6 @@ void __init mem_init(void) (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; #endif - mem_init_print_info(NULL); #ifdef CONFIG_PPC32 pr_info("Kernel virtual memory layout:\n"); #ifdef CONFIG_KASAN diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 067583ab1bd7..92e39cfa5227 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -102,7 +102,6 @@ void __init mem_init(void) high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); memblock_free_all(); - mem_init_print_info(NULL); print_vm_layout(); } diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 0e76b2127dc6..8ac710de1ab1 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -209,8 +209,6 @@ void __init mem_init(void) setup_zero_pages(); /* Setup zeroed pages. 
*/ cmma_init_nodat(); - - mem_init_print_info(NULL); } void free_initmem(void) diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 0db6919af8d3..168d7d4dd735 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -359,7 +359,6 @@ void __init mem_init(void) vsyscall_init(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 6139c5700ccc..1e9f577f084d 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -292,8 +292,6 @@ void __init mem_init(void) map_high_region(start_pfn, end_pfn); } - - mem_init_print_info(NULL); } void sparc_flush_page_to_ram(struct page *page) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 182bb7bdaa0a..e454f179cf5d 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2520,7 +2520,6 @@ void __init mem_init(void) } mark_page_reserved(mem_map_zero); - mem_init_print_info(NULL); if (tlb_type == cheetah || tlb_type == cheetah_plus) cheetah_ecache_flush_init(); diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 9242dc91d751..9019ff5905b1 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -54,7 +54,6 @@ void __init mem_init(void) memblock_free_all(); max_low_pfn = totalram_pages(); max_pfn = max_low_pfn; - mem_init_print_info(NULL); kmalloc_ok = 1; } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index da31c2635ee4..21ffb03f6c72 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -755,8 +755,6 @@ void __init mem_init(void) after_bootmem = 1; x86_init.hyper.init_after_bootmem(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 58ae2f746b3e..e527d829e1ed 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1351,8 +1351,6 @@ void __init mem_init(void) kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); preallocate_vmalloc_pages(); - - mem_init_print_info(NULL); } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 2daeba9e454e..6a32b2cf2718 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -119,7 +119,6 @@ void __init mem_init(void) memblock_free_all(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" #ifdef CONFIG_KASAN " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n" diff --git a/include/linux/mm.h b/include/linux/mm.h index 64b2e3a0b94d..011f43605807 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2360,7 +2360,7 @@ extern unsigned long free_reserved_area(void *start, void *end, int poison, const char *s); extern void adjust_managed_page_count(struct page *page, long count); -extern void mem_init_print_info(const char *str); +extern void mem_init_print_info(void); extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); diff --git a/init/main.c b/init/main.c index ae96c79ad2d3..dd11bfd10ead 100644 --- a/init/main.c +++ b/init/main.c @@ -830,6 +830,7 @@ static void __init mm_init(void) report_meminit(); stack_depot_init(); mem_init(); + mem_init_print_info(); /* page_owner must be initialized after buddy is ready */ page_ext_init_flatmem_late(); kmem_cache_init(); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6314de5387f5..b55073d0e84a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7734,7 +7734,7 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char return pages; } -void __init mem_init_print_info(const char *str) +void __init mem_init_print_info(void) { unsigned long physpages, codesize, datasize, rosize, bss_size; unsigned long init_code_size, init_data_size; @@ -7773,17 +7773,17 @@ void __init mem_init_print_info(const char *str) #ifdef CONFIG_HIGHMEM ", %luK highmem" #endif - "%s%s)\n", + ")\n", nr_free_pages() << (PAGE_SHIFT - 10), physpages << (PAGE_SHIFT - 10), codesize >> 10, datasize >> 10, rosize >> 10, (init_data_size + init_code_size) >> 10, bss_size >> 10, (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10), - totalcma_pages << (PAGE_SHIFT - 10), + totalcma_pages << (PAGE_SHIFT - 10) #ifdef CONFIG_HIGHMEM - totalhigh_pages() << (PAGE_SHIFT - 10), + , totalhigh_pages() << (PAGE_SHIFT - 10) #endif - str ? ", " : "", str ? str : ""); + ); } /** -- cgit v1.2.3-71-gd317 From f73c6c8805ed0762d99122d5332fcf42b0c8fbb8 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Thu, 29 Apr 2021 23:01:04 -0700 Subject: include/linux/page-flags-layout.h: correctly determine LAST_CPUPID_WIDTH The naming convention used in include/linux/page-flags-layout.h: *_SHIFT: the number of bits trying to allocate *_WIDTH: the number of bits successfully allocated So when it comes to LAST_CPUPID_WIDTH, we need to check whether all previous *_WIDTH and LAST_CPUPID_SHIFT can fit into page flags. This means we need to use NODES_WIDTH, not NODES_SHIFT. 
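To make the distinction concrete, here is a worked example with purely illustrative numbers (not taken from any real configuration):

/*
 * Hypothetical 64-bit configuration, numbers for illustration only:
 *
 *   BITS_PER_LONG - NR_PAGEFLAGS = 41   bits left in page->flags
 *   SECTIONS_WIDTH    =  0
 *   ZONES_WIDTH       =  3
 *   NODES_SHIFT       = 10   bits *requested* for the node field ...
 *   NODES_WIDTH       =  0   ... but it did not fit, so the node is kept
 *                            outside page->flags (NODE_NOT_IN_PAGE_FLAGS)
 *   KASAN_TAG_WIDTH   =  8
 *   LAST_CPUPID_SHIFT = 30
 *
 * Old test, using NODES_SHIFT:  0 + 3 + 10 + 30 + 8 = 51 > 41
 *   -> LAST_CPUPID_WIDTH forced to 0, last_cpupid evicted from page->flags
 * New test, using NODES_WIDTH:  0 + 3 +  0 + 30 + 8 = 41 <= 41
 *   -> LAST_CPUPID_WIDTH = LAST_CPUPID_SHIFT, last_cpupid fits after all
 */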
Link: https://lkml.kernel.org/r/20210303071609.797782-1-yuzhao@google.com Signed-off-by: Yu Zhao Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags-layout.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 7d4ec26d8a3e..295c2c687d2c 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -83,7 +83,7 @@ #define KASAN_TAG_WIDTH 0 #endif -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \ +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_WIDTH+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \ <= BITS_PER_LONG - NR_PAGEFLAGS #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT #else -- cgit v1.2.3-71-gd317 From 1587db62d8c0dbd943752f657b452213e1c4d8d4 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Thu, 29 Apr 2021 23:01:07 -0700 Subject: include/linux/page-flags-layout.h: cleanups Tidy things up and delete comments stating the obvious with typos or making no sense. Link: https://lkml.kernel.org/r/20210303071609.797782-2-yuzhao@google.com Signed-off-by: Yu Zhao Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags-layout.h | 62 ++++++++++++++++++--------------------- mm/mm_init.c | 4 --- 2 files changed, 29 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 295c2c687d2c..ef1e3e736e14 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -21,16 +21,17 @@ #elif MAX_NR_ZONES <= 8 #define ZONES_SHIFT 3 #else -#error ZONES_SHIFT -- too many zones configured adjust calculation +#error ZONES_SHIFT "Too many zones configured" #endif +#define ZONES_WIDTH ZONES_SHIFT + #ifdef CONFIG_SPARSEMEM #include - -/* SECTION_SHIFT #bits space required to store a section # */ #define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) - -#endif /* CONFIG_SPARSEMEM */ +#else +#define SECTIONS_SHIFT 0 +#endif #ifndef BUILD_VDSO32_64 /* @@ -54,17 +55,28 @@ #define SECTIONS_WIDTH 0 #endif -#define ZONES_WIDTH ZONES_SHIFT - -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS +#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS #define NODES_WIDTH NODES_SHIFT -#else -#ifdef CONFIG_SPARSEMEM_VMEMMAP +#elif defined(CONFIG_SPARSEMEM_VMEMMAP) #error "Vmemmap: No space for nodes field in page flags" -#endif +#else #define NODES_WIDTH 0 #endif +/* + * Note that this #define MUST have a value so that it can be tested with + * the IS_ENABLED() macro. 
+ */ +#if NODES_SHIFT != 0 && NODES_WIDTH == 0 +#define NODE_NOT_IN_PAGE_FLAGS 1 +#endif + +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) +#define KASAN_TAG_WIDTH 8 +#else +#define KASAN_TAG_WIDTH 0 +#endif + #ifdef CONFIG_NUMA_BALANCING #define LAST__PID_SHIFT 8 #define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1) @@ -77,36 +89,20 @@ #define LAST_CPUPID_SHIFT 0 #endif -#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) -#define KASAN_TAG_WIDTH 8 -#else -#define KASAN_TAG_WIDTH 0 -#endif - -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_WIDTH+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \ +#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT \ <= BITS_PER_LONG - NR_PAGEFLAGS #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT #else #define LAST_CPUPID_WIDTH 0 #endif -#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \ - > BITS_PER_LONG - NR_PAGEFLAGS -#error "Not enough bits in page flags" -#endif - -/* - * We are going to use the flags for the page to node mapping if its in - * there. This includes the case where there is no node, so it is implicit. - * Note that this #define MUST have a value so that it can be tested with - * the IS_ENABLED() macro. - */ -#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0) -#define NODE_NOT_IN_PAGE_FLAGS 1 +#if LAST_CPUPID_SHIFT != 0 && LAST_CPUPID_WIDTH == 0 +#define LAST_CPUPID_NOT_IN_PAGE_FLAGS #endif -#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0 -#define LAST_CPUPID_NOT_IN_PAGE_FLAGS +#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH \ + > BITS_PER_LONG - NR_PAGEFLAGS +#error "Not enough bits in page flags" #endif #endif diff --git a/mm/mm_init.c b/mm/mm_init.c index 8e02e865cc65..9ddaf0e1b0ab 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -19,10 +19,6 @@ #ifdef CONFIG_DEBUG_MEMORY_INIT int __meminitdata mminit_loglevel; -#ifndef SECTIONS_SHIFT -#define SECTIONS_SHIFT 0 -#endif - /* The zonelists are simply reported, validation is manual. */ void __init mminit_verify_zonelist(void) { -- cgit v1.2.3-71-gd317 From 84172f4bb752424415756351a40f8da5714e1554 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 29 Apr 2021 23:01:15 -0700 Subject: mm/page_alloc: combine __alloc_pages and __alloc_pages_nodemask There are only two callers of __alloc_pages() so prune the thicket of alloc_page variants by combining the two functions together. Current callers of __alloc_pages() simply add an extra 'NULL' parameter and current callers of __alloc_pages_nodemask() call __alloc_pages() instead. Link: https://lkml.kernel.org/r/20210225150642.2582252-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Vlastimil Babka Acked-by: Michal Hocko Cc: Mike Rapoport Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/admin-guide/mm/transhuge.rst | 2 +- include/linux/gfp.h | 13 +++---------- mm/hugetlb.c | 2 +- mm/internal.h | 4 ++-- mm/mempolicy.c | 6 +++--- mm/migrate.c | 2 +- mm/page_alloc.c | 5 ++--- 7 files changed, 13 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 3b8a336511a4..c9c37f16eef8 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -402,7 +402,7 @@ compact_fail but failed. 
It is possible to establish how long the stalls were using the function -tracer to record how long was spent in __alloc_pages_nodemask and +tracer to record how long was spent in __alloc_pages() and using the mm_page_alloc tracepoint to identify which allocations were for huge pages. diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 8572a1474e16..f39de931bdf9 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -515,15 +515,8 @@ static inline int arch_make_page_accessible(struct page *page) } #endif -struct page * -__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, - nodemask_t *nodemask); - -static inline struct page * -__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid) -{ - return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL); -} +struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, + nodemask_t *nodemask); /* * Allocate pages, preferring the node given as nid. The node must be valid and @@ -535,7 +528,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); - return __alloc_pages(gfp_mask, order, nid); + return __alloc_pages(gfp_mask, order, nid, NULL); } /* diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a86a58ef132d..6c72433bec1e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1616,7 +1616,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, gfp_mask |= __GFP_RETRY_MAYFAIL; if (nid == NUMA_NO_NODE) nid = numa_mem_id(); - page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask); + page = __alloc_pages(gfp_mask, order, nid, nmask); if (page) __count_vm_event(HTLB_BUDDY_PGALLOC); else diff --git a/mm/internal.h b/mm/internal.h index 42e30e71554a..ef5f336f59bd 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -145,10 +145,10 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); * family of functions. * * nodemask, migratetype and highest_zoneidx are initialized only once in - * __alloc_pages_nodemask() and then never change. + * __alloc_pages() and then never change. * * zonelist, preferred_zone and highest_zoneidx are set first in - * __alloc_pages_nodemask() for the fast path, and might be later changed + * __alloc_pages() for the fast path, and might be later changed * in __alloc_pages_slowpath(). All other functions pass the whole structure * by a const pointer. 
*/ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index ab51132547b8..5f0d20298736 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2140,7 +2140,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, { struct page *page; - page = __alloc_pages(gfp, order, nid); + page = __alloc_pages(gfp, order, nid, NULL); /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ if (!static_branch_likely(&vm_numa_stat_key)) return page; @@ -2237,7 +2237,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, nmask = policy_nodemask(gfp, pol); preferred_nid = policy_node(gfp, pol, node); - page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); + page = __alloc_pages(gfp, order, preferred_nid, nmask); mpol_cond_put(pol); out: return page; @@ -2274,7 +2274,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order) if (pol->mode == MPOL_INTERLEAVE) page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); else - page = __alloc_pages_nodemask(gfp, order, + page = __alloc_pages(gfp, order, policy_node(gfp, pol, numa_node_id()), policy_nodemask(gfp, pol)); diff --git a/mm/migrate.c b/mm/migrate.c index 62b81d5257aa..47df0df8f21a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1617,7 +1617,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private) if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) gfp_mask |= __GFP_HIGHMEM; - new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask); + new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask); if (new_page && PageTransHuge(new_page)) prep_transhuge_page(new_page); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c565ebad02ee..fce4b9180bdb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5013,8 +5013,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, /* * This is the 'heart' of the zoned buddy allocator. */ -struct page * -__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid, +struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) { struct page *page; @@ -5076,7 +5075,7 @@ out: return page; } -EXPORT_SYMBOL(__alloc_pages_nodemask); +EXPORT_SYMBOL(__alloc_pages); /* * Common helper functions. Never use with __GFP_HIGHMEM because the returned -- cgit v1.2.3-71-gd317 From d7f946d0faf90014547ee5d090e9d05018278c7a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 29 Apr 2021 23:01:18 -0700 Subject: mm/mempolicy: rename alloc_pages_current to alloc_pages When CONFIG_NUMA is enabled, alloc_pages() is a wrapper around alloc_pages_current(). This is pointless, just implement alloc_pages() directly. 
Link: https://lkml.kernel.org/r/20210225150642.2582252-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Vlastimil Babka Acked-by: Michal Hocko Cc: Mike Rapoport Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 8 +------- mm/mempolicy.c | 6 +++--- 2 files changed, 4 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f39de931bdf9..0a88f84b08f4 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -546,13 +546,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, } #ifdef CONFIG_NUMA -extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); - -static inline struct page * -alloc_pages(gfp_t gfp_mask, unsigned int order) -{ - return alloc_pages_current(gfp_mask, order); -} +struct page *alloc_pages(gfp_t gfp, unsigned int order); extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, int node, bool hugepage); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5f0d20298736..c71532b7e3f8 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2245,7 +2245,7 @@ out: EXPORT_SYMBOL(alloc_pages_vma); /** - * alloc_pages_current - Allocate pages. + * alloc_pages - Allocate pages. * * @gfp: * %GFP_USER user allocation, @@ -2259,7 +2259,7 @@ EXPORT_SYMBOL(alloc_pages_vma); * interrupt context and apply the current process NUMA policy. * Returns NULL when no page can be allocated. */ -struct page *alloc_pages_current(gfp_t gfp, unsigned order) +struct page *alloc_pages(gfp_t gfp, unsigned order) { struct mempolicy *pol = &default_policy; struct page *page; @@ -2280,7 +2280,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order) return page; } -EXPORT_SYMBOL(alloc_pages_current); +EXPORT_SYMBOL(alloc_pages); int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) { -- cgit v1.2.3-71-gd317 From 387ba26fb1cb9be9e35dc14a6d97188e916eda05 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 29 Apr 2021 23:01:45 -0700 Subject: mm/page_alloc: add a bulk page allocator This patch adds a new page allocator interface via alloc_pages_bulk, and __alloc_pages_bulk_nodemask. A caller requests a number of pages to be allocated and added to a list. The API is not guaranteed to return the requested number of pages and may fail if the preferred allocation zone has limited free memory, the cpuset changes during the allocation or page debugging decides to fail an allocation. It's up to the caller to request more pages in batch if necessary. Note that this implementation is not very efficient and could be improved but it would require refactoring. The intent is to make it available early to determine what semantics are required by different callers. Once the full semantics are nailed down, it can be refactored. 
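As a rough sketch of the calling convention this implies (a hypothetical caller, not code from this series; the helper name grab_pages() and the 'want' count are invented, and the list interface is renamed to alloc_pages_bulk_list() by a later patch in this series):

/* Minimal sketch: gather 'want' order-0 pages onto 'pages', asking again
 * when a bulk call returns fewer pages than requested. */
static unsigned long grab_pages(unsigned long want, struct list_head *pages)
{
	unsigned long have = 0;

	while (have < want) {
		unsigned long got = alloc_pages_bulk(GFP_KERNEL, want - have,
						     pages);

		if (!got)
			break;	/* allocation failure: reclaim or give up */
		have += got;
	}
	return have;
}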
[mgorman@techsingularity.net: fix alloc_pages_bulk() return type, per Matthew] Link: https://lkml.kernel.org/r/20210325123713.GQ3697@techsingularity.net [mgorman@techsingularity.net: fix uninit var warning] Link: https://lkml.kernel.org/r/20210330114847.GX3697@techsingularity.net [mgorman@techsingularity.net: fix comment, per Vlastimil] Link: https://lkml.kernel.org/r/20210412110255.GV3697@techsingularity.net Link: https://lkml.kernel.org/r/20210325114228.27719-3-mgorman@techsingularity.net Signed-off-by: Mel Gorman Acked-by: Vlastimil Babka Reviewed-by: Alexander Lobakin Tested-by: Colin Ian King Cc: Alexander Duyck Cc: Christoph Hellwig Cc: Chuck Lever Cc: David Miller Cc: Ilias Apalodimas Cc: Jesper Dangaard Brouer Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 11 +++++ mm/page_alloc.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 129 insertions(+) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0a88f84b08f4..a2be8f4174a9 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -518,6 +518,17 @@ static inline int arch_make_page_accessible(struct page *page) struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask); +unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, + nodemask_t *nodemask, int nr_pages, + struct list_head *list); + +/* Bulk allocate order-0 pages */ +static inline unsigned long +alloc_pages_bulk(gfp_t gfp, unsigned long nr_pages, struct list_head *list) +{ + return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list); +} + /* * Allocate pages, preferring the node given as nid. The node must be valid and * online. For more general interface, see alloc_pages_node(). diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fd283d9b6a4d..d8209d48a543 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5006,6 +5006,124 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, return true; } +/* + * __alloc_pages_bulk - Allocate a number of order-0 pages to a list + * @gfp: GFP flags for the allocation + * @preferred_nid: The preferred NUMA node ID to allocate from + * @nodemask: Set of nodes to allocate from, may be NULL + * @nr_pages: The number of pages desired on the list + * @page_list: List to store the allocated pages + * + * This is a batched version of the page allocator that attempts to + * allocate nr_pages quickly and add them to a list. + * + * Returns the number of pages on the list. + */ +unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, + nodemask_t *nodemask, int nr_pages, + struct list_head *page_list) +{ + struct page *page; + unsigned long flags; + struct zone *zone; + struct zoneref *z; + struct per_cpu_pages *pcp; + struct list_head *pcp_list; + struct alloc_context ac; + gfp_t alloc_gfp; + unsigned int alloc_flags = ALLOC_WMARK_LOW; + int allocated = 0; + + if (WARN_ON_ONCE(nr_pages <= 0)) + return 0; + + /* Use the single page allocator for one page. */ + if (nr_pages == 1) + goto failed; + + /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ + gfp &= gfp_allowed_mask; + alloc_gfp = gfp; + if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) + return 0; + gfp = alloc_gfp; + + /* Find an allowed local zone that meets the low watermark. 
*/ + for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { + unsigned long mark; + + if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && + !__cpuset_zone_allowed(zone, gfp)) { + continue; + } + + if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && + zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { + goto failed; + } + + mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; + if (zone_watermark_fast(zone, 0, mark, + zonelist_zone_idx(ac.preferred_zoneref), + alloc_flags, gfp)) { + break; + } + } + + /* + * If there are no allowed local zones that meets the watermarks then + * try to allocate a single page and reclaim if necessary. + */ + if (!zone) + goto failed; + + /* Attempt the batch allocation */ + local_irq_save(flags); + pcp = &this_cpu_ptr(zone->pageset)->pcp; + pcp_list = &pcp->lists[ac.migratetype]; + + while (allocated < nr_pages) { + page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags, + pcp, pcp_list); + if (!page) { + /* Try and get at least one page */ + if (!allocated) + goto failed_irq; + break; + } + + /* + * Ideally this would be batched but the best way to do + * that cheaply is to first convert zone_statistics to + * be inaccurate per-cpu counter like vm_events to avoid + * a RMW cycle then do the accounting with IRQs enabled. + */ + __count_zid_vm_events(PGALLOC, zone_idx(zone), 1); + zone_statistics(ac.preferred_zoneref->zone, zone); + + prep_new_page(page, 0, gfp, 0); + list_add(&page->lru, page_list); + allocated++; + } + + local_irq_restore(flags); + + return allocated; + +failed_irq: + local_irq_restore(flags); + +failed: + page = __alloc_pages(gfp, 0, preferred_nid, nodemask); + if (page) { + list_add(&page->lru, page_list); + allocated = 1; + } + + return allocated; +} +EXPORT_SYMBOL_GPL(__alloc_pages_bulk); + /* * This is the 'heart' of the zoned buddy allocator. */ -- cgit v1.2.3-71-gd317 From 0f87d9d30f21390dd71114f30e63038980e6eb3f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 29 Apr 2021 23:01:48 -0700 Subject: mm/page_alloc: add an array-based interface to the bulk page allocator The proposed callers for the bulk allocator store pages from the bulk allocator in an array. This patch adds an array-based interface to the API to avoid multiple list iterations. The page list interface is preserved to avoid requiring all users of the bulk API to allocate and manage enough storage to store the pages. 
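For the array form, a caller might look roughly like this (hypothetical helper and ring size, not taken from this series):

#define RING_SIZE 64	/* illustrative only */

/* Minimal sketch: top up the NULL slots of a fixed page array, e.g. a
 * receive ring. Populated entries are skipped, and the return value is
 * the number of populated slots, so RING_SIZE means the ring is full. */
static bool refill_ring(struct page **ring)
{
	return alloc_pages_bulk_array(GFP_ATOMIC, RING_SIZE, ring) == RING_SIZE;
}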
[akpm@linux-foundation.org: remove now unused local `allocated'] Link: https://lkml.kernel.org/r/20210325114228.27719-4-mgorman@techsingularity.net Signed-off-by: Mel Gorman Reviewed-by: Alexander Lobakin Acked-by: Vlastimil Babka Cc: Alexander Duyck Cc: Christoph Hellwig Cc: Chuck Lever Cc: David Miller Cc: Ilias Apalodimas Cc: Jesper Dangaard Brouer Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 13 +++++++++--- mm/page_alloc.c | 60 +++++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 54 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index a2be8f4174a9..26f4d907254a 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -520,13 +520,20 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, - struct list_head *list); + struct list_head *page_list, + struct page **page_array); /* Bulk allocate order-0 pages */ static inline unsigned long -alloc_pages_bulk(gfp_t gfp, unsigned long nr_pages, struct list_head *list) +alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list) { - return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list); + return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL); +} + +static inline unsigned long +alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array) +{ + return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array); } /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d8209d48a543..e240704b5d39 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5007,21 +5007,29 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, } /* - * __alloc_pages_bulk - Allocate a number of order-0 pages to a list + * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array * @gfp: GFP flags for the allocation * @preferred_nid: The preferred NUMA node ID to allocate from * @nodemask: Set of nodes to allocate from, may be NULL - * @nr_pages: The number of pages desired on the list - * @page_list: List to store the allocated pages + * @nr_pages: The number of pages desired on the list or array + * @page_list: Optional list to store the allocated pages + * @page_array: Optional array to store the pages * * This is a batched version of the page allocator that attempts to - * allocate nr_pages quickly and add them to a list. + * allocate nr_pages quickly. Pages are added to page_list if page_list + * is not NULL, otherwise it is assumed that the page_array is valid. * - * Returns the number of pages on the list. + * For lists, nr_pages is the number of pages that should be allocated. + * + * For arrays, only NULL elements are populated with pages and nr_pages + * is the maximum number of pages that will be stored in the array. + * + * Returns the number of pages on the list or array. 
*/ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, - struct list_head *page_list) + struct list_head *page_list, + struct page **page_array) { struct page *page; unsigned long flags; @@ -5032,13 +5040,20 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, struct alloc_context ac; gfp_t alloc_gfp; unsigned int alloc_flags = ALLOC_WMARK_LOW; - int allocated = 0; + int nr_populated = 0; if (WARN_ON_ONCE(nr_pages <= 0)) return 0; + /* + * Skip populated array elements to determine if any pages need + * to be allocated before disabling IRQs. + */ + while (page_array && page_array[nr_populated] && nr_populated < nr_pages) + nr_populated++; + /* Use the single page allocator for one page. */ - if (nr_pages == 1) + if (nr_pages - nr_populated == 1) goto failed; /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ @@ -5082,12 +5097,19 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, pcp = &this_cpu_ptr(zone->pageset)->pcp; pcp_list = &pcp->lists[ac.migratetype]; - while (allocated < nr_pages) { + while (nr_populated < nr_pages) { + + /* Skip existing pages */ + if (page_array && page_array[nr_populated]) { + nr_populated++; + continue; + } + page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags, pcp, pcp_list); if (!page) { /* Try and get at least one page */ - if (!allocated) + if (!nr_populated) goto failed_irq; break; } @@ -5102,13 +5124,16 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, zone_statistics(ac.preferred_zoneref->zone, zone); prep_new_page(page, 0, gfp, 0); - list_add(&page->lru, page_list); - allocated++; + if (page_list) + list_add(&page->lru, page_list); + else + page_array[nr_populated] = page; + nr_populated++; } local_irq_restore(flags); - return allocated; + return nr_populated; failed_irq: local_irq_restore(flags); @@ -5116,11 +5141,14 @@ failed_irq: failed: page = __alloc_pages(gfp, 0, preferred_nid, nodemask); if (page) { - list_add(&page->lru, page_list); - allocated = 1; + if (page_list) + list_add(&page->lru, page_list); + else + page_array[nr_populated] = page; + nr_populated++; } - return allocated; + return nr_populated; } EXPORT_SYMBOL_GPL(__alloc_pages_bulk); -- cgit v1.2.3-71-gd317 From 198fba4137a1803a9cb93992b56c2ecba1aa83a3 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Thu, 29 Apr 2021 23:02:16 -0700 Subject: mm/mmzone.h: fix existing kernel-doc comments and link them to core-api There are a couple of kernel-doc comments in include/linux/mmzone.h but they have minor formatting issues that would cause kernel-doc warnings. Fix the formatting of those comments, add missing Return: descriptions and link include/linux/mmzone.h to Documentation/core-api/mm-api.rst Link: https://lkml.kernel.org/r/20210426141927.1314326-2-rppt@kernel.org Signed-off-by: Mike Rapoport Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Anshuman Khandual Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/core-api/mm-api.rst | 1 + include/linux/mmzone.h | 43 ++++++++++++++++++++++----------------- 2 files changed, 25 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst index 34f46df91a8b..a42f9baddfbf 100644 --- a/Documentation/core-api/mm-api.rst +++ b/Documentation/core-api/mm-api.rst @@ -97,3 +97,4 @@ More Memory Management Functions :internal: .. kernel-doc:: include/linux/mm.h :internal: +.. 
kernel-doc:: include/linux/mmzone.h diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 47946cec7584..3b2205741048 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -993,7 +993,8 @@ static inline int is_highmem_idx(enum zone_type idx) * is_highmem - helper function to quickly check if a struct zone is a * highmem zone or not. This is an attempt to keep references * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. - * @zone - pointer to struct zone variable + * @zone: pointer to struct zone variable + * Return: 1 for a highmem zone, 0 otherwise */ static inline int is_highmem(struct zone *zone) { @@ -1044,7 +1045,7 @@ extern struct zone *next_zone(struct zone *zone); /** * for_each_online_pgdat - helper macro to iterate over all online nodes - * @pgdat - pointer to a pg_data_t variable + * @pgdat: pointer to a pg_data_t variable */ #define for_each_online_pgdat(pgdat) \ for (pgdat = first_online_pgdat(); \ @@ -1052,7 +1053,7 @@ extern struct zone *next_zone(struct zone *zone); pgdat = next_online_pgdat(pgdat)) /** * for_each_zone - helper macro to iterate over all memory zones - * @zone - pointer to struct zone variable + * @zone: pointer to struct zone variable * * The user only needs to declare the zone variable, for_each_zone * fills it in. @@ -1091,15 +1092,18 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z, /** * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point - * @z - The cursor used as a starting point for the search - * @highest_zoneidx - The zone index of the highest zone to return - * @nodes - An optional nodemask to filter the zonelist with + * @z: The cursor used as a starting point for the search + * @highest_zoneidx: The zone index of the highest zone to return + * @nodes: An optional nodemask to filter the zonelist with * * This function returns the next zone at or below a given zone index that is * within the allowed nodemask using a cursor as the starting point for the * search. The zoneref returned is a cursor that represents the current zone * being examined. It should be advanced by one before calling * next_zones_zonelist again. + * + * Return: the next zone at or below highest_zoneidx within the allowed + * nodemask using a cursor within a zonelist as a starting point */ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, @@ -1112,10 +1116,9 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, /** * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist - * @zonelist - The zonelist to search for a suitable zone - * @highest_zoneidx - The zone index of the highest zone to return - * @nodes - An optional nodemask to filter the zonelist with - * @return - Zoneref pointer for the first suitable zone found (see below) + * @zonelist: The zonelist to search for a suitable zone + * @highest_zoneidx: The zone index of the highest zone to return + * @nodes: An optional nodemask to filter the zonelist with * * This function returns the first zone at or below a given zone index that is * within the allowed nodemask. The zoneref returned is a cursor that can be @@ -1125,6 +1128,8 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is * never NULL). 
This may happen either genuinely, or due to concurrent nodemask * update due to cpuset modification. + * + * Return: Zoneref pointer for the first suitable zone found */ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, enum zone_type highest_zoneidx, @@ -1136,11 +1141,11 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, /** * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask - * @zone - The current zone in the iterator - * @z - The current pointer within zonelist->_zonerefs being iterated - * @zlist - The zonelist being iterated - * @highidx - The zone index of the highest zone to return - * @nodemask - Nodemask allowed by the allocator + * @zone: The current zone in the iterator + * @z: The current pointer within zonelist->_zonerefs being iterated + * @zlist: The zonelist being iterated + * @highidx: The zone index of the highest zone to return + * @nodemask: Nodemask allowed by the allocator * * This iterator iterates though all zones at or below a given zone index and * within a given nodemask @@ -1160,10 +1165,10 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, /** * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index - * @zone - The current zone in the iterator - * @z - The current pointer within zonelist->zones being iterated - * @zlist - The zonelist being iterated - * @highidx - The zone index of the highest zone to return + * @zone: The current zone in the iterator + * @z: The current pointer within zonelist->zones being iterated + * @zlist: The zonelist being iterated + * @highidx: The zone index of the highest zone to return * * This iterator iterates though all zones at or below a given zone index. */ -- cgit v1.2.3-71-gd317 From 384d0c68204a4a657f4bbc096c50d729ae7d9ef0 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 12 Feb 2021 11:02:47 +0100 Subject: PCI/VPD: Remove pci_set_vpd_size() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 24a1720a0841 ("cxgb4: collect serial config version from register") removed the only usage of pci_set_vpd_size(). If a device needs to override the auto-detected VPD size, then this can be done with a PCI quirk, as is done for Chelsio devices. There's no need to allow drivers to change the VPD size. Remove pci_set_vpd_size(). 
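A device that still needs to override the detected size would do so along these lines (hypothetical vendor/device IDs and size; since this patch makes pci_vpd_set_size() a file-local helper, such a quirk has to live in drivers/pci/vpd.c, as the Chelsio one does):

static void quirk_example_vpd_size(struct pci_dev *dev)
{
	/* Force an 8 KiB VPD window instead of the auto-detected size. */
	pci_vpd_set_size(dev, 8192);
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example_vpd_size);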
[bhelgaas: squash in Arnd's fix for "'pci_vpd_set_size' defined but not used" from https://lore.kernel.org/r/20210421140334.3847155-1-arnd@kernel.org] Link: https://lore.kernel.org/r/47d86e52-9bcf-7da7-1edb-0d988a7a82ab@gmail.com Signed-off-by: Heiner Kallweit Signed-off-by: Bjorn Helgaas Reviewed-by: Krzysztof Wilczyński --- drivers/pci/vpd.c | 58 ++++++++++++----------------------------------------- include/linux/pci.h | 1 - 2 files changed, 13 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c index ab81c7a5185f..a1d31c5d1864 100644 --- a/drivers/pci/vpd.c +++ b/drivers/pci/vpd.c @@ -16,7 +16,6 @@ struct pci_vpd_ops { ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); - int (*set_size)(struct pci_dev *dev, size_t len); }; struct pci_vpd { @@ -60,19 +59,6 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void } EXPORT_SYMBOL(pci_write_vpd); -/** - * pci_set_vpd_size - Set size of Vital Product Data space - * @dev: pci device struct - * @len: size of vpd space - */ -int pci_set_vpd_size(struct pci_dev *dev, size_t len) -{ - if (!dev->vpd || !dev->vpd->ops) - return -ENODEV; - return dev->vpd->ops->set_size(dev, len); -} -EXPORT_SYMBOL(pci_set_vpd_size); - #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1) /** @@ -297,23 +283,9 @@ out: return ret ? ret : count; } -static int pci_vpd_set_size(struct pci_dev *dev, size_t len) -{ - struct pci_vpd *vpd = dev->vpd; - - if (len == 0 || len > PCI_VPD_MAX_SIZE) - return -EIO; - - vpd->valid = 1; - vpd->len = len; - - return 0; -} - static const struct pci_vpd_ops pci_vpd_ops = { .read = pci_vpd_read, .write = pci_vpd_write, - .set_size = pci_vpd_set_size, }; static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, @@ -346,24 +318,9 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count, return ret; } -static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len) -{ - struct pci_dev *tdev = pci_get_slot(dev->bus, - PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); - int ret; - - if (!tdev) - return -ENODEV; - - ret = pci_set_vpd_size(tdev, len); - pci_dev_put(tdev); - return ret; -} - static const struct pci_vpd_ops pci_vpd_f0_ops = { .read = pci_vpd_f0_read, .write = pci_vpd_f0_write, - .set_size = pci_vpd_f0_set_size, }; int pci_vpd_init(struct pci_dev *dev) @@ -564,6 +521,17 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd); +static void pci_vpd_set_size(struct pci_dev *dev, size_t len) +{ + struct pci_vpd *vpd = dev->vpd; + + if (!vpd || len == 0 || len > PCI_VPD_MAX_SIZE) + return; + + vpd->valid = 1; + vpd->len = len; +} + static void quirk_chelsio_extend_vpd(struct pci_dev *dev) { int chip = (dev->device & 0xf000) >> 12; @@ -582,9 +550,9 @@ static void quirk_chelsio_extend_vpd(struct pci_dev *dev) * limits. 
*/ if (chip == 0x0 && prod >= 0x20) - pci_set_vpd_size(dev, 8192); + pci_vpd_set_size(dev, 8192); else if (chip >= 0x4 && func < 0x8) - pci_set_vpd_size(dev, 2048); + pci_vpd_set_size(dev, 2048); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, diff --git a/include/linux/pci.h b/include/linux/pci.h index 86c799c97b77..edadc62ae058 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1302,7 +1302,6 @@ void pci_unlock_rescan_remove(void); /* Vital Product Data routines */ ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); -int pci_set_vpd_size(struct pci_dev *dev, size_t len); /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); -- cgit v1.2.3-71-gd317 From 4cf0abbce69bde3d07757dfa9be6420407fdbc45 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 1 Apr 2021 18:43:15 +0200 Subject: PCI/VPD: Remove pci_vpd_find_tag() 'offset' argument All callers pass 0 as offset. Therefore remove the parameter and use a fixed offset 0 in pci_vpd_find_tag(). Link: https://lore.kernel.org/r/f62e6e19-5423-2ead-b2bd-62844b23ef8f@gmail.com Signed-off-by: Heiner Kallweit Signed-off-by: Bjorn Helgaas --- drivers/net/ethernet/broadcom/bnx2.c | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3 +-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/tg3.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2 +- drivers/net/ethernet/sfc/efx.c | 2 +- drivers/net/ethernet/sfc/falcon/efx.c | 2 +- drivers/pci/vpd.c | 4 ++-- drivers/scsi/cxlflash/main.c | 3 +-- include/linux/pci.h | 3 +-- 10 files changed, 12 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 3e8a179f39db..c0986096c701 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8057,7 +8057,7 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp) data[i + 3] = data[i + BNX2_VPD_LEN]; } - i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto vpd_done; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b652ed72a621..d267e45a0518 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12207,8 +12207,7 @@ static void bnx2x_read_fwinfo(struct bnx2x *bp) /* VPD RO tag should be first tag after identifier string, hence * we should be able to find it in first BNX2X_VPD_LEN chars */ - i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, - PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(vpd_start, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto out_not_found; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a680fd9c68ea..2bccdac28a24 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -12668,7 +12668,7 @@ static void bnxt_vpd_read_info(struct bnxt *bp) goto exit; } - i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); if (i < 0) { netdev_err(bp->dev, "VPD READ-Only not found\n"); goto exit; diff --git a/drivers/net/ethernet/broadcom/tg3.c 
b/drivers/net/ethernet/broadcom/tg3.c index d2381929931b..b0e49643f483 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -13016,7 +13016,7 @@ static int tg3_test_nvram(struct tg3 *tp) if (!buf) return -ENOMEM; - i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag((u8 *)buf, len, PCI_VPD_LRDT_RO_DATA); if (i > 0) { j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); if (j < 0) @@ -15629,7 +15629,7 @@ static void tg3_read_vpd(struct tg3 *tp) if (!vpd_data) goto out_no_vpd; - i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(vpd_data, vpdlen, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto out_not_found; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 98829e482bfa..ef5d10e1cce6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2774,7 +2774,7 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) if (id_len > ID_LEN) id_len = ID_LEN; - i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) { dev_err(adapter->pdev_dev, "missing VPD-R section\n"); ret = -EINVAL; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 36c8625a6fd7..c746ca7235f1 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -920,7 +920,7 @@ static void efx_probe_vpd_strings(struct efx_nic *efx) } /* Get the Read only section */ - ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); if (ro_start < 0) { netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); return; diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index f8979991970e..5e7a57b680ca 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2800,7 +2800,7 @@ static void ef4_probe_vpd_strings(struct ef4_nic *efx) } /* Get the Read only section */ - ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); if (ro_start < 0) { netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); return; diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c index c1c4f7d80a04..79d4313c91a3 100644 --- a/drivers/pci/vpd.c +++ b/drivers/pci/vpd.c @@ -410,11 +410,11 @@ void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev) } } -int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt) +int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt) { int i; - for (i = off; i < len; ) { + for (i = 0; i < len; ) { u8 val = buf[i]; if (val & PCI_VPD_LRDT) { diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index e72440d919d2..ae0963cec222 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -1649,8 +1649,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) } /* Get the read only section offset */ - ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, - PCI_VPD_LRDT_RO_DATA); + ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); if (unlikely(ro_start < 0)) { dev_err(dev, "%s: VPD Read-only data not found\n", __func__); rc = -ENODEV; diff --git a/include/linux/pci.h b/include/linux/pci.h index edadc62ae058..1eb35c09674e 100644 --- a/include/linux/pci.h +++ 
b/include/linux/pci.h @@ -2310,14 +2310,13 @@ static inline u8 pci_vpd_info_field_size(const u8 *info_field) /** * pci_vpd_find_tag - Locates the Resource Data Type tag provided * @buf: Pointer to buffered vpd data - * @off: The offset into the buffer at which to begin the search * @len: The length of the vpd buffer * @rdt: The Resource Data Type to search for * * Returns the index where the Resource Data Type was found or * -ENOENT otherwise. */ -int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt); +int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt); /** * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD -- cgit v1.2.3-71-gd317 From d0f9164eb294aeb884cbe36ddbbae34fa0124aa1 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 6 Apr 2021 20:04:44 +0300 Subject: vdpa: Follow kdoc comment style Follow comment style mentioned in the Writing kernel-doc document [1]. Following warnings are fixed. $ scripts/kernel-doc -v -none include/linux/vdpa.h include/linux/vdpa.h:11: warning: missing initial short description on line: * vDPA callback definition. include/linux/vdpa.h:11: info: Scanning doc for vDPA include/linux/vdpa.h:15: warning: cannot understand function prototype: 'struct vdpa_callback ' include/linux/vdpa.h:21: warning: missing initial short description on line: * vDPA notification area include/linux/vdpa.h:21: info: Scanning doc for vDPA include/linux/vdpa.h:25: warning: cannot understand function prototype: 'struct vdpa_notification_area ' include/linux/vdpa.h:31: warning: missing initial short description on line: * vDPA vq_state definition include/linux/vdpa.h:31: info: Scanning doc for vDPA include/linux/vdpa.h:34: warning: cannot understand function prototype: 'struct vdpa_vq_state ' include/linux/vdpa.h:41: info: Scanning doc for vDPA device include/linux/vdpa.h:51: warning: cannot understand function prototype: 'struct vdpa_device ' include/linux/vdpa.h:62: info: Scanning doc for vDPA IOVA range include/linux/vdpa.h:66: warning: cannot understand function prototype: 'struct vdpa_iova_range ' include/linux/vdpa.h:72: info: Scanning doc for vDPA_config_ops include/linux/vdpa.h:203: warning: cannot understand function prototype: 'struct vdpa_config_ops ' include/linux/vdpa.h:270: info: Scanning doc for vdpa_driver include/linux/vdpa.h:275: warning: cannot understand function prototype: 'struct vdpa_driver ' include/linux/vdpa.h:347: info: Scanning doc for vdpa_mgmtdev_ops include/linux/vdpa.h:360: warning: cannot understand function prototype: 'struct vdpa_mgmtdev_ops ' After this fix: scripts/kernel-doc -v -none include/linux/vdpa.h include/linux/vdpa.h:11: info: Scanning doc for struct vdpa_calllback include/linux/vdpa.h:21: info: Scanning doc for struct vdpa_notification_area include/linux/vdpa.h:31: info: Scanning doc for struct vdpa_vq_state include/linux/vdpa.h:41: info: Scanning doc for struct vdpa_device include/linux/vdpa.h:62: info: Scanning doc for struct vdpa_iova_range include/linux/vdpa.h:72: info: Scanning doc for struct vdpa_config_ops include/linux/vdpa.h:270: info: Scanning doc for struct vdpa_driver include/linux/vdpa.h:347: info: Scanning doc for struct vdpa_mgmtdev_ops [1] https://www.kernel.org/doc/html/latest/doc-guide/kernel-doc.html Signed-off-by: Parav Pandit Reviewed-by: Eli Cohen Link: https://lore.kernel.org/r/20210406170457.98481-2-parav@nvidia.com Signed-off-by: Michael S. 
Tsirkin Acked-by: Jason Wang Acked-by: Jason Wang --- include/linux/vdpa.h | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 15fa085fab05..37b65ca940cf 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -8,7 +8,7 @@ #include /** - * vDPA callback definition. + * struct vdpa_calllback - vDPA callback definition. * @callback: interrupt callback function * @private: the data passed to the callback function */ @@ -18,7 +18,7 @@ struct vdpa_callback { }; /** - * vDPA notification area + * struct vdpa_notification_area - vDPA notification area * @addr: base address of the notification area * @size: size of the notification area */ @@ -28,7 +28,7 @@ struct vdpa_notification_area { }; /** - * vDPA vq_state definition + * struct vdpa_vq_state - vDPA vq_state definition * @avail_index: available index */ struct vdpa_vq_state { @@ -38,7 +38,7 @@ struct vdpa_vq_state { struct vdpa_mgmt_dev; /** - * vDPA device - representation of a vDPA device + * struct vdpa_device - representation of a vDPA device * @dev: underlying device * @dma_dev: the actual device that is performing DMA * @config: the configuration ops for this device. @@ -59,7 +59,7 @@ struct vdpa_device { }; /** - * vDPA IOVA range - the IOVA range support by the device + * struct vdpa_iova_range - the IOVA range support by the device * @first: start of the IOVA range * @last: end of the IOVA range */ @@ -69,7 +69,7 @@ struct vdpa_iova_range { }; /** - * vDPA_config_ops - operations for configuring a vDPA device. + * struct vdpa_config_ops - operations for configuring a vDPA device. * Note: vDPA device drivers are required to implement all of the * operations unless it is mentioned to be optional in the following * list. @@ -267,7 +267,7 @@ int _vdpa_register_device(struct vdpa_device *vdev, int nvqs); void _vdpa_unregister_device(struct vdpa_device *vdev); /** - * vdpa_driver - operations for a vDPA driver + * struct vdpa_driver - operations for a vDPA driver * @driver: underlying device driver * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function to call when a device is removed. @@ -344,18 +344,18 @@ static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, } /** - * vdpa_mgmtdev_ops - vdpa device ops - * @dev_add: Add a vdpa device using alloc and register - * @mdev: parent device to use for device addition - * @name: name of the new vdpa device - * Driver need to add a new device using _vdpa_register_device() - * after fully initializing the vdpa device. Driver must return 0 - * on success or appropriate error code. - * @dev_del: Remove a vdpa device using unregister - * @mdev: parent device to use for device removal - * @dev: vdpa device to remove - * Driver need to remove the specified device by calling - * _vdpa_unregister_device(). + * struct vdpa_mgmtdev_ops - vdpa device ops + * @dev_add: Add a vdpa device using alloc and register + * @mdev: parent device to use for device addition + * @name: name of the new vdpa device + * Driver need to add a new device using _vdpa_register_device() + * after fully initializing the vdpa device. Driver must return 0 + * on success or appropriate error code. + * @dev_del: Remove a vdpa device using unregister + * @mdev: parent device to use for device removal + * @dev: vdpa device to remove + * Driver need to remove the specified device by calling + * _vdpa_unregister_device(). 
*/ struct vdpa_mgmtdev_ops { int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name); -- cgit v1.2.3-71-gd317 From 9e3bb9b79a7131a088cfffbdcc30e747dad9d090 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 15 Apr 2021 03:31:41 -0400 Subject: virtio_pci_modern: introduce helper to map vq notify area This patch factors out the logic of vq notify area mapping. Following patches will switch to use this common helpers for both virtio_pci library and virtio-pci vDPA driver. Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20210415073147.19331-2-jasowang@redhat.com Signed-off-by: Michael S. Tsirkin Reviewed-by: Eli Cohen --- drivers/virtio/virtio_pci_modern_dev.c | 35 ++++++++++++++++++++++++++++++++++ include/linux/virtio_pci_modern.h | 2 ++ 2 files changed, 37 insertions(+) (limited to 'include/linux') diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index cbd667496bb1..28cb5847fafa 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -593,6 +593,41 @@ u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, } EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off); +/* + * vp_modern_map_vq_notify - map notification area for a + * specific virtqueue + * @mdev: the modern virtio-pci device + * @index: the queue index + * + * Returns the address of the notification area + */ +void *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, + u16 index) +{ + u16 off = vp_modern_get_queue_notify_off(mdev, index); + + if (mdev->notify_base) { + /* offset should not wrap */ + if ((u64)off * mdev->notify_offset_multiplier + 2 + > mdev->notify_len) { + dev_warn(&mdev->pci_dev->dev, + "bad notification offset %u (x %u) " + "for queue %u > %zd", + off, mdev->notify_offset_multiplier, + index, mdev->notify_len); + return NULL; + } + return (void __force *)mdev->notify_base + + off * mdev->notify_offset_multiplier; + } else { + return (void __force *)vp_modern_map_capability(mdev, + mdev->notify_map_cap, 2, 2, + off * mdev->notify_offset_multiplier, 2, + NULL); + } +} +EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify); + MODULE_VERSION("0.1"); MODULE_DESCRIPTION("Modern Virtio PCI Device"); MODULE_AUTHOR("Jason Wang "); diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index f26acbeec965..1b95d39b00fc 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -106,6 +106,8 @@ void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, in u32 align, u32 start, u32 size, size_t *len); +void *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, + u16 index); int vp_modern_probe(struct virtio_pci_modern_device *mdev); void vp_modern_remove(struct virtio_pci_modern_device *mdev); #endif -- cgit v1.2.3-71-gd317 From a5f7a24f49d81fab9f59611814a8817cc8a876a2 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 15 Apr 2021 03:31:44 -0400 Subject: virtio_pci_modern: hide vp_modern_get_queue_notify_off() All users (both virtio-pci library and vp_vdpa driver) has been switched to use vp_modern_map_vq_notify(). So there's no need to export the low level helper of vp_modern_get_queue_notify_off(). Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20210415073147.19331-5-jasowang@redhat.com Signed-off-by: Michael S. 
Tsirkin Reviewed-by: Eli Cohen --- drivers/virtio/virtio_pci_modern_dev.c | 5 ++--- include/linux/virtio_pci_modern.h | 2 -- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index 28cb5847fafa..5a657e56b46d 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -584,14 +584,13 @@ EXPORT_SYMBOL_GPL(vp_modern_get_num_queues); * * Returns the notification offset for a virtqueue */ -u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, - u16 index) +static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, + u16 index) { vp_iowrite16(index, &mdev->common->queue_select); return vp_ioread16(&mdev->common->queue_notify_off); } -EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off); /* * vp_modern_map_vq_notify - map notification area for a diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index 1b95d39b00fc..179a2fb4bf37 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -99,8 +99,6 @@ void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, u16 idx); u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); -u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, - u16 idx); void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, size_t minlen, u32 align, -- cgit v1.2.3-71-gd317 From fd466b36940b22a506265edf12714bd0cf9ed836 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 15 Apr 2021 03:31:45 -0400 Subject: virito_pci libray: hide vp_modern_map_capability() No user now and the capability should not be setup externally. Instead, every access to the capability should be done via virtio_pci_modern_device. Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20210415073147.19331-6-jasowang@redhat.com Signed-off-by: Michael S. Tsirkin Reviewed-by: Eli Cohen --- drivers/virtio/virtio_pci_modern_dev.c | 10 ++++------ include/linux/virtio_pci_modern.h | 5 ----- 2 files changed, 4 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index 5a657e56b46d..9c241c9bd920 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -16,11 +16,10 @@ * * Returns the io address of for the part of the capability */ -void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, - size_t minlen, - u32 align, - u32 start, u32 size, - size_t *len) +static void __iomem * +vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, + size_t minlen, u32 align, u32 start, u32 size, + size_t *len) { struct pci_dev *dev = mdev->pci_dev; u8 bar; @@ -90,7 +89,6 @@ void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, in length, offset, bar); return p; } -EXPORT_SYMBOL_GPL(vp_modern_map_capability); /** * virtio_pci_find_capability - walk capabilities to find device info. 
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index 179a2fb4bf37..e6e7072413c1 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -99,11 +99,6 @@ void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, u16 idx); u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); -void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, - size_t minlen, - u32 align, - u32 start, u32 size, - size_t *len); void *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, u16 index); int vp_modern_probe(struct virtio_pci_modern_device *mdev); -- cgit v1.2.3-71-gd317 From 9e311bcad73dc14bd0a736db6ad3d382227e11fe Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 15 Apr 2021 03:31:46 -0400 Subject: virtio-pci library: report resource address Sometimes it might be useful to report the capability physical address. One example is to report the physical address of the doorbell in order to be mapped by userspace. Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20210415073147.19331-7-jasowang@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/vdpa/virtio_pci/vp_vdpa.c | 3 ++- drivers/virtio/virtio_pci_modern.c | 2 +- drivers/virtio/virtio_pci_modern_dev.c | 24 +++++++++++++++++------- include/linux/virtio_pci_modern.h | 4 +++- 4 files changed, 23 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index 2afc90645660..98205e54d089 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -415,7 +415,8 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) for (i = 0; i < vp_vdpa->queues; i++) { vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR; - vp_vdpa->vring[i].notify = vp_modern_map_vq_notify(mdev, i); + vp_vdpa->vring[i].notify = + vp_modern_map_vq_notify(mdev, i, NULL); if (!vp_vdpa->vring[i].notify) { dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i); goto err; diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 29607d9bd95c..722ea44e7579 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -224,7 +224,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, virtqueue_get_avail_addr(vq), virtqueue_get_used_addr(vq)); - vq->priv = vp_modern_map_vq_notify(mdev, index); + vq->priv = vp_modern_map_vq_notify(mdev, index, NULL); if (!vq->priv) { err = -ENOMEM; goto err_map_notify; diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index 9c241c9bd920..ae87b3fa8858 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -13,13 +13,14 @@ * @start: start from the capability * @size: map size * @len: the length that is actually mapped + * @pa: physical address of the capability * * Returns the io address of for the part of the capability */ static void __iomem * vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, size_t minlen, u32 align, u32 start, u32 size, - size_t *len) + size_t *len, resource_size_t *pa) { struct pci_dev *dev = mdev->pci_dev; u8 bar; @@ -87,6 +88,9 @@ vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, dev_err(&dev->dev, "virtio_pci: unable to map virtio %u@%u on bar %i\n", length, offset, bar); + else if (pa) + *pa 
= pci_resource_start(dev, bar) + offset; + return p; } @@ -273,12 +277,12 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev) mdev->common = vp_modern_map_capability(mdev, common, sizeof(struct virtio_pci_common_cfg), 4, 0, sizeof(struct virtio_pci_common_cfg), - NULL); + NULL, NULL); if (!mdev->common) goto err_map_common; mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1, 0, 1, - NULL); + NULL, NULL); if (!mdev->isr) goto err_map_isr; @@ -306,7 +310,8 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev) mdev->notify_base = vp_modern_map_capability(mdev, notify, 2, 2, 0, notify_length, - &mdev->notify_len); + &mdev->notify_len, + &mdev->notify_pa); if (!mdev->notify_base) goto err_map_notify; } else { @@ -319,7 +324,8 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev) if (device) { mdev->device = vp_modern_map_capability(mdev, device, 0, 4, 0, PAGE_SIZE, - &mdev->device_len); + &mdev->device_len, + NULL); if (!mdev->device) goto err_map_device; } @@ -595,11 +601,12 @@ static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, * specific virtqueue * @mdev: the modern virtio-pci device * @index: the queue index + * @pa: the pointer to the physical address of the nofity area * * Returns the address of the notification area */ void *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, - u16 index) + u16 index, resource_size_t *pa) { u16 off = vp_modern_get_queue_notify_off(mdev, index); @@ -614,13 +621,16 @@ void *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, index, mdev->notify_len); return NULL; } + if (pa) + *pa = mdev->notify_pa + + off * mdev->notify_offset_multiplier; return (void __force *)mdev->notify_base + off * mdev->notify_offset_multiplier; } else { return (void __force *)vp_modern_map_capability(mdev, mdev->notify_map_cap, 2, 2, off * mdev->notify_offset_multiplier, 2, - NULL); + NULL, pa); } } EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify); diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index e6e7072413c1..cdfabbefacdf 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -13,6 +13,8 @@ struct virtio_pci_modern_device { void __iomem *device; /* Base of vq notifications (non-legacy mode). */ void __iomem *notify_base; + /* Physical base of vq notifications */ + resource_size_t notify_pa; /* Where to read and clear interrupt */ u8 __iomem *isr; @@ -100,7 +102,7 @@ u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, u16 idx); u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); void *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, - u16 index); + u16 index, resource_size_t *pa); int vp_modern_probe(struct virtio_pci_modern_device *mdev); void vp_modern_remove(struct virtio_pci_modern_device *mdev); #endif -- cgit v1.2.3-71-gd317 From f53d9910d009bc015b42d88114e2d86a93b0e6b7 Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Mon, 15 Mar 2021 17:34:38 +0100 Subject: vringh: add 'iotlb_lock' to synchronize iotlb accesses Usually iotlb accesses are synchronized with a spinlock. Let's request it as a new parameter in vringh_set_iotlb() and hold it when we navigate the iotlb in iotlb_translate() to avoid race conditions with any new additions/deletions of ranges from the ioltb. Acked-by: Jason Wang Signed-off-by: Stefano Garzarella Link: https://lore.kernel.org/r/20210315163450.254396-3-sgarzare@redhat.com Signed-off-by: Michael S. 
Tsirkin --- drivers/vdpa/vdpa_sim/vdpa_sim.c | 3 ++- drivers/vhost/vringh.c | 9 ++++++++- include/linux/vringh.h | 6 +++++- 3 files changed, 15 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index fc2ec9599753..a92c08880098 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -284,7 +284,8 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr) goto err_iommu; for (i = 0; i < dev_attr->nvqs; i++) - vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu); + vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu, + &vdpasim->iommu_lock); ret = iova_cache_get(); if (ret) diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 85d85faba058..f68122705719 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -1074,6 +1074,8 @@ static int iotlb_translate(const struct vringh *vrh, int ret = 0; u64 s = 0; + spin_lock(vrh->iotlb_lock); + while (len > s) { u64 size, pa, pfn; @@ -1103,6 +1105,8 @@ static int iotlb_translate(const struct vringh *vrh, ++ret; } + spin_unlock(vrh->iotlb_lock); + return ret; } @@ -1262,10 +1266,13 @@ EXPORT_SYMBOL(vringh_init_iotlb); * vringh_set_iotlb - initialize a vringh for a ring with IOTLB. * @vrh: the vring * @iotlb: iotlb associated with this vring + * @iotlb_lock: spinlock to synchronize the iotlb accesses */ -void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb) +void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb, + spinlock_t *iotlb_lock) { vrh->iotlb = iotlb; + vrh->iotlb_lock = iotlb_lock; } EXPORT_SYMBOL(vringh_set_iotlb); diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 59bd50f99291..9c077863c8f6 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -46,6 +46,9 @@ struct vringh { /* IOTLB for this vring */ struct vhost_iotlb *iotlb; + /* spinlock to synchronize IOTLB accesses */ + spinlock_t *iotlb_lock; + /* The function to call to notify the guest about added buffers */ void (*notify)(struct vringh *); }; @@ -258,7 +261,8 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) #if IS_REACHABLE(CONFIG_VHOST_IOTLB) -void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb); +void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb, + spinlock_t *iotlb_lock); int vringh_init_iotlb(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, -- cgit v1.2.3-71-gd317 From b8c06ad4d67db56ed6bdfb685c134da74e92a2c7 Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Mon, 15 Mar 2021 17:34:41 +0100 Subject: vringh: implement vringh_kiov_advance() In some cases, it may be useful to provide a way to skip a number of bytes in a vringh_kiov. Let's implement vringh_kiov_advance() for this purpose, reusing the code from vringh_iov_xfer(). We replace that code calling the new vringh_kiov_advance(). Acked-by: Jason Wang Signed-off-by: Stefano Garzarella Link: https://lore.kernel.org/r/20210315163450.254396-6-sgarzare@redhat.com Signed-off-by: Michael S. 
Tsirkin --- drivers/vhost/vringh.c | 41 +++++++++++++++++++++++++++++------------ include/linux/vringh.h | 2 ++ 2 files changed, 31 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 2a88e087afd8..4af8fa259d65 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -75,6 +75,34 @@ static inline int __vringh_get_head(const struct vringh *vrh, return head; } +/** + * vringh_kiov_advance - skip bytes from vring_kiov + * @iov: an iov passed to vringh_getdesc_*() (updated as we consume) + * @len: the maximum length to advance + */ +void vringh_kiov_advance(struct vringh_kiov *iov, size_t len) +{ + while (len && iov->i < iov->used) { + size_t partlen = min(iov->iov[iov->i].iov_len, len); + + iov->consumed += partlen; + iov->iov[iov->i].iov_len -= partlen; + iov->iov[iov->i].iov_base += partlen; + + if (!iov->iov[iov->i].iov_len) { + /* Fix up old iov element then increment. */ + iov->iov[iov->i].iov_len = iov->consumed; + iov->iov[iov->i].iov_base -= iov->consumed; + + iov->consumed = 0; + iov->i++; + } + + len -= partlen; + } +} +EXPORT_SYMBOL(vringh_kiov_advance); + /* Copy some bytes to/from the iovec. Returns num copied. */ static inline ssize_t vringh_iov_xfer(struct vringh *vrh, struct vringh_kiov *iov, @@ -95,19 +123,8 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh, done += partlen; len -= partlen; ptr += partlen; - iov->consumed += partlen; - iov->iov[iov->i].iov_len -= partlen; - iov->iov[iov->i].iov_base += partlen; - if (!iov->iov[iov->i].iov_len) { - /* Fix up old iov element then increment. */ - iov->iov[iov->i].iov_len = iov->consumed; - iov->iov[iov->i].iov_base -= iov->consumed; - - - iov->consumed = 0; - iov->i++; - } + vringh_kiov_advance(iov, partlen); } return done; } diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 9c077863c8f6..755211ebd195 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -199,6 +199,8 @@ static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov) kiov->iov = NULL; } +void vringh_kiov_advance(struct vringh_kiov *kiov, size_t len); + int vringh_getdesc_kern(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, -- cgit v1.2.3-71-gd317 From 14c9ac05ce09c8c6a89ffcca6ffb68707cba36c2 Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Mon, 15 Mar 2021 17:34:42 +0100 Subject: vringh: add vringh_kiov_length() helper This new helper returns the total number of bytes covered by a vringh_kiov. Suggested-by: Jason Wang Acked-by: Jason Wang Signed-off-by: Stefano Garzarella Link: https://lore.kernel.org/r/20210315163450.254396-7-sgarzare@redhat.com Signed-off-by: Michael S. 
Tsirkin --- include/linux/vringh.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 755211ebd195..84db7b8f912f 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -199,6 +199,17 @@ static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov) kiov->iov = NULL; } +static inline size_t vringh_kiov_length(struct vringh_kiov *kiov) +{ + size_t len = 0; + int i; + + for (i = kiov->i; i < kiov->used; i++) + len += kiov->iov[i].iov_len; + + return len; +} + void vringh_kiov_advance(struct vringh_kiov *kiov, size_t len); int vringh_getdesc_kern(struct vringh *vrh, -- cgit v1.2.3-71-gd317 From 442706f9f94d28fe3c9f188ae4ebbd6b40addffe Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Mon, 15 Mar 2021 17:34:44 +0100 Subject: vdpa: add get_config_size callback in vdpa_config_ops This new callback is used to get the size of the configuration space of vDPA devices. Signed-off-by: Stefano Garzarella Link: https://lore.kernel.org/r/20210315163450.254396-9-sgarzare@redhat.com Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- drivers/vdpa/ifcvf/ifcvf_main.c | 6 ++++++ drivers/vdpa/mlx5/net/mlx5_vnet.c | 6 ++++++ drivers/vdpa/vdpa_sim/vdpa_sim.c | 9 +++++++++ drivers/vdpa/virtio_pci/vp_vdpa.c | 8 ++++++++ include/linux/vdpa.h | 4 ++++ 5 files changed, 33 insertions(+) (limited to 'include/linux') diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index a16753f1de5b..44d7586019da 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -351,6 +351,11 @@ static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev) return IFCVF_QUEUE_ALIGNMENT; } +static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev) +{ + return sizeof(struct virtio_net_config); +} + static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev, unsigned int offset, void *buf, unsigned int len) @@ -411,6 +416,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = { .get_device_id = ifcvf_vdpa_get_device_id, .get_vendor_id = ifcvf_vdpa_get_vendor_id, .get_vq_align = ifcvf_vdpa_get_vq_align, + .get_config_size = ifcvf_vdpa_get_config_size, .get_config = ifcvf_vdpa_get_config, .set_config = ifcvf_vdpa_set_config, .set_config_cb = ifcvf_vdpa_set_config_cb, diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 25533db01f5f..189e4385df40 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1809,6 +1809,11 @@ err_setup: ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; } +static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev) +{ + return sizeof(struct virtio_net_config); +} + static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf, unsigned int len) { @@ -1895,6 +1900,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = { .get_vendor_id = mlx5_vdpa_get_vendor_id, .get_status = mlx5_vdpa_get_status, .set_status = mlx5_vdpa_set_status, + .get_config_size = mlx5_vdpa_get_config_size, .get_config = mlx5_vdpa_get_config, .set_config = mlx5_vdpa_set_config, .get_generation = mlx5_vdpa_get_generation, diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 14dc2d3d983e..98f793bc9376 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -462,6 +462,13 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status) spin_unlock(&vdpasim->lock); } +static size_t 
vdpasim_get_config_size(struct vdpa_device *vdpa) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + + return vdpasim->dev_attr.config_size; +} + static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset, void *buf, unsigned int len) { @@ -598,6 +605,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = { .get_vendor_id = vdpasim_get_vendor_id, .get_status = vdpasim_get_status, .set_status = vdpasim_set_status, + .get_config_size = vdpasim_get_config_size, .get_config = vdpasim_get_config, .set_config = vdpasim_set_config, .get_generation = vdpasim_get_generation, @@ -625,6 +633,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = { .get_vendor_id = vdpasim_get_vendor_id, .get_status = vdpasim_get_status, .set_status = vdpasim_set_status, + .get_config_size = vdpasim_get_config_size, .get_config = vdpasim_get_config, .set_config = vdpasim_set_config, .get_generation = vdpasim_get_generation, diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index 002b928d0ca1..c76ebb531212 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -296,6 +296,13 @@ static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa) return PAGE_SIZE; } +static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + return mdev->device_len; +} + static void vp_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset, void *buf, unsigned int len) @@ -369,6 +376,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = { .get_device_id = vp_vdpa_get_device_id, .get_vendor_id = vp_vdpa_get_vendor_id, .get_vq_align = vp_vdpa_get_vq_align, + .get_config_size = vp_vdpa_get_config_size, .get_config = vp_vdpa_get_config, .set_config = vp_vdpa_set_config, .set_config_cb = vp_vdpa_set_config_cb, diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 37b65ca940cf..f311d227aa1b 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -150,6 +150,9 @@ struct vdpa_iova_range { * @set_status: Set the device status * @vdev: vdpa device * @status: virtio device status + * @get_config_size: Get the size of the configuration space + * @vdev: vdpa device + * Returns size_t: configuration size * @get_config: Read from device specific configuration space * @vdev: vdpa device * @offset: offset from the beginning of @@ -231,6 +234,7 @@ struct vdpa_config_ops { u32 (*get_vendor_id)(struct vdpa_device *vdev); u8 (*get_status)(struct vdpa_device *vdev); void (*set_status)(struct vdpa_device *vdev, u8 status); + size_t (*get_config_size)(struct vdpa_device *vdev); void (*get_config)(struct vdpa_device *vdev, unsigned int offset, void *buf, unsigned int len); void (*set_config)(struct vdpa_device *vdev, unsigned int offset, -- cgit v1.2.3-71-gd317 From 801c6058d14a82179a7ee17a4b532cac6fad067f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 29 Apr 2021 15:19:37 +0000 Subject: bpf: Fix leakage of uninitialized bpf stack under speculation The current implemented mechanisms to mitigate data disclosure under speculation mainly address stack and map value oob access from the speculative domain. However, Piotr discovered that uninitialized BPF stack is not protected yet, and thus old data from the kernel stack, potentially including addresses of kernel structures, could still be extracted from that 512 bytes large window. 
The BPF stack is special compared to map values since it's not zero initialized for every program invocation, whereas map values /are/ zero initialized upon their initial allocation and thus cannot leak any prior data in either domain. In the non-speculative domain, the verifier ensures that every stack slot read must have a prior stack slot write by the BPF program to avoid such data leaking issue. However, this is not enough: for example, when the pointer arithmetic operation moves the stack pointer from the last valid stack offset to the first valid offset, the sanitation logic allows for any intermediate offsets during speculative execution, which could then be used to extract any restricted stack content via side-channel. Given for unprivileged stack pointer arithmetic the use of unknown but bounded scalars is generally forbidden, we can simply turn the register-based arithmetic operation into an immediate-based arithmetic operation without the need for masking. This also gives the benefit of reducing the needed instructions for the operation. Given after the work in 7fedb63a8307 ("bpf: Tighten speculative pointer arithmetic mask"), the aux->alu_limit already holds the final immediate value for the offset register with the known scalar. Thus, a simple mov of the immediate to AX register with using AX as the source for the original instruction is sufficient and possible now in this case. Reported-by: Piotr Krysiuk Signed-off-by: Daniel Borkmann Tested-by: Piotr Krysiuk Reviewed-by: Piotr Krysiuk Reviewed-by: John Fastabend Acked-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 5 +++-- kernel/bpf/verifier.c | 27 +++++++++++++++++---------- 2 files changed, 20 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 6023a1367853..06841517ab1e 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -302,10 +302,11 @@ struct bpf_verifier_state_list { }; /* Possible states for alu_state member. */ -#define BPF_ALU_SANITIZE_SRC 1U -#define BPF_ALU_SANITIZE_DST 2U +#define BPF_ALU_SANITIZE_SRC (1U << 0) +#define BPF_ALU_SANITIZE_DST (1U << 1) #define BPF_ALU_NEG_VALUE (1U << 2) #define BPF_ALU_NON_POINTER (1U << 3) +#define BPF_ALU_IMMEDIATE (1U << 4) #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ BPF_ALU_SANITIZE_DST) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index db347fb125e3..757476c91c98 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6496,6 +6496,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, { struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux; struct bpf_verifier_state *vstate = env->cur_state; + bool off_is_imm = tnum_is_const(off_reg->var_off); bool off_is_neg = off_reg->smin_value < 0; bool ptr_is_dst_reg = ptr_reg == dst_reg; u8 opcode = BPF_OP(insn->code); @@ -6526,6 +6527,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, alu_limit = abs(tmp_aux->alu_limit - alu_limit); } else { alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; + alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; alu_state |= ptr_is_dst_reg ? 
BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; } @@ -12371,7 +12373,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; struct bpf_insn *patch = &insn_buf[0]; - bool issrc, isneg; + bool issrc, isneg, isimm; u32 off_reg; aux = &env->insn_aux_data[i + delta]; @@ -12382,16 +12384,21 @@ static int do_misc_fixups(struct bpf_verifier_env *env) isneg = aux->alu_state & BPF_ALU_NEG_VALUE; issrc = (aux->alu_state & BPF_ALU_SANITIZE) == BPF_ALU_SANITIZE_SRC; + isimm = aux->alu_state & BPF_ALU_IMMEDIATE; off_reg = issrc ? insn->src_reg : insn->dst_reg; - if (isneg) - *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); - *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); - *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); - *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); - *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); - *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); - *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); + if (isimm) { + *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); + } else { + if (isneg) + *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); + *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); + *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); + *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); + *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); + *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); + *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); + } if (!issrc) *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); insn->src_reg = BPF_REG_AX; @@ -12399,7 +12406,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) insn->code = insn->code == code_add ? code_sub : code_add; *patch++ = *insn; - if (issrc && isneg) + if (issrc && isneg && !isimm) *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); cnt = patch - insn_buf; -- cgit v1.2.3-71-gd317 From cd2c7545ae1beac3b6aae033c7f31193b3255946 Mon Sep 17 00:00:00 2001 From: Changheun Lee Date: Mon, 3 May 2021 18:52:03 +0900 Subject: bio: limit bio max size bio size can grow up to 4GB when muli-page bvec is enabled. but sometimes it would lead to inefficient behaviors. in case of large chunk direct I/O, - 32MB chunk read in user space - all pages for 32MB would be merged to a bio structure if the pages physical addresses are contiguous. it makes some delay to submit until merge complete. bio max size should be limited to a proper size. When 32MB chunk read with direct I/O option is coming from userspace, kernel behavior is below now in do_direct_IO() loop. it's timeline. | bio merge for 32MB. total 8,192 pages are merged. | total elapsed time is over 2ms. |------------------ ... ----------------------->| | 8,192 pages merged a bio. | at this time, first bio submit is done. | 1 bio is split to 32 read request and issue. |---------------> |---------------> |---------------> ...... |---------------> |--------------->| total 19ms elapsed to complete 32MB read done from device. | If bio max size is limited with 1MB, behavior is changed below. | bio merge for 1MB. 256 pages are merged for each bio. | total 32 bio will be made. | total elapsed time is over 2ms. it's same. | but, first bio submit timing is fast. about 100us. |--->|--->|--->|---> ... -->|--->|--->|--->|--->| | 256 pages merged a bio. | at this time, first bio submit is done. | and 1 read request is issued for 1 bio. |---------------> |---------------> |---------------> ...... 
|---------------> |--------------->| total 17ms elapsed to complete 32MB read done from device. | As a result, read request issue timing is faster if bio max size is limited. Current kernel behavior with multipage bvec, super large bio can be created. And it lead to delay first I/O request issue. Signed-off-by: Changheun Lee Reviewed-by: Bart Van Assche Link: https://lore.kernel.org/r/20210503095203.29076-1-nanich.lee@samsung.com Signed-off-by: Jens Axboe --- block/bio.c | 13 +++++++++++-- block/blk-settings.c | 5 +++++ include/linux/bio.h | 4 +++- include/linux/blkdev.h | 2 ++ 4 files changed, 21 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index 44205dfb6b60..221dc56ba22f 100644 --- a/block/bio.c +++ b/block/bio.c @@ -255,6 +255,13 @@ void bio_init(struct bio *bio, struct bio_vec *table, } EXPORT_SYMBOL(bio_init); +unsigned int bio_max_size(struct bio *bio) +{ + struct block_device *bdev = bio->bi_bdev; + + return bdev ? bdev->bd_disk->queue->limits.bio_max_bytes : UINT_MAX; +} + /** * bio_reset - reinitialize a bio * @bio: bio to reset @@ -866,7 +873,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page, struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; if (page_is_mergeable(bv, page, len, off, same_page)) { - if (bio->bi_iter.bi_size > UINT_MAX - len) { + if (bio->bi_iter.bi_size > bio_max_size(bio) - len) { *same_page = false; return false; } @@ -995,6 +1002,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) { unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; + unsigned int bytes_left = bio_max_size(bio) - bio->bi_iter.bi_size; struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; struct page **pages = (struct page **)bv; bool same_page = false; @@ -1010,7 +1018,8 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2); pages += entries_left * (PAGE_PTRS_PER_BVEC - 1); - size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); + size = iov_iter_get_pages(iter, pages, bytes_left, nr_pages, + &offset); if (unlikely(size <= 0)) return size ? 
size : -EFAULT; diff --git a/block/blk-settings.c b/block/blk-settings.c index 9c009090c4b5..c646503e55d2 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -31,6 +31,7 @@ EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); */ void blk_set_default_limits(struct queue_limits *lim) { + lim->bio_max_bytes = UINT_MAX; lim->max_segments = BLK_MAX_SEGMENTS; lim->max_discard_segments = 1; lim->max_integrity_segments = 0; @@ -139,6 +140,10 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto limits->logical_block_size >> SECTOR_SHIFT); limits->max_sectors = max_sectors; + if (check_shl_overflow(max_sectors, SECTOR_SHIFT, + &limits->bio_max_bytes)) + limits->bio_max_bytes = UINT_MAX; + q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9); } EXPORT_SYMBOL(blk_queue_max_hw_sectors); diff --git a/include/linux/bio.h b/include/linux/bio.h index a0b4cfdf62a4..f1a99f0a240c 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -106,6 +106,8 @@ static inline void *bio_data(struct bio *bio) return NULL; } +extern unsigned int bio_max_size(struct bio *bio); + /** * bio_full - check if the bio is full * @bio: bio to check @@ -119,7 +121,7 @@ static inline bool bio_full(struct bio *bio, unsigned len) if (bio->bi_vcnt >= bio->bi_max_vecs) return true; - if (bio->bi_iter.bi_size > UINT_MAX - len) + if (bio->bi_iter.bi_size > bio_max_size(bio) - len) return true; return false; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b91ba6207365..40c7c4d87aa1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -327,6 +327,8 @@ enum blk_bounce { }; struct queue_limits { + unsigned int bio_max_bytes; + enum blk_bounce bounce; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; -- cgit v1.2.3-71-gd317 From 48582b2e3b87b794a9845d488af2c76ce055502b Mon Sep 17 00:00:00 2001 From: Jim Quinlan Date: Fri, 30 Apr 2021 11:21:54 -0400 Subject: reset: add missing empty function reset_control_rearm() All other functions are defined for when CONFIG_RESET_CONTROLLER is not set. Fixes: 557acb3d2cd9 ("reset: make shared pulsed reset controls re-triggerable") Link: https://lore.kernel.org/r/20210430152156.21162-2-jim2101024@gmail.com Signed-off-by: Jim Quinlan Signed-off-by: Bjorn Helgaas Cc: stable@vger.kernel.org # v5.11+ --- include/linux/reset.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/reset.h b/include/linux/reset.h index b9109efa2a5c..9700124affa3 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -47,6 +47,11 @@ static inline int reset_control_reset(struct reset_control *rstc) return 0; } +static inline int reset_control_rearm(struct reset_control *rstc) +{ + return 0; +} + static inline int reset_control_assert(struct reset_control *rstc) { return 0; -- cgit v1.2.3-71-gd317 From 43016d02cf6e46edfc4696452251d34bba0c0435 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 3 May 2021 13:51:15 +0200 Subject: netfilter: arptables: use pernet ops struct during unregister Like with iptables and ebtables, hook unregistration has to use the pernet ops struct, not the template. This triggered following splat: hook not found, pf 3 num 0 WARNING: CPU: 0 PID: 224 at net/netfilter/core.c:480 __nf_unregister_net_hook+0x1eb/0x610 net/netfilter/core.c:480 [..] 
nf_unregister_net_hook net/netfilter/core.c:502 [inline] nf_unregister_net_hooks+0x117/0x160 net/netfilter/core.c:576 arpt_unregister_table_pre_exit+0x67/0x80 net/ipv4/netfilter/arp_tables.c:1565 Fixes: f9006acc8dfe5 ("netfilter: arp_tables: pass table pointer via nf_hook_ops") Reported-by: syzbot+dcccba8a1e41a38cb9df@syzkaller.appspotmail.com Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_arp/arp_tables.h | 3 +-- net/ipv4/netfilter/arp_tables.c | 5 ++--- net/ipv4/netfilter/arptable_filter.c | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index 2aab9612f6ab..4f9a4b3c5892 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -53,8 +53,7 @@ int arpt_register_table(struct net *net, const struct xt_table *table, const struct arpt_replace *repl, const struct nf_hook_ops *ops); void arpt_unregister_table(struct net *net, const char *name); -void arpt_unregister_table_pre_exit(struct net *net, const char *name, - const struct nf_hook_ops *ops); +void arpt_unregister_table_pre_exit(struct net *net, const char *name); extern unsigned int arpt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index cf20316094d0..c53f14b94356 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1556,13 +1556,12 @@ out_free: return ret; } -void arpt_unregister_table_pre_exit(struct net *net, const char *name, - const struct nf_hook_ops *ops) +void arpt_unregister_table_pre_exit(struct net *net, const char *name) { struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name); if (table) - nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); + nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks)); } EXPORT_SYMBOL(arpt_unregister_table_pre_exit); diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c index b8f45e9bbec8..6922612df456 100644 --- a/net/ipv4/netfilter/arptable_filter.c +++ b/net/ipv4/netfilter/arptable_filter.c @@ -54,7 +54,7 @@ static int __net_init arptable_filter_table_init(struct net *net) static void __net_exit arptable_filter_net_pre_exit(struct net *net) { - arpt_unregister_table_pre_exit(net, "filter", arpfilter_ops); + arpt_unregister_table_pre_exit(net, "filter"); } static void __net_exit arptable_filter_net_exit(struct net *net) -- cgit v1.2.3-71-gd317 From d7bce85aa7b92b5de8f69b3bcedfe51d7b1aabe1 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 4 May 2021 04:17:20 -0400 Subject: virtio_pci_modern: correct sparse tags for notify When switching virtio_pci_modern to use a helper for mappings we lost an __iomem tag. Restore it. Reported-by: kernel test robot Fixes: 9e3bb9b79a71 ("virtio_pci_modern: introduce helper to map vq notify area") Signed-off-by: Michael S. 
Tsirkin --- drivers/virtio/virtio_pci_modern_dev.c | 9 ++++----- include/linux/virtio_pci_modern.h | 4 ++-- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index ae87b3fa8858..54f297028586 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -605,8 +605,8 @@ static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, * * Returns the address of the notification area */ -void *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, - u16 index, resource_size_t *pa) +void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, + u16 index, resource_size_t *pa) { u16 off = vp_modern_get_queue_notify_off(mdev, index); @@ -624,10 +624,9 @@ void *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, if (pa) *pa = mdev->notify_pa + off * mdev->notify_offset_multiplier; - return (void __force *)mdev->notify_base + - off * mdev->notify_offset_multiplier; + return mdev->notify_base + off * mdev->notify_offset_multiplier; } else { - return (void __force *)vp_modern_map_capability(mdev, + return vp_modern_map_capability(mdev, mdev->notify_map_cap, 2, 2, off * mdev->notify_offset_multiplier, 2, NULL, pa); diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index cdfabbefacdf..6a95b58fd0f4 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -101,8 +101,8 @@ void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, u16 idx); u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); -void *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, - u16 index, resource_size_t *pa); +void __iomem * vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, + u16 index, resource_size_t *pa); int vp_modern_probe(struct virtio_pci_modern_device *mdev); void vp_modern_remove(struct virtio_pci_modern_device *mdev); #endif -- cgit v1.2.3-71-gd317 From 6e552494fb90acae005d74ce6a2ee102d965184b Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Tue, 4 May 2021 08:54:29 -0700 Subject: iomap: remove unused private field from ioend The only remaining user of ->io_private is the generic ioend merging infrastructure. The only user of that is XFS, which no longer sets ->io_private or passes an associated merge callback. Remove the unused parameter and the ->io_private field. CC: linux-fsdevel@vger.kernel.org Signed-off-by: Brian Foster Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong --- fs/iomap/buffered-io.c | 7 +------ fs/xfs/xfs_aops.c | 2 +- include/linux/iomap.h | 5 +---- 3 files changed, 3 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 0129e6bab985..f2cd2034a87b 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1134,9 +1134,7 @@ iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) } void -iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends, - void (*merge_private)(struct iomap_ioend *ioend, - struct iomap_ioend *next)) +iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) { struct iomap_ioend *next; @@ -1148,8 +1146,6 @@ iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends, break; list_move_tail(&next->io_list, &ioend->io_list); ioend->io_size += next->io_size; - if (next->io_private && merge_private) - merge_private(ioend, next); } } EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); @@ -1236,7 +1232,6 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, ioend->io_inode = inode; ioend->io_size = 0; ioend->io_offset = offset; - ioend->io_private = NULL; ioend->io_bio = bio; return ioend; } diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 9b08db45ce85..826caa6b4a5a 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -146,7 +146,7 @@ xfs_end_io( while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend, io_list))) { list_del_init(&ioend->io_list); - iomap_ioend_try_merge(ioend, &tmp, NULL); + iomap_ioend_try_merge(ioend, &tmp); xfs_end_ioend(ioend); } } diff --git a/include/linux/iomap.h b/include/linux/iomap.h index d202fd2d0f91..c87d0cb0de6d 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -198,7 +198,6 @@ struct iomap_ioend { struct inode *io_inode; /* file being written to */ size_t io_size; /* size of the extent */ loff_t io_offset; /* offset in the file */ - void *io_private; /* file system private data */ struct bio *io_bio; /* bio being built */ struct bio io_inline_bio; /* MUST BE LAST! */ }; @@ -234,9 +233,7 @@ struct iomap_writepage_ctx { void iomap_finish_ioends(struct iomap_ioend *ioend, int error); void iomap_ioend_try_merge(struct iomap_ioend *ioend, - struct list_head *more_ioends, - void (*merge_private)(struct iomap_ioend *ioend, - struct iomap_ioend *next)); + struct list_head *more_ioends); void iomap_sort_ioends(struct list_head *ioend_list); int iomap_writepage(struct page *page, struct writeback_control *wbc, struct iomap_writepage_ctx *wpc, -- cgit v1.2.3-71-gd317 From 98635b29a73f1a49ab6882ae58d56c9cd5ecb902 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 15 Mar 2021 10:13:54 +0100 Subject: lib: bitmap: remove the 'extern' keyword from function declarations The 'extern' keyword doesn't have any benefits for functions in header files. Remove it. Signed-off-by: Bartosz Golaszewski Reviewed-by: Andy Shevchenko --- include/linux/bitmap.h | 115 ++++++++++++++++++++++++------------------------- 1 file changed, 57 insertions(+), 58 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 70a932470b2d..6939a8983026 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -118,54 +118,53 @@ * Allocation and deallocation of bitmap. * Provided in lib/bitmap.c to avoid circular dependency. 
*/ -extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); -extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); -extern void bitmap_free(const unsigned long *bitmap); +unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); +unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); +void bitmap_free(const unsigned long *bitmap); /* * lib/bitmap.c provides these functions: */ -extern int __bitmap_equal(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern bool __pure __bitmap_or_equal(const unsigned long *src1, - const unsigned long *src2, - const unsigned long *src3, - unsigned int nbits); -extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, - unsigned int nbits); -extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits); -extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits); -extern void bitmap_cut(unsigned long *dst, const unsigned long *src, - unsigned int first, unsigned int cut, - unsigned int nbits); -extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, +int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +bool __pure __bitmap_or_equal(const unsigned long *src1, + const unsigned long *src2, + const unsigned long *src3, + unsigned int nbits); +void __bitmap_complement(unsigned long *dst, const unsigned long *src, + unsigned int nbits); +void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +void bitmap_cut(unsigned long *dst, const unsigned long *src, + unsigned int first, unsigned int cut, unsigned int nbits); +int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_replace(unsigned long *dst, + const unsigned long *old, const unsigned long *new, + const unsigned long *mask, unsigned int nbits); +int __bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_replace(unsigned long *dst, - const unsigned long *old, const unsigned long *new, - const unsigned long *mask, unsigned int nbits); -extern int __bitmap_intersects(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_subset(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); -extern void __bitmap_set(unsigned long 
*map, unsigned int start, int len); -extern void __bitmap_clear(unsigned long *map, unsigned int start, int len); - -extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, - unsigned long size, - unsigned long start, - unsigned int nr, - unsigned long align_mask, - unsigned long align_offset); +int __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); +void __bitmap_set(unsigned long *map, unsigned int start, int len); +void __bitmap_clear(unsigned long *map, unsigned int start, int len); + +unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset); /** * bitmap_find_next_zero_area - find a contiguous aligned zero area @@ -190,33 +189,33 @@ bitmap_find_next_zero_area(unsigned long *map, align_mask, 0); } -extern int bitmap_parse(const char *buf, unsigned int buflen, +int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits); -extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, +int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -extern int bitmap_parselist(const char *buf, unsigned long *maskp, +int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); -extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, +int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -extern void bitmap_remap(unsigned long *dst, const unsigned long *src, +void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, unsigned int nbits); -extern int bitmap_bitremap(int oldbit, +int bitmap_bitremap(int oldbit, const unsigned long *old, const unsigned long *new, int bits); -extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, +void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, unsigned int bits); -extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, +void bitmap_fold(unsigned long *dst, const unsigned long *orig, unsigned int sz, unsigned int nbits); -extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); -extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); -extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); +int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); +void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); +int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); #ifdef __BIG_ENDIAN -extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); +void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); #else #define bitmap_copy_le bitmap_copy #endif -extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); -extern int bitmap_print_to_pagebuf(bool list, char *buf, +unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); +int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) @@ -265,9 +264,9 @@ 
static inline void bitmap_copy_clear_tail(unsigned long *dst, * therefore conversion is not needed when copying data from/to arrays of u32. */ #if BITS_PER_LONG == 64 -extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); -extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, +void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits); #else #define bitmap_from_arr32(bitmap, buf, nbits) \ -- cgit v1.2.3-71-gd317 From c13656b904b6173aad723d9680a81c60de2f5edc Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 15 Mar 2021 10:13:55 +0100 Subject: lib: bitmap: order includes alphabetically For better readability and maintenance: order the includes in bitmap source files alphabetically. Signed-off-by: Bartosz Golaszewski Reviewed-by: Andy Shevchenko --- include/linux/bitmap.h | 4 ++-- lib/bitmap.c | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 6939a8983026..3282db97e06c 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -4,10 +4,10 @@ #ifndef __ASSEMBLY__ -#include #include -#include #include +#include +#include /* * bitmaps provide bit arrays that consume one or more unsigned diff --git a/lib/bitmap.c b/lib/bitmap.c index 75006c4036e9..78f70d9007ad 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -3,17 +3,18 @@ * lib/bitmap.c * Helper functions for bitmap.h. */ -#include -#include -#include -#include + #include #include #include +#include +#include +#include #include #include #include #include +#include #include #include -- cgit v1.2.3-71-gd317 From e829c2e4744850bab4d8f8ffebd00df10b4c6c2b Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 15 Mar 2021 10:13:56 +0100 Subject: lib: bitmap: provide devm_bitmap_alloc() and devm_bitmap_zalloc() Provide managed variants of bitmap_alloc() and bitmap_zalloc(). Signed-off-by: Bartosz Golaszewski Reviewed-by: Andy Shevchenko --- include/linux/bitmap.h | 8 ++++++++ lib/bitmap.c | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 3282db97e06c..73d039476fa4 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -9,6 +9,8 @@ #include #include +struct device; + /* * bitmaps provide bit arrays that consume one or more unsigned * longs. The bitmap interface and available operations are listed @@ -122,6 +124,12 @@ unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); void bitmap_free(const unsigned long *bitmap); +/* Managed variants of the above. 
*/ +unsigned long *devm_bitmap_alloc(struct device *dev, + unsigned int nbits, gfp_t flags); +unsigned long *devm_bitmap_zalloc(struct device *dev, + unsigned int nbits, gfp_t flags); + /* * lib/bitmap.c provides these functions: */ diff --git a/lib/bitmap.c b/lib/bitmap.c index 78f70d9007ad..27e08c0e547e 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -1263,6 +1264,38 @@ void bitmap_free(const unsigned long *bitmap) } EXPORT_SYMBOL(bitmap_free); +static void devm_bitmap_free(void *data) +{ + unsigned long *bitmap = data; + + bitmap_free(bitmap); +} + +unsigned long *devm_bitmap_alloc(struct device *dev, + unsigned int nbits, gfp_t flags) +{ + unsigned long *bitmap; + int ret; + + bitmap = bitmap_alloc(nbits, flags); + if (!bitmap) + return NULL; + + ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap); + if (ret) + return NULL; + + return bitmap; +} +EXPORT_SYMBOL_GPL(devm_bitmap_alloc); + +unsigned long *devm_bitmap_zalloc(struct device *dev, + unsigned int nbits, gfp_t flags) +{ + return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO); +} +EXPORT_SYMBOL_GPL(devm_bitmap_zalloc); + #if BITS_PER_LONG == 64 /** * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap -- cgit v1.2.3-71-gd317 From 7716506adac4664793a9d6d3dfa31ffddfa98714 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 4 May 2021 18:32:45 -0700 Subject: mm: introduce and use mapping_empty() Patch series "Remove nrexceptional tracking", v2. We actually use nrexceptional for very little these days. It's a minor pain to keep in sync with nrpages, but the pain becomes much bigger with the THP patches because we don't know how many indices a shadow entry occupies. It's easier to just remove it than keep it accurate. Also, we save 8 bytes per inode which is nothing to sneeze at; on my laptop, it would improve shmem_inode_cache from 22 to 23 objects per 16kB, and inode_cache from 26 to 27 objects. Combined, that saves a megabyte of memory from a combined usage of 25MB for both caches. Unfortunately, ext4 doesn't cross a magic boundary, so it doesn't save any memory for ext4. This patch (of 4): Instead of checking the two counters (nrpages and nrexceptional), we can just check whether i_pages is empty. 
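As an illustrative sketch only (the real helper is added to include/linux/pagemap.h in the diff below), the two-counter guard collapses into a single XArray emptiness test:

	static inline bool mapping_empty(struct address_space *mapping)
	{
		return xa_empty(&mapping->i_pages);
	}

so call sites that previously tested "mapping->nrpages == 0 && mapping->nrexceptional == 0" now simply test mapping_empty(mapping).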
Link: https://lkml.kernel.org/r/20201026151849.24232-1-willy@infradead.org Link: https://lkml.kernel.org/r/20201026151849.24232-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Vishal Verma Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/block_dev.c | 2 +- fs/dax.c | 2 +- fs/gfs2/glock.c | 3 +-- include/linux/pagemap.h | 5 +++++ mm/truncate.c | 18 +++--------------- 5 files changed, 11 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/fs/block_dev.c b/fs/block_dev.c index a5244e08b6c8..9114e0a0e7b4 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -79,7 +79,7 @@ static void kill_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_inode->i_mapping; - if (mapping->nrpages == 0 && mapping->nrexceptional == 0) + if (mapping_empty(mapping)) return; invalidate_bh_lrus(); diff --git a/fs/dax.c b/fs/dax.c index b3d27fdc6775..999f3f22aea3 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -965,7 +965,7 @@ int dax_writeback_mapping_range(struct address_space *mapping, if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) return -EIO; - if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) + if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL) return 0; trace_dax_writeback_range(inode, xas.xa_index, end_index); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 84c38103aa06..ea7fc5c641c7 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -273,8 +273,7 @@ static void __gfs2_glock_put(struct gfs2_glock *gl) if (mapping) { truncate_inode_pages_final(mapping); if (!gfs2_withdrawn(sdp)) - GLOCK_BUG_ON(gl, mapping->nrpages || - mapping->nrexceptional); + GLOCK_BUG_ON(gl, !mapping_empty(mapping)); } trace_gfs2_glock_put(gl); sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 469fa7ffcf96..a4bd41128bf3 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -18,6 +18,11 @@ struct pagevec; +static inline bool mapping_empty(struct address_space *mapping) +{ + return xa_empty(&mapping->i_pages); +} + /* * Bits in mapping->flags. */ diff --git a/mm/truncate.c b/mm/truncate.c index 455944264663..adb8d4107988 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -295,7 +295,7 @@ void truncate_inode_pages_range(struct address_space *mapping, pgoff_t index; int i; - if (mapping->nrpages == 0 && mapping->nrexceptional == 0) + if (mapping_empty(mapping)) goto out; /* Offsets within partial pages */ @@ -440,9 +440,6 @@ EXPORT_SYMBOL(truncate_inode_pages); */ void truncate_inode_pages_final(struct address_space *mapping) { - unsigned long nrexceptional; - unsigned long nrpages; - /* * Page reclaim can not participate in regular inode lifetime * management (can't call iput()) and thus can race with the @@ -452,16 +449,7 @@ void truncate_inode_pages_final(struct address_space *mapping) */ mapping_set_exiting(mapping); - /* - * When reclaim installs eviction entries, it increases - * nrexceptional first, then decreases nrpages. Make sure we see - * this in the right order or we might miss an entry. 
- */ - nrpages = mapping->nrpages; - smp_rmb(); - nrexceptional = mapping->nrexceptional; - - if (nrpages || nrexceptional) { + if (!mapping_empty(mapping)) { /* * As truncation uses a lockless tree lookup, cycle * the tree lock to make sure any ongoing tree @@ -633,7 +621,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, int ret2 = 0; int did_range_unmap = 0; - if (mapping->nrpages == 0 && mapping->nrexceptional == 0) + if (mapping_empty(mapping)) goto out; pagevec_init(&pvec); -- cgit v1.2.3-71-gd317 From 8bc3c481b3d0dcef2cf8e1b7c6b780af6725f7e3 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 4 May 2021 18:32:54 -0700 Subject: mm: remove nrexceptional from inode We no longer track anything in nrexceptional, so remove it, saving 8 bytes per inode. Link: https://lkml.kernel.org/r/20201026151849.24232-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Vishal Verma Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/inode.c | 2 +- include/linux/fs.h | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/inode.c b/fs/inode.c index 9e192bea0630..af48d1b722f0 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -529,7 +529,7 @@ void clear_inode(struct inode *inode) */ xa_lock_irq(&inode->i_data.i_pages); BUG_ON(inode->i_data.nrpages); - BUG_ON(inode->i_data.nrexceptional); + BUG_ON(!mapping_empty(&inode->i_data)); xa_unlock_irq(&inode->i_data.i_pages); BUG_ON(!list_empty(&inode->i_data.private_list)); BUG_ON(!(inode->i_state & I_FREEING)); diff --git a/include/linux/fs.h b/include/linux/fs.h index 12766edee81f..acef282b97c6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -442,7 +442,6 @@ int pagecache_write_end(struct file *, struct address_space *mapping, * @i_mmap: Tree of private and shared mappings. * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. * @nrpages: Number of page entries, protected by the i_pages lock. - * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock. * @writeback_index: Writeback starts here. * @a_ops: Methods. * @flags: Error bits and flags (AS_*). @@ -463,7 +462,6 @@ struct address_space { struct rb_root_cached i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; - unsigned long nrexceptional; pgoff_t writeback_index; const struct address_space_operations *a_ops; unsigned long flags; -- cgit v1.2.3-71-gd317 From aec44e0f0213e36d4f0868a80cdc5097a510f79d Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 4 May 2021 18:33:00 -0700 Subject: hugetlb: pass vma into huge_pte_alloc() and huge_pmd_share() Patch series "hugetlb: Disable huge pmd unshare for uffd-wp", v4. This series tries to disable huge pmd unshare of hugetlbfs backed memory for uffd-wp. Although uffd-wp of hugetlbfs is still during rfc stage, the idea of this series may be needed for multiple tasks (Axel's uffd minor fault series, and Mike's soft dirty series), so I picked it out from the larger series. This patch (of 4): It is a preparation work to be able to behave differently in the per architecture huge_pte_alloc() according to different VMA attributes. Pass it deeper into huge_pmd_share() so that we can avoid the find_vma() call. 
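For illustration only (the per-architecture diffs below are authoritative), a typical caller changes from

	dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);

to

	dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);

so that huge_pmd_share() receives the vma directly instead of re-deriving it with find_vma(mm, addr).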
[peterx@redhat.com: build fix] Link: https://lkml.kernel.org/r/20210304164653.GB397383@xz-x1Link: https://lkml.kernel.org/r/20210218230633.15028-1-peterx@redhat.com Link: https://lkml.kernel.org/r/20210218230633.15028-2-peterx@redhat.com Signed-off-by: Peter Xu Suggested-by: Mike Kravetz Cc: Adam Ruprecht Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Andrea Arcangeli Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Cannon Matthews Cc: Catalin Marinas Cc: Chinwen Chang Cc: David Rientjes Cc: "Dr . David Alan Gilbert" Cc: Huang Ying Cc: Ingo Molnar Cc: Jann Horn Cc: Jerome Glisse Cc: Kirill A. Shutemov Cc: Lokesh Gidra Cc: "Matthew Wilcox (Oracle)" Cc: Michael Ellerman Cc: "Michal Koutn" Cc: Michel Lespinasse Cc: Mike Rapoport Cc: Mina Almasry Cc: Nicholas Piggin Cc: Oliver Upton Cc: Shaohua Li Cc: Shawn Anastasio Cc: Steven Price Cc: Steven Rostedt Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/mm/hugetlbpage.c | 4 ++-- arch/ia64/mm/hugetlbpage.c | 3 ++- arch/mips/mm/hugetlbpage.c | 4 ++-- arch/parisc/mm/hugetlbpage.c | 2 +- arch/powerpc/mm/hugetlbpage.c | 3 ++- arch/s390/mm/hugetlbpage.c | 2 +- arch/sh/mm/hugetlbpage.c | 2 +- arch/sparc/mm/hugetlbpage.c | 2 +- include/linux/hugetlb.h | 5 +++-- mm/hugetlb.c | 15 ++++++++------- mm/userfaultfd.c | 2 +- 11 files changed, 24 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 55ecf6de9ff7..6e3bcffe2837 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -252,7 +252,7 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, set_pte(ptep, pte); } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; @@ -286,7 +286,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, } else if (sz == PMD_SIZE) { if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && pud_none(READ_ONCE(*pudp))) - ptep = huge_pmd_share(mm, addr, pudp); + ptep = huge_pmd_share(mm, vma, addr, pudp); else ptep = (pte_t *)pmd_alloc(mm, pudp, addr); } else if (sz == (CONT_PMD_SIZE)) { diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index b331f94d20ac..f993cb36c062 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -25,7 +25,8 @@ unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT; EXPORT_SYMBOL(hpage_shift); pte_t * -huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { unsigned long taddr = htlbpage_to_page(addr); pgd_t *pgd; diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c index b9f76f433617..7eaff5b07873 100644 --- a/arch/mips/mm/hugetlbpage.c +++ b/arch/mips/mm/hugetlbpage.c @@ -21,8 +21,8 @@ #include #include -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, - unsigned long sz) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index 43652de5f139..d1d3990b83f6 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -44,7 +44,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, 
unsigned long sz) { pgd_t *pgd; diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index d142b76d507d..9a75ba078e1b 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -106,7 +106,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, * At this point we do the placement change only for BOOK3S 64. This would * possibly work on other subarchs. */ -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { pgd_t *pg; p4d_t *p4; diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 3b5a4d25ca9b..da36d13ffc16 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -189,7 +189,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, return pte; } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 220d7bc43d2b..999ab5916e69 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -21,7 +21,7 @@ #include #include -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index ad4b42f04988..04d8790f6c32 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -279,7 +279,7 @@ unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&p unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); } unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index cccd1aab69dd..653ef322fac9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -152,7 +152,8 @@ void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); -pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); +pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, pud_t *pud); struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage); @@ -161,7 +162,7 @@ extern struct list_head huge_boot_pages; /* arch callbacks */ -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6c72433bec1e..a02a651088d3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3795,7 +3795,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, src_pte = huge_pte_offset(src, addr, sz); if (!src_pte) continue; - dst_pte = huge_pte_alloc(dst, addr, sz); + dst_pte = huge_pte_alloc(dst, vma, addr, sz); if (!dst_pte) { ret = -ENOMEM; break; @@ -4563,7 +4563,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, */ mapping = vma->vm_file->f_mapping; 
i_mmap_lock_read(mapping); - ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); + ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); if (!ptep) { i_mmap_unlock_read(mapping); return VM_FAULT_OOM; @@ -5370,9 +5370,9 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, * if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is * only required for subsequent processing. */ -pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) +pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, pud_t *pud) { - struct vm_area_struct *vma = find_vma(mm, addr); struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; @@ -5450,7 +5450,8 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, } #define want_pmd_share() (1) #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ -pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) +pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, pud_t *pud) { return NULL; } @@ -5469,7 +5470,7 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; @@ -5488,7 +5489,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, } else { BUG_ON(sz != PMD_SIZE); if (want_pmd_share() && pud_none(*pud)) - pte = huge_pmd_share(mm, addr, pud); + pte = huge_pmd_share(mm, vma, addr, pud); else pte = (pte_t *)pmd_alloc(mm, pud, addr); } diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 9a3d451402d7..063cbb17e8d8 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -290,7 +290,7 @@ retry: mutex_lock(&hugetlb_fault_mutex_table[hash]); err = -ENOMEM; - dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize); + dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize); if (!dst_pte) { mutex_unlock(&hugetlb_fault_mutex_table[hash]); i_mmap_unlock_read(mapping); -- cgit v1.2.3-71-gd317 From c1991e0705d143be773c984b006f2078aa9f2853 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 4 May 2021 18:33:04 -0700 Subject: hugetlb/userfaultfd: forbid huge pmd sharing when uffd enabled Huge pmd sharing could bring problem to userfaultfd. The thing is that userfaultfd is running its logic based on the special bits on page table entries, however the huge pmd sharing could potentially share page table entries for different address ranges. That could cause issues on either: - When sharing huge pmd page tables for an uffd write protected range, the newly mapped huge pmd range will also be write protected unexpectedly, or, - When we try to write protect a range of huge pmd shared range, we'll first do huge_pmd_unshare() in hugetlb_change_protection(), however that also means the UFFDIO_WRITEPROTECT could be silently skipped for the shared region, which could lead to data loss. While at it, a few other things are done altogether: - Move want_pmd_share() from mm/hugetlb.c into linux/hugetlb.h, because that's definitely something that arch code would like to use too - ARM64 currently directly check against CONFIG_ARCH_WANT_HUGE_PMD_SHARE when trying to share huge pmd. Switch to the want_pmd_share() helper. - Move vma_shareable() from huge_pmd_share() into want_pmd_share(). 
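Roughly, the resulting helper looks like this (see the mm/hugetlb.c hunk below for the real code):

	bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
	{
	#ifdef CONFIG_USERFAULTFD
		/* uffd-wp state lives in the pgtable entries themselves,
		 * so never share pmds under a uffd-wp registered vma.
		 */
		if (uffd_disable_huge_pmd_share(vma))
			return false;
	#endif
		return vma_shareable(vma, addr);
	}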
[peterx@redhat.com: fix build with !ARCH_WANT_HUGE_PMD_SHARE] Link: https://lkml.kernel.org/r/20210310185359.88297-1-peterx@redhat.com Link: https://lkml.kernel.org/r/20210218231202.15426-1-peterx@redhat.com Signed-off-by: Peter Xu Reviewed-by: Mike Kravetz Reviewed-by: Axel Rasmussen Tested-by: Naresh Kamboju Cc: Adam Ruprecht Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Andrea Arcangeli Cc: Anshuman Khandual Cc: Cannon Matthews Cc: Catalin Marinas Cc: Chinwen Chang Cc: David Rientjes Cc: "Dr . David Alan Gilbert" Cc: Huang Ying Cc: Ingo Molnar Cc: Jann Horn Cc: Jerome Glisse Cc: Kirill A. Shutemov Cc: Lokesh Gidra Cc: "Matthew Wilcox (Oracle)" Cc: Michael Ellerman Cc: "Michal Koutn" Cc: Michel Lespinasse Cc: Mike Rapoport Cc: Mina Almasry Cc: Nicholas Piggin Cc: Oliver Upton Cc: Shaohua Li Cc: Shawn Anastasio Cc: Steven Price Cc: Steven Rostedt Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/mm/hugetlbpage.c | 3 +-- include/linux/hugetlb.h | 2 ++ include/linux/userfaultfd_k.h | 9 +++++++++ mm/hugetlb.c | 22 ++++++++++++++++------ 4 files changed, 28 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 6e3bcffe2837..58987a98e179 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -284,8 +284,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, */ ptep = pte_alloc_map(mm, pmdp, addr); } else if (sz == PMD_SIZE) { - if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && - pud_none(READ_ONCE(*pudp))) + if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp))) ptep = huge_pmd_share(mm, vma, addr, pudp); else ptep = (pte_t *)pmd_alloc(mm, pudp, addr); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 653ef322fac9..88e93809a455 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -1040,4 +1040,6 @@ static inline __init void hugetlb_cma_check(void) } #endif +bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr); + #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index a8e5f3ea9bb2..c63ccdae3eab 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -52,6 +52,15 @@ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; } +/* + * Never enable huge pmd sharing on uffd-wp registered vmas, because uffd-wp + * protect information is per pgtable entry. + */ +static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma) +{ + return vma->vm_flags & VM_UFFD_WP; +} + static inline bool userfaultfd_missing(struct vm_area_struct *vma) { return vma->vm_flags & VM_UFFD_MISSING; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a02a651088d3..91647e824015 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5326,6 +5326,15 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) return false; } +bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) +{ +#ifdef CONFIG_USERFAULTFD + if (uffd_disable_huge_pmd_share(vma)) + return false; +#endif + return vma_shareable(vma, addr); +} + /* * Determine if start,end range within vma could be mapped by shared pmd. 
* If yes, adjust start and end to cover range associated with possible @@ -5382,9 +5391,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, pte_t *pte; spinlock_t *ptl; - if (!vma_shareable(vma, addr)) - return (pte_t *)pmd_alloc(mm, pud, addr); - i_mmap_assert_locked(mapping); vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { if (svma == vma) @@ -5448,7 +5454,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; return 1; } -#define want_pmd_share() (1) + #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud) @@ -5466,7 +5472,11 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { } -#define want_pmd_share() (0) + +bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) +{ + return false; +} #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB @@ -5488,7 +5498,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pte = (pte_t *)pud; } else { BUG_ON(sz != PMD_SIZE); - if (want_pmd_share() && pud_none(*pud)) + if (want_pmd_share(vma, addr) && pud_none(*pud)) pte = huge_pmd_share(mm, vma, addr, pud); else pte = (pte_t *)pmd_alloc(mm, pud, addr); -- cgit v1.2.3-71-gd317 From 537cf30bba241ae88d5f4b0b6a5e66271b394852 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 4 May 2021 18:33:08 -0700 Subject: mm/hugetlb: move flush_hugetlb_tlb_range() into hugetlb.h Prepare for it to be called outside of mm/hugetlb.c. Link: https://lkml.kernel.org/r/20210218231204.15474-1-peterx@redhat.com Signed-off-by: Peter Xu Reviewed-by: Mike Kravetz Reviewed-by: Axel Rasmussen Cc: Adam Ruprecht Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Andrea Arcangeli Cc: Anshuman Khandual Cc: Cannon Matthews Cc: Catalin Marinas Cc: Chinwen Chang Cc: David Rientjes Cc: "Dr . David Alan Gilbert" Cc: Huang Ying Cc: Ingo Molnar Cc: Jann Horn Cc: Jerome Glisse Cc: Kirill A. Shutemov Cc: Lokesh Gidra Cc: "Matthew Wilcox (Oracle)" Cc: Michael Ellerman Cc: "Michal Koutn" Cc: Michel Lespinasse Cc: Mike Rapoport Cc: Mina Almasry Cc: Nicholas Piggin Cc: Oliver Upton Cc: Shaohua Li Cc: Shawn Anastasio Cc: Steven Price Cc: Steven Rostedt Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 8 ++++++++ mm/hugetlb.c | 8 -------- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 88e93809a455..e43668144664 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -1042,4 +1042,12 @@ static inline __init void hugetlb_cma_check(void) bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr); +#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE +/* + * ARCHes with special requirements for evicting HUGETLB backing TLB entries can + * implement this. + */ +#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#endif + #endif /* _LINUX_HUGETLB_H */ diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 91647e824015..3868f3126534 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4996,14 +4996,6 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, return i ? 
i : err; } -#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE -/* - * ARCHes with special requirements for evicting HUGETLB backing TLB entries can - * implement this. - */ -#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) -#endif - unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) { -- cgit v1.2.3-71-gd317 From 6dfeaff93be1a4cab4fb48dad7df326d05059a99 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 4 May 2021 18:33:13 -0700 Subject: hugetlb/userfaultfd: unshare all pmds for hugetlbfs when register wp Huge pmd sharing for hugetlbfs is racy with userfaultfd-wp because userfaultfd-wp is always based on pgtable entries, so they cannot be shared. Walk the hugetlb range and unshare all such mappings if there is, right before UFFDIO_REGISTER will succeed and return to userspace. This will pair with want_pmd_share() in hugetlb code so that huge pmd sharing is completely disabled for userfaultfd-wp registered range. Link: https://lkml.kernel.org/r/20210218231206.15524-1-peterx@redhat.com Signed-off-by: Peter Xu Reviewed-by: Mike Kravetz Cc: Peter Xu Cc: Andrea Arcangeli Cc: Axel Rasmussen Cc: Mike Rapoport Cc: Kirill A. Shutemov Cc: Matthew Wilcox (Oracle) Cc: Adam Ruprecht Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Anshuman Khandual Cc: Cannon Matthews Cc: Catalin Marinas Cc: Chinwen Chang Cc: David Rientjes Cc: "Dr . David Alan Gilbert" Cc: Huang Ying Cc: Ingo Molnar Cc: Jann Horn Cc: Jerome Glisse Cc: Lokesh Gidra Cc: Michael Ellerman Cc: "Michal Koutn" Cc: Michel Lespinasse Cc: Mina Almasry Cc: Nicholas Piggin Cc: Oliver Upton Cc: Shaohua Li Cc: Shawn Anastasio Cc: Steven Price Cc: Steven Rostedt Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/userfaultfd.c | 4 ++++ include/linux/hugetlb.h | 3 +++ mm/hugetlb.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+) (limited to 'include/linux') diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 0be8cdd4425a..e5ce3b4e6c3d 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -1449,6 +1450,9 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, vma->vm_flags = new_flags; vma->vm_userfaultfd_ctx.ctx = ctx; + if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) + hugetlb_unshare_all_pmds(vma); + skip: prev = vma; start = vma->vm_end; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e43668144664..0f5813522224 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -188,6 +188,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); bool is_hugetlb_entry_migration(pte_t pte); +void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); #else /* !CONFIG_HUGETLB_PAGE */ @@ -369,6 +370,8 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, return 0; } +static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } + #endif /* !CONFIG_HUGETLB_PAGE */ /* * hugepages at page global directory. 
If arch support diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3868f3126534..e86d3abcc300 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5691,6 +5691,57 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) } } +/* + * This function will unconditionally remove all the shared pmd pgtable entries + * within the specific vma for a hugetlbfs memory range. + */ +void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) +{ + struct hstate *h = hstate_vma(vma); + unsigned long sz = huge_page_size(h); + struct mm_struct *mm = vma->vm_mm; + struct mmu_notifier_range range; + unsigned long address, start, end; + spinlock_t *ptl; + pte_t *ptep; + + if (!(vma->vm_flags & VM_MAYSHARE)) + return; + + start = ALIGN(vma->vm_start, PUD_SIZE); + end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); + + if (start >= end) + return; + + /* + * No need to call adjust_range_if_pmd_sharing_possible(), because + * we have already done the PUD_SIZE alignment. + */ + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, + start, end); + mmu_notifier_invalidate_range_start(&range); + i_mmap_lock_write(vma->vm_file->f_mapping); + for (address = start; address < end; address += PUD_SIZE) { + unsigned long tmp = address; + + ptep = huge_pte_offset(mm, address, sz); + if (!ptep) + continue; + ptl = huge_pte_lock(h, mm, ptep); + /* We don't want 'address' to be changed */ + huge_pmd_unshare(mm, vma, &tmp, ptep); + spin_unlock(ptl); + } + flush_hugetlb_tlb_range(vma, start, end); + i_mmap_unlock_write(vma->vm_file->f_mapping); + /* + * No need to call mmu_notifier_invalidate_range(), see + * Documentation/vm/mmu_notifier.rst. + */ + mmu_notifier_invalidate_range_end(&range); +} + #ifdef CONFIG_CMA static bool cma_reserve_called __initdata; -- cgit v1.2.3-71-gd317 From d4afd60c24f87b6275b12ec3d67d8c2ad78cb075 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 4 May 2021 18:34:05 -0700 Subject: mm/huge_memory.c: remove unused macro TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG Commit 4958e4d86ecb ("mm: thp: remove debug_cow switch") forgot to remove TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG macro. Remove it here. Link: https://lkml.kernel.org/r/20210318122722.13135-6-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Zi Yan Reviewed-by: Peter Xu Cc: Aneesh Kumar K.V Cc: Matthew Wilcox Cc: Michel Lespinasse Cc: Ralph Campbell Cc: Thomas Hellstrm (Intel) Cc: Vlastimil Babka Cc: Wei Yang Cc: William Kucharski Cc: Yang Shi Cc: yuleixzhang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ba973efcd369..9626fda5efce 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -87,9 +87,6 @@ enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, -#ifdef CONFIG_DEBUG_VM - TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG, -#endif }; struct kobject; -- cgit v1.2.3-71-gd317 From 2938396771c8fd0870b5284319f9e78b4b552a79 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 4 May 2021 18:34:52 -0700 Subject: hugetlb: add per-hstate mutex to synchronize user adjustments The helper routine hstate_next_node_to_alloc accesses and modifies the hstate variable next_nid_to_alloc. The helper is used by the routines alloc_pool_huge_page and adjust_pool_surplus. adjust_pool_surplus is called with hugetlb_lock held. 
However, alloc_pool_huge_page can not be called with the hugetlb lock held as it will call the page allocator. Two instances of alloc_pool_huge_page could be run in parallel or alloc_pool_huge_page could run in parallel with adjust_pool_surplus which may result in the variable next_nid_to_alloc becoming invalid for the caller and pages being allocated on the wrong node. Both alloc_pool_huge_page and adjust_pool_surplus are only called from the routine set_max_huge_pages after boot. set_max_huge_pages is only called as the reusult of a user writing to the proc/sysfs nr_hugepages, or nr_hugepages_mempolicy file to adjust the number of hugetlb pages. It makes little sense to allow multiple adjustment to the number of hugetlb pages in parallel. Add a mutex to the hstate and use it to only allow one hugetlb page adjustment at a time. This will synchronize modifications to the next_nid_to_alloc variable. Link: https://lkml.kernel.org/r/20210409205254.242291-4-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz Acked-by: Michal Hocko Reviewed-by: Oscar Salvador Reviewed-by: Miaohe Lin Reviewed-by: Muchun Song Reviewed-by: David Hildenbrand Cc: "Aneesh Kumar K . V" Cc: Barry Song Cc: David Rientjes Cc: Hillf Danton Cc: HORIGUCHI NAOYA Cc: Joonsoo Kim Cc: Matthew Wilcox Cc: Mina Almasry Cc: Peter Xu Cc: Peter Zijlstra Cc: Roman Gushchin Cc: Shakeel Butt Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 1 + mm/hugetlb.c | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 0f5813522224..628639422c5d 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -559,6 +559,7 @@ HPAGEFLAG(Freed, freed) #define HSTATE_NAME_LEN 32 /* Defines one hugetlb page size */ struct hstate { + struct mutex resize_lock; int next_nid_to_alloc; int next_nid_to_free; unsigned int order; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a5636e1593c5..067fd29a9d51 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2621,6 +2621,11 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, else return -ENOMEM; + /* + * resize_lock mutex prevents concurrent adjustments to number of + * pages in hstate via the proc/sysfs interfaces. 
+ */ + mutex_lock(&h->resize_lock); spin_lock(&hugetlb_lock); /* @@ -2653,6 +2658,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { if (count > persistent_huge_pages(h)) { spin_unlock(&hugetlb_lock); + mutex_unlock(&h->resize_lock); NODEMASK_FREE(node_alloc_noretry); return -EINVAL; } @@ -2727,6 +2733,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, out: h->max_huge_pages = persistent_huge_pages(h); spin_unlock(&hugetlb_lock); + mutex_unlock(&h->resize_lock); NODEMASK_FREE(node_alloc_noretry); @@ -3214,6 +3221,7 @@ void __init hugetlb_add_hstate(unsigned int order) BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); BUG_ON(order == 0); h = &hstates[hugetlb_max_hstate++]; + mutex_init(&h->resize_lock); h->order = order; h->mask = ~(huge_page_size(h) - 1); for (i = 0; i < MAX_NUMNODES; ++i) -- cgit v1.2.3-71-gd317 From 369fa227c21949b22fd7374506c4992a0d7bb580 Mon Sep 17 00:00:00 2001 From: Oscar Salvador Date: Tue, 4 May 2021 18:35:26 -0700 Subject: mm: make alloc_contig_range handle free hugetlb pages alloc_contig_range will fail if it ever sees a HugeTLB page within the range we are trying to allocate, even when that page is free and can be easily reallocated. This has proved to be problematic for some users of alloc_contic_range, e.g: CMA and virtio-mem, where those would fail the call even when those pages lay in ZONE_MOVABLE and are free. We can do better by trying to replace such page. Free hugepages are tricky to handle so as to no userspace application notices disruption, we need to replace the current free hugepage with a new one. In order to do that, a new function called alloc_and_dissolve_huge_page is introduced. This function will first try to get a new fresh hugepage, and if it succeeds, it will replace the old one in the free hugepage pool. The free page replacement is done under hugetlb_lock, so no external users of hugetlb will notice the change. To allocate the new huge page, we use alloc_buddy_huge_page(), so we do not have to deal with any counters, and prep_new_huge_page() is not called. This is valulable because in case we need to free the new page, we only need to call __free_pages(). Once we know that the page to be replaced is a genuine 0-refcounted huge page, we remove the old page from the freelist by remove_hugetlb_page(). Then, we can call __prep_new_huge_page() and __prep_account_new_huge_page() for the new huge page to properly initialize it and increment the hstate->nr_huge_pages counter (previously decremented by remove_hugetlb_page()). Once done, the page is enqueued by enqueue_huge_page() and it is ready to be used. There is one tricky case when page's refcount is 0 because it is in the process of being released. A missing PageHugeFreed bit will tell us that freeing is in flight so we retry after dropping the hugetlb_lock. The race window should be small and the next retry should make a forward progress. 
E.g: CPU0 CPU1 free_huge_page() isolate_or_dissolve_huge_page PageHuge() == T alloc_and_dissolve_huge_page alloc_buddy_huge_page() spin_lock_irq(hugetlb_lock) // PageHuge() && !PageHugeFreed && // !PageCount() spin_unlock_irq(hugetlb_lock) spin_lock_irq(hugetlb_lock) 1) update_and_free_page PageHuge() == F __free_pages() 2) enqueue_huge_page SetPageHugeFreed() spin_unlock_irq(&hugetlb_lock) spin_lock_irq(hugetlb_lock) 1) PageHuge() == F (freed by case#1 from CPU0) 2) PageHuge() == T PageHugeFreed() == T - proceed with replacing the page In the case above we retry as the window race is quite small and we have high chances to succeed next time. With regard to the allocation, we restrict it to the node the page belongs to with __GFP_THISNODE, meaning we do not fallback on other node's zones. Note that gigantic hugetlb pages are fenced off since there is a cyclic dependency between them and alloc_contig_range. Link: https://lkml.kernel.org/r/20210419075413.1064-6-osalvador@suse.de Signed-off-by: Oscar Salvador Acked-by: Michal Hocko Acked-by: David Hildenbrand Reviewed-by: Mike Kravetz Cc: Muchun Song Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 6 +++ mm/compaction.c | 33 ++++++++++++-- mm/hugetlb.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 152 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 628639422c5d..ec6a10b8860a 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -588,6 +588,7 @@ struct huge_bootmem_page { struct hstate *hstate; }; +int isolate_or_dissolve_huge_page(struct page *page); struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, @@ -870,6 +871,11 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; +static inline int isolate_or_dissolve_huge_page(struct page *page) +{ + return -ENOMEM; +} + static inline struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) diff --git a/mm/compaction.c b/mm/compaction.c index c4d8007221b7..b77e1382307f 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -788,7 +788,7 @@ static bool too_many_isolated(pg_data_t *pgdat) * Isolate all pages that can be migrated from the range specified by * [low_pfn, end_pfn). The range is expected to be within same pageblock. * Returns errno, like -EAGAIN or -EINTR in case e.g signal pending or congestion, - * or 0. + * -ENOMEM in case we could not allocate a page, or 0. * cc->migrate_pfn will contain the next pfn to scan. * * The pages are isolated on cc->migratepages list (not required to be empty), @@ -906,6 +906,29 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, valid_page = page; } + if (PageHuge(page) && cc->alloc_contig) { + ret = isolate_or_dissolve_huge_page(page); + + /* + * Fail isolation in case isolate_or_dissolve_huge_page() + * reports an error. In case of -ENOMEM, abort right away. + */ + if (ret < 0) { + /* Do not report -EBUSY down the chain */ + if (ret == -EBUSY) + ret = 0; + low_pfn += (1UL << compound_order(page)) - 1; + goto isolate_fail; + } + + /* + * Ok, the hugepage was dissolved. Now these pages are + * Buddy and cannot be re-allocated because they are + * isolated. Fall-through as the check below handles + * Buddy pages. 
+ */ + } + /* * Skip if free. We read page order here without zone lock * which is generally unsafe, but the race window is small and @@ -1065,7 +1088,7 @@ isolate_fail_put: put_page(page); isolate_fail: - if (!skip_on_failure) + if (!skip_on_failure && ret != -ENOMEM) continue; /* @@ -1091,6 +1114,9 @@ isolate_fail: */ next_skip_pfn += 1UL << cc->order; } + + if (ret == -ENOMEM) + break; } /* @@ -1143,7 +1169,8 @@ fatal_pending: * @start_pfn: The first PFN to start isolating. * @end_pfn: The one-past-last PFN. * - * Returns -EAGAIN when contented, -EINTR in case of a signal pending or 0. + * Returns -EAGAIN when contented, -EINTR in case of a signal pending, -ENOMEM + * in case we could not allocate a page, or 0. */ int isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 63760be2688e..92f3cd08946f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2267,6 +2267,122 @@ static void restore_reserve_on_error(struct hstate *h, } } +/* + * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one + * @h: struct hstate old page belongs to + * @old_page: Old page to dissolve + * Returns 0 on success, otherwise negated error. + */ +static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page) +{ + gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; + int nid = page_to_nid(old_page); + struct page *new_page; + int ret = 0; + + /* + * Before dissolving the page, we need to allocate a new one for the + * pool to remain stable. Using alloc_buddy_huge_page() allows us to + * not having to deal with prep_new_huge_page() and avoids dealing of any + * counters. This simplifies and let us do the whole thing under the + * lock. + */ + new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL); + if (!new_page) + return -ENOMEM; + +retry: + spin_lock_irq(&hugetlb_lock); + if (!PageHuge(old_page)) { + /* + * Freed from under us. Drop new_page too. + */ + goto free_new; + } else if (page_count(old_page)) { + /* + * Someone has grabbed the page, fail for now. + */ + ret = -EBUSY; + goto free_new; + } else if (!HPageFreed(old_page)) { + /* + * Page's refcount is 0 but it has not been enqueued in the + * freelist yet. Race window is small, so we can succeed here if + * we retry. + */ + spin_unlock_irq(&hugetlb_lock); + cond_resched(); + goto retry; + } else { + /* + * Ok, old_page is still a genuine free hugepage. Remove it from + * the freelist and decrease the counters. These will be + * incremented again when calling __prep_account_new_huge_page() + * and enqueue_huge_page() for new_page. The counters will remain + * stable since this happens under the lock. + */ + remove_hugetlb_page(h, old_page, false); + + /* + * new_page needs to be initialized with the standard hugetlb + * state. This is normally done by prep_new_huge_page() but + * that takes hugetlb_lock which is already held so we need to + * open code it here. + * Reference count trick is needed because allocator gives us + * referenced page but the pool requires pages with 0 refcount. + */ + __prep_new_huge_page(new_page); + __prep_account_new_huge_page(h, nid); + page_ref_dec(new_page); + enqueue_huge_page(h, new_page); + + /* + * Pages have been replaced, we can safely free the old one. 
+ */ + spin_unlock_irq(&hugetlb_lock); + update_and_free_page(h, old_page); + } + + return ret; + +free_new: + spin_unlock_irq(&hugetlb_lock); + __free_pages(new_page, huge_page_order(h)); + + return ret; +} + +int isolate_or_dissolve_huge_page(struct page *page) +{ + struct hstate *h; + struct page *head; + + /* + * The page might have been dissolved from under our feet, so make sure + * to carefully check the state under the lock. + * Return success when racing as if we dissolved the page ourselves. + */ + spin_lock_irq(&hugetlb_lock); + if (PageHuge(page)) { + head = compound_head(page); + h = page_hstate(head); + } else { + spin_unlock_irq(&hugetlb_lock); + return 0; + } + spin_unlock_irq(&hugetlb_lock); + + /* + * Fence off gigantic pages as there is a cyclic dependency between + * alloc_contig_range and them. Return -ENOMEM as this has the effect + * of bailing out right away without further retrying. + */ + if (hstate_is_gigantic(h)) + return -ENOMEM; + + return alloc_and_dissolve_huge_page(h, head); +} + struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { -- cgit v1.2.3-71-gd317 From ae37c7ff79f1f030e28ec76c46ee032f8fd07607 Mon Sep 17 00:00:00 2001 From: Oscar Salvador Date: Tue, 4 May 2021 18:35:29 -0700 Subject: mm: make alloc_contig_range handle in-use hugetlb pages alloc_contig_range() will fail if it finds a HugeTLB page within the range, without a chance to handle them. Since HugeTLB pages can be migrated as any LRU or Movable page, it does not make sense to bail out without trying. Enable the interface to recognize in-use HugeTLB pages so we can migrate them, and have much better chances to succeed the call. Link: https://lkml.kernel.org/r/20210419075413.1064-7-osalvador@suse.de Signed-off-by: Oscar Salvador Reviewed-by: Mike Kravetz Acked-by: Michal Hocko Acked-by: David Hildenbrand Cc: Muchun Song Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 5 +++-- mm/compaction.c | 12 +++++++++++- mm/hugetlb.c | 22 +++++++++++++++++----- mm/vmscan.c | 5 +++-- 4 files changed, 34 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ec6a10b8860a..d0f310ae3f82 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -588,7 +588,7 @@ struct huge_bootmem_page { struct hstate *hstate; }; -int isolate_or_dissolve_huge_page(struct page *page); +int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, @@ -871,7 +871,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; -static inline int isolate_or_dissolve_huge_page(struct page *page) +static inline int isolate_or_dissolve_huge_page(struct page *page, + struct list_head *list) { return -ENOMEM; } diff --git a/mm/compaction.c b/mm/compaction.c index b77e1382307f..335862f1661c 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -907,7 +907,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, } if (PageHuge(page) && cc->alloc_contig) { - ret = isolate_or_dissolve_huge_page(page); + ret = isolate_or_dissolve_huge_page(page, &cc->migratepages); /* * Fail isolation in case isolate_or_dissolve_huge_page() @@ -921,6 +921,15 @@ isolate_migratepages_block(struct 
compact_control *cc, unsigned long low_pfn, goto isolate_fail; } + if (PageHuge(page)) { + /* + * Hugepage was successfully isolated and placed + * on the cc->migratepages list. + */ + low_pfn += compound_nr(page) - 1; + goto isolate_success_no_list; + } + /* * Ok, the hugepage was dissolved. Now these pages are * Buddy and cannot be re-allocated because they are @@ -1062,6 +1071,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, isolate_success: list_add(&page->lru, &cc->migratepages); +isolate_success_no_list: cc->nr_migratepages += compound_nr(page); nr_isolated += compound_nr(page); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 92f3cd08946f..b5977d9709ad 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2271,9 +2271,11 @@ static void restore_reserve_on_error(struct hstate *h, * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one * @h: struct hstate old page belongs to * @old_page: Old page to dissolve + * @list: List to isolate the page in case we need to * Returns 0 on success, otherwise negated error. */ -static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page) +static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page, + struct list_head *list) { gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; int nid = page_to_nid(old_page); @@ -2300,9 +2302,13 @@ retry: goto free_new; } else if (page_count(old_page)) { /* - * Someone has grabbed the page, fail for now. + * Someone has grabbed the page, try to isolate it here. + * Fail with -EBUSY if not possible. */ - ret = -EBUSY; + spin_unlock_irq(&hugetlb_lock); + if (!isolate_huge_page(old_page, list)) + ret = -EBUSY; + spin_lock_irq(&hugetlb_lock); goto free_new; } else if (!HPageFreed(old_page)) { /* @@ -2352,10 +2358,11 @@ free_new: return ret; } -int isolate_or_dissolve_huge_page(struct page *page) +int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) { struct hstate *h; struct page *head; + int ret = -EBUSY; /* * The page might have been dissolved from under our feet, so make sure @@ -2380,7 +2387,12 @@ int isolate_or_dissolve_huge_page(struct page *page) if (hstate_is_gigantic(h)) return -ENOMEM; - return alloc_and_dissolve_huge_page(h, head); + if (page_count(head) && isolate_huge_page(head, list)) + ret = 0; + else if (!page_count(head)) + ret = alloc_and_dissolve_huge_page(h, head, list); + + return ret; } struct page *alloc_huge_page(struct vm_area_struct *vma, diff --git a/mm/vmscan.c b/mm/vmscan.c index 562e87cbd7a1..42aaef30633e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1507,8 +1507,9 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone, LIST_HEAD(clean_pages); list_for_each_entry_safe(page, next, page_list, lru) { - if (page_is_file_lru(page) && !PageDirty(page) && - !__PageMovable(page) && !PageUnevictable(page)) { + if (!PageHuge(page) && page_is_file_lru(page) && + !PageDirty(page) && !__PageMovable(page) && + !PageUnevictable(page)) { ClearPageActive(page); list_move(&page->lru, &clean_pages); } -- cgit v1.2.3-71-gd317 From 7677f7fd8be76659cd2d0db8ff4093bbb51c20e5 Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Tue, 4 May 2021 18:35:36 -0700 Subject: userfaultfd: add minor fault registration mode Patch series "userfaultfd: add minor fault handling", v9. Overview ======== This series adds a new userfaultfd feature, UFFD_FEATURE_MINOR_HUGETLBFS. 
When enabled (via the UFFDIO_API ioctl), this feature means that any hugetlbfs VMAs registered with UFFDIO_REGISTER_MODE_MISSING will *also* get events for "minor" faults. By "minor" fault, I mean the following situation: Let there exist two mappings (i.e., VMAs) to the same page(s) (shared memory). One of the mappings is registered with userfaultfd (in minor mode), and the other is not. Via the non-UFFD mapping, the underlying pages have already been allocated & filled with some contents. The UFFD mapping has not yet been faulted in; when it is touched for the first time, this results in what I'm calling a "minor" fault. As a concrete example, when working with hugetlbfs, we have huge_pte_none(), but find_lock_page() finds an existing page. We also add a new ioctl to resolve such faults: UFFDIO_CONTINUE. The idea is, userspace resolves the fault by either a) doing nothing if the contents are already correct, or b) updating the underlying contents using the second, non-UFFD mapping (via memcpy/memset or similar, or something fancier like RDMA, or etc...). In either case, userspace issues UFFDIO_CONTINUE to tell the kernel "I have ensured the page contents are correct, carry on setting up the mapping". Use Case ======== Consider the use case of VM live migration (e.g. under QEMU/KVM): 1. While a VM is still running, we copy the contents of its memory to a target machine. The pages are populated on the target by writing to the non-UFFD mapping, using the setup described above. The VM is still running (and therefore its memory is likely changing), so this may be repeated several times, until we decide the target is "up to date enough". 2. We pause the VM on the source, and start executing on the target machine. During this gap, the VM's user(s) will *see* a pause, so it is desirable to minimize this window. 3. Between the last time any page was copied from the source to the target, and when the VM was paused, the contents of that page may have changed - and therefore the copy we have on the target machine is out of date. Although we can keep track of which pages are out of date, for VMs with large amounts of memory, it is "slow" to transfer this information to the target machine. We want to resume execution before such a transfer would complete. 4. So, the guest begins executing on the target machine. The first time it touches its memory (via the UFFD-registered mapping), userspace wants to intercept this fault. Userspace checks whether or not the page is up to date, and if not, copies the updated page from the source machine, via the non-UFFD mapping. Finally, whether a copy was performed or not, userspace issues a UFFDIO_CONTINUE ioctl to tell the kernel "I have ensured the page contents are correct, carry on setting up the mapping". We don't have to do all of the final updates on-demand. The userfaultfd manager can, in the background, also copy over updated pages once it receives the map of which pages are up-to-date or not. Interaction with Existing APIs ============================== Because this is a feature, a registered VMA could potentially receive both missing and minor faults. I spent some time thinking through how the existing API interacts with the new feature: UFFDIO_CONTINUE cannot be used to resolve non-minor faults, as it does not allocate a new page. If UFFDIO_CONTINUE is used on a non-minor fault: - For non-shared memory or shmem, -EINVAL is returned. - For hugetlb, -EFAULT is returned. UFFDIO_COPY and UFFDIO_ZEROPAGE cannot be used to resolve minor faults. 
Without modifications, the existing UFFDIO_COPY/ZEROPAGE codepath assumes a new page needs to be allocated. This is okay, since userspace must have a second non-UFFD-registered mapping anyway, thus there isn't much reason to want to use these in any case (just memcpy or memset or similar). - If UFFDIO_COPY is used on a minor fault, -EEXIST is returned. - If UFFDIO_ZEROPAGE is used on a minor fault, -EEXIST is returned (or -EINVAL in the case of hugetlb, as UFFDIO_ZEROPAGE is unsupported in any case). - UFFDIO_WRITEPROTECT simply doesn't work with shared memory, and returns -ENOENT in that case (regardless of the kind of fault). Future Work =========== This series only supports hugetlbfs. I have a second series in flight to support shmem as well, extending the functionality. This series is more mature than the shmem support at this point, and the functionality works fully on hugetlbfs, so this series can be merged first and then shmem support will follow. This patch (of 6): This feature allows userspace to intercept "minor" faults. By "minor" faults, I mean the following situation: Let there exist two mappings (i.e., VMAs) to the same page(s). One of the mappings is registered with userfaultfd (in minor mode), and the other is not. Via the non-UFFD mapping, the underlying pages have already been allocated & filled with some contents. The UFFD mapping has not yet been faulted in; when it is touched for the first time, this results in what I'm calling a "minor" fault. As a concrete example, when working with hugetlbfs, we have huge_pte_none(), but find_lock_page() finds an existing page. This commit adds the new registration mode, and sets the relevant flag on the VMAs being registered. In the hugetlb fault path, if we find that we have huge_pte_none(), but find_lock_page() does indeed find an existing page, then we have a "minor" fault, and if the VMA has the userfaultfd registration flag, we call into userfaultfd to handle it. This is implemented as a new registration mode, instead of an API feature. This is because the alternative implementation has significant drawbacks [1]. However, doing it this way requires that we allocate a VM_* flag for the new registration mode. On 32-bit systems, there are no unused bits, so this feature is only supported on architectures with CONFIG_ARCH_USES_HIGH_VMA_FLAGS. When attempting to register a VMA in MINOR mode on 32-bit architectures, we return -EINVAL. [1] https://lore.kernel.org/patchwork/patch/1380226/ [peterx@redhat.com: fix minor fault page leak] Link: https://lkml.kernel.org/r/20210322175132.36659-1-peterx@redhat.com Link: https://lkml.kernel.org/r/20210301222728.176417-1-axelrasmussen@google.com Link: https://lkml.kernel.org/r/20210301222728.176417-2-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Reviewed-by: Peter Xu Reviewed-by: Mike Kravetz Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Andrea Arcangeli Cc: Anshuman Khandual Cc: Catalin Marinas Cc: Chinwen Chang Cc: Huang Ying Cc: Ingo Molnar Cc: Jann Horn Cc: Jerome Glisse Cc: Lokesh Gidra Cc: "Matthew Wilcox (Oracle)" Cc: Michael Ellerman Cc: "Michal Koutn" Cc: Michel Lespinasse Cc: Mike Rapoport Cc: Nicholas Piggin Cc: Peter Xu Cc: Shaohua Li Cc: Shawn Anastasio Cc: Steven Rostedt Cc: Steven Price Cc: Vlastimil Babka Cc: Adam Ruprecht Cc: Axel Rasmussen Cc: Cannon Matthews Cc: "Dr . David Alan Gilbert" Cc: David Rientjes Cc: Mina Almasry Cc: Oliver Upton Cc: Kirill A.
Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/Kconfig | 1 + arch/x86/Kconfig | 1 + fs/proc/task_mmu.c | 3 ++ fs/userfaultfd.c | 78 +++++++++++++++++++++++---------------- include/linux/mm.h | 7 ++++ include/linux/userfaultfd_k.h | 15 +++++++- include/trace/events/mmflags.h | 7 ++++ include/uapi/linux/userfaultfd.h | 15 +++++++- init/Kconfig | 5 +++ mm/hugetlb.c | 80 ++++++++++++++++++++++++++-------------- 10 files changed, 150 insertions(+), 62 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7f2a80091337..04c69f606537 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -213,6 +213,7 @@ config ARM64 select SWIOTLB select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD help ARM 64-bit (AArch64) Linux support. diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index dac15f646f79..1c350e8782ed 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -165,6 +165,7 @@ config X86 select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64 select HAVE_ARCH_USERFAULTFD_WP if X86_64 && USERFAULTFD + select HAVE_ARCH_USERFAULTFD_MINOR if X86_64 && USERFAULTFD select HAVE_ARCH_VMAP_STACK if X86_64 select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_WITHIN_STACK_FRAMES diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index e862cab69583..fc9784544b24 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -661,6 +661,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) [ilog2(VM_PKEY_BIT4)] = "", #endif #endif /* CONFIG_ARCH_HAS_PKEYS */ +#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR + [ilog2(VM_UFFD_MINOR)] = "ui", +#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ }; size_t i; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index e5ce3b4e6c3d..ba35cafa8b0d 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -197,24 +197,21 @@ static inline struct uffd_msg userfault_msg(unsigned long address, msg_init(&msg); msg.event = UFFD_EVENT_PAGEFAULT; msg.arg.pagefault.address = address; + /* + * These flags indicate why the userfault occurred: + * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault. + * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault. + * - Neither of these flags being set indicates a MISSING fault. + * + * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write + * fault. Otherwise, it was a read fault. + */ if (flags & FAULT_FLAG_WRITE) - /* - * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the - * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE - * was not set in a UFFD_EVENT_PAGEFAULT, it means it - * was a read fault, otherwise if set it means it's - * a write fault. - */ msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE; if (reason & VM_UFFD_WP) - /* - * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the - * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was - * not set in a UFFD_EVENT_PAGEFAULT, it means it was - * a missing fault, otherwise if set it means it's a - * write protect fault. 
- */ msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP; + if (reason & VM_UFFD_MINOR) + msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR; if (features & UFFD_FEATURE_THREAD_ID) msg.arg.pagefault.feat.ptid = task_pid_vnr(current); return msg; @@ -401,8 +398,10 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) BUG_ON(ctx->mm != mm); - VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP)); - VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP)); + /* Any unrecognized flag is a bug. */ + VM_BUG_ON(reason & ~__VM_UFFD_FLAGS); + /* 0 or > 1 flags set is a bug; we expect exactly 1. */ + VM_BUG_ON(!reason || (reason & (reason - 1))); if (ctx->features & UFFD_FEATURE_SIGBUS) goto out; @@ -612,7 +611,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, for (vma = mm->mmap; vma; vma = vma->vm_next) if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; - vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + vma->vm_flags &= ~__VM_UFFD_FLAGS; } mmap_write_unlock(mm); @@ -644,7 +643,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) octx = vma->vm_userfaultfd_ctx.ctx; if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; - vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + vma->vm_flags &= ~__VM_UFFD_FLAGS; return 0; } @@ -726,7 +725,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma, } else { /* Drop uffd context if remap feature not enabled */ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; - vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + vma->vm_flags &= ~__VM_UFFD_FLAGS; } } @@ -867,12 +866,12 @@ static int userfaultfd_release(struct inode *inode, struct file *file) for (vma = mm->mmap; vma; vma = vma->vm_next) { cond_resched(); BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ - !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); + !!(vma->vm_flags & __VM_UFFD_FLAGS)); if (vma->vm_userfaultfd_ctx.ctx != ctx) { prev = vma; continue; } - new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); + new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, @@ -1262,9 +1261,19 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma, unsigned long vm_flags) { /* FIXME: add WP support to hugetlbfs and shmem */ - return vma_is_anonymous(vma) || - ((is_vm_hugetlb_page(vma) || vma_is_shmem(vma)) && - !(vm_flags & VM_UFFD_WP)); + if (vm_flags & VM_UFFD_WP) { + if (is_vm_hugetlb_page(vma) || vma_is_shmem(vma)) + return false; + } + + if (vm_flags & VM_UFFD_MINOR) { + /* FIXME: Add minor fault interception for shmem. 
*/ + if (!is_vm_hugetlb_page(vma)) + return false; + } + + return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || + vma_is_shmem(vma); } static int userfaultfd_register(struct userfaultfd_ctx *ctx, @@ -1290,14 +1299,19 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, ret = -EINVAL; if (!uffdio_register.mode) goto out; - if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING| - UFFDIO_REGISTER_MODE_WP)) + if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES) goto out; vm_flags = 0; if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) vm_flags |= VM_UFFD_MISSING; if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) vm_flags |= VM_UFFD_WP; + if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) { +#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR + goto out; +#endif + vm_flags |= VM_UFFD_MINOR; + } ret = validate_range(mm, &uffdio_register.range.start, uffdio_register.range.len); @@ -1341,7 +1355,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, cond_resched(); BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ - !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); + !!(cur->vm_flags & __VM_UFFD_FLAGS)); /* check not compatible vmas */ ret = -EINVAL; @@ -1421,8 +1435,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, start = vma->vm_start; vma_end = min(end, vma->vm_end); - new_flags = (vma->vm_flags & - ~(VM_UFFD_MISSING|VM_UFFD_WP)) | vm_flags; + new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; prev = vma_merge(mm, prev, start, vma_end, new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, vma_policy(vma), @@ -1544,7 +1557,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, cond_resched(); BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ - !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); + !!(cur->vm_flags & __VM_UFFD_FLAGS)); /* * Check not compatible vmas, not strictly required @@ -1595,7 +1608,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); } - new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); + new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; prev = vma_merge(mm, prev, start, vma_end, new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, vma_policy(vma), @@ -1863,6 +1876,9 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx, goto err_out; /* report all available features and ioctls to userland */ uffdio_api.features = UFFD_API_FEATURES; +#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR + uffdio_api.features &= ~UFFD_FEATURE_MINOR_HUGETLBFS; +#endif uffdio_api.ioctls = UFFD_API_IOCTLS; ret = -EFAULT; if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) diff --git a/include/linux/mm.h b/include/linux/mm.h index 011f43605807..1dbb53c44243 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -372,6 +372,13 @@ extern unsigned int kobjsize(const void *objp); # define VM_GROWSUP VM_NONE #endif +#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR +# define VM_UFFD_MINOR_BIT 37 +# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */ +#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ +# define VM_UFFD_MINOR VM_NONE +#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ + /* Bits set in the VMA until the stack is in its final location */ #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index c63ccdae3eab..0390e5ac63b3 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -17,6 +17,9 @@ #include #include +/* The set of all possible 
UFFD-related VM flags. */ +#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR) + /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining * new flags, since they might collide with O_* ones. We want @@ -71,6 +74,11 @@ static inline bool userfaultfd_wp(struct vm_area_struct *vma) return vma->vm_flags & VM_UFFD_WP; } +static inline bool userfaultfd_minor(struct vm_area_struct *vma) +{ + return vma->vm_flags & VM_UFFD_MINOR; +} + static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, pte_t pte) { @@ -85,7 +93,7 @@ static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, static inline bool userfaultfd_armed(struct vm_area_struct *vma) { - return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); + return vma->vm_flags & __VM_UFFD_FLAGS; } extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); @@ -132,6 +140,11 @@ static inline bool userfaultfd_wp(struct vm_area_struct *vma) return false; } +static inline bool userfaultfd_minor(struct vm_area_struct *vma) +{ + return false; +} + static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, pte_t pte) { diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 67018d367b9f..629c7a0eaff2 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -137,6 +137,12 @@ IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) #define IF_HAVE_VM_SOFTDIRTY(flag,name) #endif +#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR +# define IF_HAVE_UFFD_MINOR(flag, name) {flag, name}, +#else +# define IF_HAVE_UFFD_MINOR(flag, name) +#endif + #define __def_vmaflag_names \ {VM_READ, "read" }, \ {VM_WRITE, "write" }, \ @@ -148,6 +154,7 @@ IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) {VM_MAYSHARE, "mayshare" }, \ {VM_GROWSDOWN, "growsdown" }, \ {VM_UFFD_MISSING, "uffd_missing" }, \ +IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \ {VM_PFNMAP, "pfnmap" }, \ {VM_DENYWRITE, "denywrite" }, \ {VM_UFFD_WP, "uffd_wp" }, \ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 5f2d88212f7c..f24dd4fcbad9 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -19,15 +19,19 @@ * means the userland is reading). */ #define UFFD_API ((__u64)0xAA) +#define UFFD_API_REGISTER_MODES (UFFDIO_REGISTER_MODE_MISSING | \ + UFFDIO_REGISTER_MODE_WP | \ + UFFDIO_REGISTER_MODE_MINOR) #define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \ UFFD_FEATURE_EVENT_FORK | \ UFFD_FEATURE_EVENT_REMAP | \ - UFFD_FEATURE_EVENT_REMOVE | \ + UFFD_FEATURE_EVENT_REMOVE | \ UFFD_FEATURE_EVENT_UNMAP | \ UFFD_FEATURE_MISSING_HUGETLBFS | \ UFFD_FEATURE_MISSING_SHMEM | \ UFFD_FEATURE_SIGBUS | \ - UFFD_FEATURE_THREAD_ID) + UFFD_FEATURE_THREAD_ID | \ + UFFD_FEATURE_MINOR_HUGETLBFS) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -127,6 +131,7 @@ struct uffd_msg { /* flags for UFFD_EVENT_PAGEFAULT */ #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ #define UFFD_PAGEFAULT_FLAG_WP (1<<1) /* If reason is VM_UFFD_WP */ +#define UFFD_PAGEFAULT_FLAG_MINOR (1<<2) /* If reason is VM_UFFD_MINOR */ struct uffdio_api { /* userland asks for an API number and the features to enable */ @@ -171,6 +176,10 @@ struct uffdio_api { * * UFFD_FEATURE_THREAD_ID pid of the page faulted task_struct will * be returned, if feature is not requested 0 will be returned. 
+ * + * UFFD_FEATURE_MINOR_HUGETLBFS indicates that minor faults + * can be intercepted (via REGISTER_MODE_MINOR) for + * hugetlbfs-backed pages. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -181,6 +190,7 @@ struct uffdio_api { #define UFFD_FEATURE_EVENT_UNMAP (1<<6) #define UFFD_FEATURE_SIGBUS (1<<7) #define UFFD_FEATURE_THREAD_ID (1<<8) +#define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9) __u64 features; __u64 ioctls; @@ -195,6 +205,7 @@ struct uffdio_register { struct uffdio_range range; #define UFFDIO_REGISTER_MODE_MISSING ((__u64)1<<0) #define UFFDIO_REGISTER_MODE_WP ((__u64)1<<1) +#define UFFDIO_REGISTER_MODE_MINOR ((__u64)1<<2) __u64 mode; /* diff --git a/init/Kconfig b/init/Kconfig index 9acb7762e971..1413413fcb9f 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1644,6 +1644,11 @@ config HAVE_ARCH_USERFAULTFD_WP help Arch has userfaultfd write protection support +config HAVE_ARCH_USERFAULTFD_MINOR + bool + help + Arch has userfaultfd minor fault support + config MEMBARRIER bool "Enable membarrier() system call" if EXPERT default y diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b5977d9709ad..84530876b2ae 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4469,6 +4469,44 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping, return 0; } +static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, + struct address_space *mapping, + pgoff_t idx, + unsigned int flags, + unsigned long haddr, + unsigned long reason) +{ + vm_fault_t ret; + u32 hash; + struct vm_fault vmf = { + .vma = vma, + .address = haddr, + .flags = flags, + + /* + * Hard to debug if it ends up being + * used by a callee that assumes + * something about the other + * uninitialized fields... same as in + * memory.c + */ + }; + + /* + * hugetlb_fault_mutex and i_mmap_rwsem must be + * dropped before handling userfault. Reacquire + * after handling fault to make calling code simpler. + */ + hash = hugetlb_fault_mutex_hash(mapping, idx); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + i_mmap_unlock_read(mapping); + ret = handle_userfault(&vmf, reason); + i_mmap_lock_read(mapping); + mutex_lock(&hugetlb_fault_mutex_table[hash]); + + return ret; +} + static vm_fault_t hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, @@ -4507,35 +4545,11 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, retry: page = find_lock_page(mapping, idx); if (!page) { - /* - * Check for page in userfault range - */ + /* Check for page in userfault range */ if (userfaultfd_missing(vma)) { - u32 hash; - struct vm_fault vmf = { - .vma = vma, - .address = haddr, - .flags = flags, - /* - * Hard to debug if it ends up being - * used by a callee that assumes - * something about the other - * uninitialized fields... same as in - * memory.c - */ - }; - - /* - * hugetlb_fault_mutex and i_mmap_rwsem must be - * dropped before handling userfault. Reacquire - * after handling fault to make calling code simpler. - */ - hash = hugetlb_fault_mutex_hash(mapping, idx); - mutex_unlock(&hugetlb_fault_mutex_table[hash]); - i_mmap_unlock_read(mapping); - ret = handle_userfault(&vmf, VM_UFFD_MISSING); - i_mmap_lock_read(mapping); - mutex_lock(&hugetlb_fault_mutex_table[hash]); + ret = hugetlb_handle_userfault(vma, mapping, idx, + flags, haddr, + VM_UFFD_MISSING); goto out; } @@ -4591,6 +4605,16 @@ retry: VM_FAULT_SET_HINDEX(hstate_index(h)); goto backout_unlocked; } + + /* Check for page in userfault range. 
*/ + if (userfaultfd_minor(vma)) { + unlock_page(page); + put_page(page); + ret = hugetlb_handle_userfault(vma, mapping, idx, + flags, haddr, + VM_UFFD_MINOR); + goto out; + } } /* -- cgit v1.2.3-71-gd317 From 0d9cadabd193c6008d256533f544de8206fd3a80 Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Tue, 4 May 2021 18:35:40 -0700 Subject: userfaultfd: disable huge PMD sharing for MINOR registered VMAs As the comment says: for the MINOR fault use case, although the page might be present and populated in the other (non-UFFD-registered) half of the mapping, it may be out of date, and we explicitly want userspace to get a minor fault so it can check and potentially update the page's contents. Huge PMD sharing would prevent these faults from occurring for suitably aligned areas, so disable it upon UFFD registration. Link: https://lkml.kernel.org/r/20210301222728.176417-3-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Reviewed-by: Peter Xu Reviewed-by: Mike Kravetz Cc: Adam Ruprecht Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Andrea Arcangeli Cc: Anshuman Khandual Cc: Cannon Matthews Cc: Catalin Marinas Cc: Chinwen Chang Cc: David Rientjes Cc: "Dr . David Alan Gilbert" Cc: Huang Ying Cc: Ingo Molnar Cc: Jann Horn Cc: Jerome Glisse Cc: Kirill A. Shutemov Cc: Lokesh Gidra Cc: "Matthew Wilcox (Oracle)" Cc: Michael Ellerman Cc: "Michal Koutn" Cc: Michel Lespinasse Cc: Mike Rapoport Cc: Mina Almasry Cc: Nicholas Piggin Cc: Oliver Upton Cc: Shaohua Li Cc: Shawn Anastasio Cc: Steven Price Cc: Steven Rostedt Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/userfaultfd_k.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 0390e5ac63b3..e060d5f77cc5 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -56,12 +56,19 @@ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, } /* - * Never enable huge pmd sharing on uffd-wp registered vmas, because uffd-wp - * protect information is per pgtable entry. + * Never enable huge pmd sharing on some uffd registered vmas: + * + * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry. + * + * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for + * VMAs which share huge pmds. (If you have two mappings to the same + * underlying pages, and fault in the non-UFFD-registered one with a write, + * with huge pmd sharing this would *also* setup the second UFFD-registered + * mapping, and we'd not get minor faults.) */ static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma) { - return vma->vm_flags & VM_UFFD_WP; + return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); } static inline bool userfaultfd_missing(struct vm_area_struct *vma) -- cgit v1.2.3-71-gd317 From 714c189108244f1df579689061db1d785d92e7e2 Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Tue, 4 May 2021 18:35:45 -0700 Subject: userfaultfd: hugetlbfs: only compile UFFD helpers if config enabled For background, mm/userfaultfd.c provides a general mcopy_atomic implementation. But some types of memory (i.e., hugetlb and shmem) need a slightly different implementation, so they provide their own helpers for this. In other words, userfaultfd is the only caller of these functions. This patch achieves two things: 1. 
Don't spend time compiling code which will end up never being referenced anyway (a small build time optimization). 2. In patches later in this series, we extend the signature of these helpers with UFFD-specific state (a mode enumeration). Once this happens, we *have to* either not compile the helpers, or unconditionally define the UFFD-only state (which seems messier to me). This includes the declarations in the headers, as otherwise they'd yield warnings about implicitly defining the type of those arguments. Link: https://lkml.kernel.org/r/20210301222728.176417-4-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Reviewed-by: Mike Kravetz Reviewed-by: Peter Xu Cc: Adam Ruprecht Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Andrea Arcangeli Cc: Anshuman Khandual Cc: Cannon Matthews Cc: Catalin Marinas Cc: Chinwen Chang Cc: David Rientjes Cc: "Dr . David Alan Gilbert" Cc: Huang Ying Cc: Ingo Molnar Cc: Jann Horn Cc: Jerome Glisse Cc: Kirill A. Shutemov Cc: Lokesh Gidra Cc: "Matthew Wilcox (Oracle)" Cc: Michael Ellerman Cc: "Michal Koutn" Cc: Michel Lespinasse Cc: Mike Rapoport Cc: Mina Almasry Cc: Nicholas Piggin Cc: Oliver Upton Cc: Shaohua Li Cc: Shawn Anastasio Cc: Steven Price Cc: Steven Rostedt Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 4 ++++ mm/hugetlb.c | 2 ++ 2 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d0f310ae3f82..a1dbe4568707 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -134,11 +134,13 @@ void hugetlb_show_meminfo(void); unsigned long hugetlb_total_pages(void); vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); +#ifdef CONFIG_USERFAULTFD int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, struct page **pagep); +#endif /* CONFIG_USERFAULTFD */ bool hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); @@ -310,6 +312,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, BUG(); } +#ifdef CONFIG_USERFAULTFD static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, @@ -320,6 +323,7 @@ static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, BUG(); return 0; } +#endif /* CONFIG_USERFAULTFD */ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 84530876b2ae..b105a455124d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4855,6 +4855,7 @@ out_mutex: return ret; } +#ifdef CONFIG_USERFAULTFD /* * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with * modifications for huge pages. @@ -4985,6 +4986,7 @@ out_release_nounlock: put_page(page); goto out; } +#endif /* CONFIG_USERFAULTFD */ static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma, int refs, struct page **pages, -- cgit v1.2.3-71-gd317 From f619147104c8ea71e120e4936d2b68ec11a1e527 Mon Sep 17 00:00:00 2001 From: Axel Rasmussen Date: Tue, 4 May 2021 18:35:49 -0700 Subject: userfaultfd: add UFFDIO_CONTINUE ioctl This ioctl is how userspace ought to resolve "minor" userfaults. The idea is, userspace is notified that a minor fault has occurred. It might change the contents of the page using its second non-UFFD mapping, or not. 
Then, it calls UFFDIO_CONTINUE to tell the kernel "I have ensured the page contents are correct, carry on setting up the mapping". Note that it doesn't make much sense to use UFFDIO_{COPY,ZEROPAGE} for MINOR registered VMAs. ZEROPAGE maps the VMA to the zero page; but in the minor fault case, we already have some pre-existing underlying page. Likewise, UFFDIO_COPY isn't useful if we have a second non-UFFD mapping. We'd just use memcpy() or similar instead. It turns out hugetlb_mcopy_atomic_pte() already does very close to what we want, if an existing page is provided via `struct page **pagep`. We already special-case the behavior a bit for the UFFDIO_ZEROPAGE case, so just extend that design: add an enum for the three modes of operation, and make the small adjustments needed for the MCOPY_ATOMIC_CONTINUE case. (Basically, look up the existing page, and avoid adding the existing page to the page cache or calling set_page_huge_active() on it.) Link: https://lkml.kernel.org/r/20210301222728.176417-5-axelrasmussen@google.com Signed-off-by: Axel Rasmussen Reviewed-by: Peter Xu Cc: Adam Ruprecht Cc: Alexander Viro Cc: Alexey Dobriyan Cc: Andrea Arcangeli Cc: Anshuman Khandual Cc: Cannon Matthews Cc: Catalin Marinas Cc: Chinwen Chang Cc: David Rientjes Cc: "Dr . David Alan Gilbert" Cc: Huang Ying Cc: Ingo Molnar Cc: Jann Horn Cc: Jerome Glisse Cc: Kirill A. Shutemov Cc: Lokesh Gidra Cc: "Matthew Wilcox (Oracle)" Cc: Michael Ellerman Cc: "Michal Koutn" Cc: Michel Lespinasse Cc: Mike Kravetz Cc: Mike Rapoport Cc: Mina Almasry Cc: Nicholas Piggin Cc: Oliver Upton Cc: Shaohua Li Cc: Shawn Anastasio Cc: Steven Price Cc: Steven Rostedt Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/userfaultfd.c | 67 ++++++++++++++++++++++++++++++++++++++++ include/linux/hugetlb.h | 3 ++ include/linux/userfaultfd_k.h | 18 +++++++++++ include/uapi/linux/userfaultfd.h | 21 +++++++++++-- mm/hugetlb.c | 40 +++++++++++++++--------- mm/userfaultfd.c | 37 +++++++++++++--------- 6 files changed, 156 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index ba35cafa8b0d..14f92285d04f 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -1487,6 +1487,10 @@ out_unlock: if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)) ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT); + /* CONTINUE ioctl is only supported for MINOR ranges. */ + if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR)) + ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE); + /* * Now that we scanned all vmas we can already tell * userland which ioctls methods are guaranteed to @@ -1840,6 +1844,66 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, return ret; } +static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) +{ + __s64 ret; + struct uffdio_continue uffdio_continue; + struct uffdio_continue __user *user_uffdio_continue; + struct userfaultfd_wake_range range; + + user_uffdio_continue = (struct uffdio_continue __user *)arg; + + ret = -EAGAIN; + if (READ_ONCE(ctx->mmap_changing)) + goto out; + + ret = -EFAULT; + if (copy_from_user(&uffdio_continue, user_uffdio_continue, + /* don't copy the output fields */ + sizeof(uffdio_continue) - (sizeof(__s64)))) + goto out; + + ret = validate_range(ctx->mm, &uffdio_continue.range.start, + uffdio_continue.range.len); + if (ret) + goto out; + + ret = -EINVAL; + /* double check for wraparound just in case. 
*/ + if (uffdio_continue.range.start + uffdio_continue.range.len <= + uffdio_continue.range.start) { + goto out; + } + if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE) + goto out; + + if (mmget_not_zero(ctx->mm)) { + ret = mcopy_continue(ctx->mm, uffdio_continue.range.start, + uffdio_continue.range.len, + &ctx->mmap_changing); + mmput(ctx->mm); + } else { + return -ESRCH; + } + + if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) + return -EFAULT; + if (ret < 0) + goto out; + + /* len == 0 would wake all */ + BUG_ON(!ret); + range.len = ret; + if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) { + range.start = uffdio_continue.range.start; + wake_userfault(ctx, &range); + } + ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN; + +out: + return ret; +} + static inline unsigned int uffd_ctx_features(__u64 user_features) { /* @@ -1927,6 +1991,9 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd, case UFFDIO_WRITEPROTECT: ret = userfaultfd_writeprotect(ctx, arg); break; + case UFFDIO_CONTINUE: + ret = userfaultfd_continue(ctx, arg); + break; } return ret; } diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index a1dbe4568707..b92f25ccef58 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -11,6 +11,7 @@ #include #include #include +#include struct ctl_table; struct user_struct; @@ -139,6 +140,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, + enum mcopy_atomic_mode mode, struct page **pagep); #endif /* CONFIG_USERFAULTFD */ bool hugetlb_reserve_pages(struct inode *inode, long from, long to, @@ -318,6 +320,7 @@ static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, + enum mcopy_atomic_mode mode, struct page **pagep) { BUG(); diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index e060d5f77cc5..794d1538b8ba 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -37,6 +37,22 @@ extern int sysctl_unprivileged_userfaultfd; extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason); +/* + * The mode of operation for __mcopy_atomic and its helpers. + * + * This is almost an implementation detail (mcopy_atomic below doesn't take this + * as a parameter), but it's exposed here because memory-kind-specific + * implementations (e.g. hugetlbfs) need to know the mode of operation. + */ +enum mcopy_atomic_mode { + /* A normal copy_from_user into the destination range. */ + MCOPY_ATOMIC_NORMAL, + /* Don't copy; map the destination range to the zero page. */ + MCOPY_ATOMIC_ZEROPAGE, + /* Just install pte(s) with the existing page(s) in the page cache. 
*/ + MCOPY_ATOMIC_CONTINUE, +}; + extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool *mmap_changing, __u64 mode); @@ -44,6 +60,8 @@ extern ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long len, bool *mmap_changing); +extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start, + unsigned long len, bool *mmap_changing); extern int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, unsigned long len, bool enable_wp, bool *mmap_changing); diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index f24dd4fcbad9..bafbeb1a2624 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -40,10 +40,12 @@ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ (__u64)1 << _UFFDIO_ZEROPAGE | \ - (__u64)1 << _UFFDIO_WRITEPROTECT) + (__u64)1 << _UFFDIO_WRITEPROTECT | \ + (__u64)1 << _UFFDIO_CONTINUE) #define UFFD_API_RANGE_IOCTLS_BASIC \ ((__u64)1 << _UFFDIO_WAKE | \ - (__u64)1 << _UFFDIO_COPY) + (__u64)1 << _UFFDIO_COPY | \ + (__u64)1 << _UFFDIO_CONTINUE) /* * Valid ioctl command number range with this API is from 0x00 to @@ -59,6 +61,7 @@ #define _UFFDIO_COPY (0x03) #define _UFFDIO_ZEROPAGE (0x04) #define _UFFDIO_WRITEPROTECT (0x06) +#define _UFFDIO_CONTINUE (0x07) #define _UFFDIO_API (0x3F) /* userfaultfd ioctl ids */ @@ -77,6 +80,8 @@ struct uffdio_zeropage) #define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \ struct uffdio_writeprotect) +#define UFFDIO_CONTINUE _IOR(UFFDIO, _UFFDIO_CONTINUE, \ + struct uffdio_continue) /* read() structure */ struct uffd_msg { @@ -268,6 +273,18 @@ struct uffdio_writeprotect { __u64 mode; }; +struct uffdio_continue { + struct uffdio_range range; +#define UFFDIO_CONTINUE_MODE_DONTWAKE ((__u64)1<<0) + __u64 mode; + + /* + * Fields below here are written by the ioctl and must be at the end: + * the copy_from_user will not read past here. + */ + __s64 mapped; +}; + /* * Flags for the userfaultfd(2) system call itself. */ diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b105a455124d..533e5a26e437 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include "internal.h" @@ -4865,8 +4864,10 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, + enum mcopy_atomic_mode mode, struct page **pagep) { + bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE); struct address_space *mapping; pgoff_t idx; unsigned long size; @@ -4876,8 +4877,17 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, spinlock_t *ptl; int ret; struct page *page; + int writable; - if (!*pagep) { + mapping = dst_vma->vm_file->f_mapping; + idx = vma_hugecache_offset(h, dst_vma, dst_addr); + + if (is_continue) { + ret = -EFAULT; + page = find_lock_page(mapping, idx); + if (!page) + goto out; + } else if (!*pagep) { ret = -ENOMEM; page = alloc_huge_page(dst_vma, dst_addr, 0); if (IS_ERR(page)) @@ -4906,13 +4916,8 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, */ __SetPageUptodate(page); - mapping = dst_vma->vm_file->f_mapping; - idx = vma_hugecache_offset(h, dst_vma, dst_addr); - - /* - * If shared, add to page cache - */ - if (vm_shared) { + /* Add shared, newly allocated pages to the page cache. 
*/ + if (vm_shared && !is_continue) { size = i_size_read(mapping->host) >> huge_page_shift(h); ret = -EFAULT; if (idx >= size) @@ -4957,8 +4962,14 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); } - _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE); - if (dst_vma->vm_flags & VM_WRITE) + /* For CONTINUE on a non-shared VMA, don't set VM_WRITE for CoW. */ + if (is_continue && !vm_shared) + writable = 0; + else + writable = dst_vma->vm_flags & VM_WRITE; + + _dst_pte = make_huge_pte(dst_vma, page, writable); + if (writable) _dst_pte = huge_pte_mkdirty(_dst_pte); _dst_pte = pte_mkyoung(_dst_pte); @@ -4972,15 +4983,16 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, update_mmu_cache(dst_vma, dst_addr, dst_pte); spin_unlock(ptl); - SetHPageMigratable(page); - if (vm_shared) + if (!is_continue) + SetHPageMigratable(page); + if (vm_shared || is_continue) unlock_page(page); ret = 0; out: return ret; out_release_unlock: spin_unlock(ptl); - if (vm_shared) + if (vm_shared || is_continue) unlock_page(page); out_release_nounlock: put_page(page); diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 063cbb17e8d8..e14b3820c6a8 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -207,7 +207,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool zeropage) + enum mcopy_atomic_mode mode) { int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED; int vm_shared = dst_vma->vm_flags & VM_SHARED; @@ -227,7 +227,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, * by THP. Since we can not reliably insert a zero page, this * feature is not supported. 
*/ - if (zeropage) { + if (mode == MCOPY_ATOMIC_ZEROPAGE) { mmap_read_unlock(dst_mm); return -EINVAL; } @@ -273,8 +273,6 @@ retry: } while (src_addr < src_start + len) { - pte_t dst_pteval; - BUG_ON(dst_addr >= dst_start + len); /* @@ -297,16 +295,16 @@ retry: goto out_unlock; } - err = -EEXIST; - dst_pteval = huge_ptep_get(dst_pte); - if (!huge_pte_none(dst_pteval)) { + if (mode != MCOPY_ATOMIC_CONTINUE && + !huge_pte_none(huge_ptep_get(dst_pte))) { + err = -EEXIST; mutex_unlock(&hugetlb_fault_mutex_table[hash]); i_mmap_unlock_read(mapping); goto out_unlock; } err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, - dst_addr, src_addr, &page); + dst_addr, src_addr, mode, &page); mutex_unlock(&hugetlb_fault_mutex_table[hash]); i_mmap_unlock_read(mapping); @@ -408,7 +406,7 @@ extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool zeropage); + enum mcopy_atomic_mode mode); #endif /* CONFIG_HUGETLB_PAGE */ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm, @@ -458,7 +456,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool zeropage, + enum mcopy_atomic_mode mcopy_mode, bool *mmap_changing, __u64 mode) { @@ -469,6 +467,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm, long copied; struct page *page; bool wp_copy; + bool zeropage = (mcopy_mode == MCOPY_ATOMIC_ZEROPAGE); /* * Sanitize the command parameters: @@ -527,10 +526,12 @@ retry: */ if (is_vm_hugetlb_page(dst_vma)) return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, - src_start, len, zeropage); + src_start, len, mcopy_mode); if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) goto out_unlock; + if (mcopy_mode == MCOPY_ATOMIC_CONTINUE) + goto out_unlock; /* * Ensure the dst_vma has a anon_vma or this page @@ -626,14 +627,22 @@ ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool *mmap_changing, __u64 mode) { - return __mcopy_atomic(dst_mm, dst_start, src_start, len, false, - mmap_changing, mode); + return __mcopy_atomic(dst_mm, dst_start, src_start, len, + MCOPY_ATOMIC_NORMAL, mmap_changing, mode); } ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start, unsigned long len, bool *mmap_changing) { - return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0); + return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE, + mmap_changing, 0); +} + +ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start, + unsigned long len, bool *mmap_changing) +{ + return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE, + mmap_changing, 0); } int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, -- cgit v1.2.3-71-gd317 From 202e35db5e719ee8af6028183403f475e243f82d Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Tue, 4 May 2021 18:36:04 -0700 Subject: mm/vmscan: replace implicit RECLAIM_ZONE checks with explicit checks RECLAIM_ZONE was assumed to be unused because it was never explicitly used in the kernel. However, there were a number of places where it was checked implicitly by checking 'node_reclaim_mode' for a zero value. These zero checks are not great because it is not obvious what a zero mode *means* in the code. Replace them with a helper which makes it more obvious: node_reclaim_enabled(). 
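As a hedged, simplified illustration (not taken from the patch itself; the helper body mirrors the include/linux/swap.h hunk below, while the call sites are hypothetical), the readability difference looks like this:

/* The new helper checks each RECLAIM_* bit explicitly. */
static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE | RECLAIM_WRITE | RECLAIM_UNMAP);
}

/* Before: relies on the reader knowing what a zero mode means. */
if (!node_reclaim_mode)
	return false;

/* After: the intent ("is node reclaim enabled at all?") is explicit. */
if (!node_reclaim_enabled())
	return false;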
This helper also provides a handy place to explicitly check the RECLAIM_ZONE bit itself. Check it explicitly there to make it more obvious where the bit can affect behavior. This should have no functional impact. Link: https://lkml.kernel.org/r/20210219172559.BF589C44@viggo.jf.intel.com Signed-off-by: Dave Hansen Reviewed-by: Ben Widawsky Reviewed-by: Oscar Salvador Acked-by: Christoph Lameter Acked-by: David Rientjes Cc: Alex Shi Cc: "Tobin C. Harding" Cc: Huang Ying Cc: Dan Williams Cc: Qian Cai Cc: Daniel Wagner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 7 +++++++ mm/khugepaged.c | 2 +- mm/page_alloc.c | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 4cc6ec3bf0ab..42191da1bdc9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -12,6 +12,7 @@ #include #include #include +#include #include struct notifier_block; @@ -378,6 +379,12 @@ extern int sysctl_min_slab_ratio; #define node_reclaim_mode 0 #endif +static inline bool node_reclaim_enabled(void) +{ + /* Is any node_reclaim_mode bit set? */ + return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP); +} + extern void check_move_unevictable_pages(struct pagevec *pvec); extern int kswapd_run(int nid); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index a03569eda183..ea74da3232ab 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -809,7 +809,7 @@ static bool khugepaged_scan_abort(int nid) * If node_reclaim_mode is disabled, then no extra effort is made to * allocate memory locally. */ - if (!node_reclaim_mode) + if (!node_reclaim_enabled()) return false; /* If there is a count for this node already, it must be acceptable */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 80fa6e0f9ed9..19cdd8a829dc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3968,7 +3968,7 @@ retry: if (alloc_flags & ALLOC_NO_WATERMARKS) goto try_this_zone; - if (node_reclaim_mode == 0 || + if (!node_reclaim_enabled() || !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) continue; -- cgit v1.2.3-71-gd317 From 2bfd36374edd9ed7f2ebf66cacebedf7273901cb Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Tue, 4 May 2021 18:36:11 -0700 Subject: mm: vmscan: consolidate shrinker_maps handling code The shrinker map management is not purely memcg specific, it is at the intersection between memory cgroup and shrinkers. It's allocation and assignment of a structure, and the only memcg bit is the map is being stored in a memcg structure. So move the shrinker_maps handling code into vmscan.c for tighter integration with shrinker code, and remove the "memcg_" prefix. There is no functional change. 
Link: https://lkml.kernel.org/r/20210311190845.9708-3-shy828301@gmail.com Signed-off-by: Yang Shi Acked-by: Vlastimil Babka Acked-by: Kirill Tkhai Acked-by: Roman Gushchin Reviewed-by: Shakeel Butt Cc: Dave Chinner Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 11 ++-- mm/huge_memory.c | 4 +- mm/list_lru.c | 6 +-- mm/memcontrol.c | 130 ++------------------------------------------ mm/vmscan.c | 132 +++++++++++++++++++++++++++++++++++++++++++-- 5 files changed, 142 insertions(+), 141 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5904716f29ba..7dbd2c9bad32 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1610,10 +1610,9 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) return false; } -extern int memcg_expand_shrinker_maps(int new_id); - -extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id); +int alloc_shrinker_maps(struct mem_cgroup *memcg); +void free_shrinker_maps(struct mem_cgroup *memcg); +void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); #else #define mem_cgroup_sockets_enabled 0 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }; @@ -1623,8 +1622,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) return false; } -static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id) +static inline void set_shrinker_bit(struct mem_cgroup *memcg, + int nid, int shrinker_id) { } #endif diff --git a/mm/huge_memory.c b/mm/huge_memory.c index b3788683f7d3..98456017744d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2830,8 +2830,8 @@ void deferred_split_huge_page(struct page *page) ds_queue->split_queue_len++; #ifdef CONFIG_MEMCG if (memcg) - memcg_set_shrinker_bit(memcg, page_to_nid(page), - deferred_split_shrinker.id); + set_shrinker_bit(memcg, page_to_nid(page), + deferred_split_shrinker.id); #endif } spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); diff --git a/mm/list_lru.c b/mm/list_lru.c index 6f067b6b935f..cd58790d0fb3 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -125,8 +125,8 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item) list_add_tail(item, &l->list); /* Set shrinker bit if the first element was added */ if (!l->nr_items++) - memcg_set_shrinker_bit(memcg, nid, - lru_shrinker_id(lru)); + set_shrinker_bit(memcg, nid, + lru_shrinker_id(lru)); nlru->nr_items++; spin_unlock(&nlru->lock); return true; @@ -540,7 +540,7 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, if (src->nr_items) { dst->nr_items += src->nr_items; - memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); + set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); src->nr_items = 0; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c100265dc393..09fd17ba6de2 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -400,130 +400,6 @@ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); EXPORT_SYMBOL(memcg_kmem_enabled_key); #endif -static int memcg_shrinker_map_size; -static DEFINE_MUTEX(memcg_shrinker_map_mutex); - -static void memcg_free_shrinker_map_rcu(struct rcu_head *head) -{ - kvfree(container_of(head, struct memcg_shrinker_map, rcu)); -} - -static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg, - int size, int old_size) -{ - struct memcg_shrinker_map *new, *old; - struct mem_cgroup_per_node *pn; - 
int nid; - - lockdep_assert_held(&memcg_shrinker_map_mutex); - - for_each_node(nid) { - pn = memcg->nodeinfo[nid]; - old = rcu_dereference_protected(pn->shrinker_map, true); - /* Not yet online memcg */ - if (!old) - return 0; - - new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid); - if (!new) - return -ENOMEM; - - /* Set all old bits, clear all new bits */ - memset(new->map, (int)0xff, old_size); - memset((void *)new->map + old_size, 0, size - old_size); - - rcu_assign_pointer(pn->shrinker_map, new); - call_rcu(&old->rcu, memcg_free_shrinker_map_rcu); - } - - return 0; -} - -static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) -{ - struct mem_cgroup_per_node *pn; - struct memcg_shrinker_map *map; - int nid; - - if (mem_cgroup_is_root(memcg)) - return; - - for_each_node(nid) { - pn = memcg->nodeinfo[nid]; - map = rcu_dereference_protected(pn->shrinker_map, true); - kvfree(map); - rcu_assign_pointer(pn->shrinker_map, NULL); - } -} - -static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) -{ - struct memcg_shrinker_map *map; - int nid, size, ret = 0; - - if (mem_cgroup_is_root(memcg)) - return 0; - - mutex_lock(&memcg_shrinker_map_mutex); - size = memcg_shrinker_map_size; - for_each_node(nid) { - map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid); - if (!map) { - memcg_free_shrinker_maps(memcg); - ret = -ENOMEM; - break; - } - rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); - } - mutex_unlock(&memcg_shrinker_map_mutex); - - return ret; -} - -int memcg_expand_shrinker_maps(int new_id) -{ - int size, old_size, ret = 0; - struct mem_cgroup *memcg; - - size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); - old_size = memcg_shrinker_map_size; - if (size <= old_size) - return 0; - - mutex_lock(&memcg_shrinker_map_mutex); - if (!root_mem_cgroup) - goto unlock; - - for_each_mem_cgroup(memcg) { - if (mem_cgroup_is_root(memcg)) - continue; - ret = memcg_expand_one_shrinker_map(memcg, size, old_size); - if (ret) { - mem_cgroup_iter_break(NULL, memcg); - goto unlock; - } - } -unlock: - if (!ret) - memcg_shrinker_map_size = size; - mutex_unlock(&memcg_shrinker_map_mutex); - return ret; -} - -void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) -{ - if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { - struct memcg_shrinker_map *map; - - rcu_read_lock(); - map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); - /* Pairs with smp mb in shrink_slab() */ - smp_mb__before_atomic(); - set_bit(shrinker_id, map->map); - rcu_read_unlock(); - } -} - /** * mem_cgroup_css_from_page - css of the memcg associated with a page * @page: page of interest @@ -5242,11 +5118,11 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css) struct mem_cgroup *memcg = mem_cgroup_from_css(css); /* - * A memcg must be visible for memcg_expand_shrinker_maps() + * A memcg must be visible for expand_shrinker_maps() * by the time the maps are allocated. So, we allocate maps * here, when for_each_mem_cgroup() can't skip it. 
*/ - if (memcg_alloc_shrinker_maps(memcg)) { + if (alloc_shrinker_maps(memcg)) { mem_cgroup_id_remove(memcg); return -ENOMEM; } @@ -5310,7 +5186,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) vmpressure_cleanup(&memcg->vmpressure); cancel_work_sync(&memcg->high_work); mem_cgroup_remove_from_trees(memcg); - memcg_free_shrinker_maps(memcg); + free_shrinker_maps(memcg); memcg_free_kmem(memcg); mem_cgroup_free(memcg); } diff --git a/mm/vmscan.c b/mm/vmscan.c index d231af7cae06..bead5ae1e7e2 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -185,6 +185,132 @@ static LIST_HEAD(shrinker_list); static DECLARE_RWSEM(shrinker_rwsem); #ifdef CONFIG_MEMCG + +static int memcg_shrinker_map_size; +static DEFINE_MUTEX(memcg_shrinker_map_mutex); + +static void free_shrinker_map_rcu(struct rcu_head *head) +{ + kvfree(container_of(head, struct memcg_shrinker_map, rcu)); +} + +static int expand_one_shrinker_map(struct mem_cgroup *memcg, + int size, int old_size) +{ + struct memcg_shrinker_map *new, *old; + struct mem_cgroup_per_node *pn; + int nid; + + lockdep_assert_held(&memcg_shrinker_map_mutex); + + for_each_node(nid) { + pn = memcg->nodeinfo[nid]; + old = rcu_dereference_protected(pn->shrinker_map, true); + /* Not yet online memcg */ + if (!old) + return 0; + + new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid); + if (!new) + return -ENOMEM; + + /* Set all old bits, clear all new bits */ + memset(new->map, (int)0xff, old_size); + memset((void *)new->map + old_size, 0, size - old_size); + + rcu_assign_pointer(pn->shrinker_map, new); + call_rcu(&old->rcu, free_shrinker_map_rcu); + } + + return 0; +} + +void free_shrinker_maps(struct mem_cgroup *memcg) +{ + struct mem_cgroup_per_node *pn; + struct memcg_shrinker_map *map; + int nid; + + if (mem_cgroup_is_root(memcg)) + return; + + for_each_node(nid) { + pn = memcg->nodeinfo[nid]; + map = rcu_dereference_protected(pn->shrinker_map, true); + kvfree(map); + rcu_assign_pointer(pn->shrinker_map, NULL); + } +} + +int alloc_shrinker_maps(struct mem_cgroup *memcg) +{ + struct memcg_shrinker_map *map; + int nid, size, ret = 0; + + if (mem_cgroup_is_root(memcg)) + return 0; + + mutex_lock(&memcg_shrinker_map_mutex); + size = memcg_shrinker_map_size; + for_each_node(nid) { + map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid); + if (!map) { + free_shrinker_maps(memcg); + ret = -ENOMEM; + break; + } + rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); + } + mutex_unlock(&memcg_shrinker_map_mutex); + + return ret; +} + +static int expand_shrinker_maps(int new_id) +{ + int size, old_size, ret = 0; + struct mem_cgroup *memcg; + + size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); + old_size = memcg_shrinker_map_size; + if (size <= old_size) + return 0; + + mutex_lock(&memcg_shrinker_map_mutex); + if (!root_mem_cgroup) + goto unlock; + + memcg = mem_cgroup_iter(NULL, NULL, NULL); + do { + if (mem_cgroup_is_root(memcg)) + continue; + ret = expand_one_shrinker_map(memcg, size, old_size); + if (ret) { + mem_cgroup_iter_break(NULL, memcg); + goto unlock; + } + } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); +unlock: + if (!ret) + memcg_shrinker_map_size = size; + mutex_unlock(&memcg_shrinker_map_mutex); + return ret; +} + +void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) +{ + if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { + struct memcg_shrinker_map *map; + + rcu_read_lock(); + map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); + /* Pairs with smp mb 
in shrink_slab() */ + smp_mb__before_atomic(); + set_bit(shrinker_id, map->map); + rcu_read_unlock(); + } +} + /* * We allow subsystems to populate their shrinker-related * LRU lists before register_shrinker_prepared() is called @@ -212,7 +338,7 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker) goto unlock; if (id >= shrinker_nr_max) { - if (memcg_expand_shrinker_maps(id)) { + if (expand_shrinker_maps(id)) { idr_remove(&shrinker_idr, id); goto unlock; } @@ -590,7 +716,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, * case, we invoke the shrinker one more time and reset * the bit if it reports that it is not empty anymore. * The memory barrier here pairs with the barrier in - * memcg_set_shrinker_bit(): + * set_shrinker_bit(): * * list_lru_add() shrink_slab_memcg() * list_add_tail() clear_bit() @@ -602,7 +728,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, if (ret == SHRINK_EMPTY) ret = 0; else - memcg_set_shrinker_bit(memcg, nid, i); + set_shrinker_bit(memcg, nid, i); } freed += ret; -- cgit v1.2.3-71-gd317 From e4262c4f51d6373447c9d89093f49ff6b1e607be Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Tue, 4 May 2021 18:36:23 -0700 Subject: mm: memcontrol: rename shrinker_map to shrinker_info The following patch is going to add nr_deferred into shrinker_map, the change will make shrinker_map not only include map anymore, so rename it to "memcg_shrinker_info". And this should make the patch adding nr_deferred cleaner and readable and make review easier. Also remove the "memcg_" prefix. Link: https://lkml.kernel.org/r/20210311190845.9708-7-shy828301@gmail.com Signed-off-by: Yang Shi Acked-by: Vlastimil Babka Acked-by: Kirill Tkhai Acked-by: Roman Gushchin Reviewed-by: Shakeel Butt Cc: Dave Chinner Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 8 +++---- mm/memcontrol.c | 6 ++--- mm/vmscan.c | 58 +++++++++++++++++++++++----------------------- 3 files changed, 36 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 7dbd2c9bad32..6cd800fe9a67 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -117,7 +117,7 @@ struct batched_lruvec_stat { * Bitmap of shrinker::id corresponding to memcg-aware shrinkers, * which have elements charged to this memcg. 
*/ -struct memcg_shrinker_map { +struct shrinker_info { struct rcu_head rcu; unsigned long map[]; }; @@ -145,7 +145,7 @@ struct mem_cgroup_per_node { struct mem_cgroup_reclaim_iter iter; - struct memcg_shrinker_map __rcu *shrinker_map; + struct shrinker_info __rcu *shrinker_info; struct rb_node tree_node; /* RB tree node */ unsigned long usage_in_excess;/* Set to the value by which */ @@ -1610,8 +1610,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) return false; } -int alloc_shrinker_maps(struct mem_cgroup *memcg); -void free_shrinker_maps(struct mem_cgroup *memcg); +int alloc_shrinker_info(struct mem_cgroup *memcg); +void free_shrinker_info(struct mem_cgroup *memcg); void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); #else #define mem_cgroup_sockets_enabled 0 diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 09fd17ba6de2..36f31d611dea 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5118,11 +5118,11 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css) struct mem_cgroup *memcg = mem_cgroup_from_css(css); /* - * A memcg must be visible for expand_shrinker_maps() + * A memcg must be visible for expand_shrinker_info() * by the time the maps are allocated. So, we allocate maps * here, when for_each_mem_cgroup() can't skip it. */ - if (alloc_shrinker_maps(memcg)) { + if (alloc_shrinker_info(memcg)) { mem_cgroup_id_remove(memcg); return -ENOMEM; } @@ -5186,7 +5186,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) vmpressure_cleanup(&memcg->vmpressure); cancel_work_sync(&memcg->high_work); mem_cgroup_remove_from_trees(memcg); - free_shrinker_maps(memcg); + free_shrinker_info(memcg); memcg_free_kmem(memcg); mem_cgroup_free(memcg); } diff --git a/mm/vmscan.c b/mm/vmscan.c index aa99a835cf89..518084ce8757 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -192,16 +192,16 @@ static inline int shrinker_map_size(int nr_items) return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long)); } -static int expand_one_shrinker_map(struct mem_cgroup *memcg, - int size, int old_size) +static int expand_one_shrinker_info(struct mem_cgroup *memcg, + int size, int old_size) { - struct memcg_shrinker_map *new, *old; + struct shrinker_info *new, *old; struct mem_cgroup_per_node *pn; int nid; for_each_node(nid) { pn = memcg->nodeinfo[nid]; - old = rcu_dereference_protected(pn->shrinker_map, true); + old = rcu_dereference_protected(pn->shrinker_info, true); /* Not yet online memcg */ if (!old) return 0; @@ -214,17 +214,17 @@ static int expand_one_shrinker_map(struct mem_cgroup *memcg, memset(new->map, (int)0xff, old_size); memset((void *)new->map + old_size, 0, size - old_size); - rcu_assign_pointer(pn->shrinker_map, new); + rcu_assign_pointer(pn->shrinker_info, new); kvfree_rcu(old, rcu); } return 0; } -void free_shrinker_maps(struct mem_cgroup *memcg) +void free_shrinker_info(struct mem_cgroup *memcg) { struct mem_cgroup_per_node *pn; - struct memcg_shrinker_map *map; + struct shrinker_info *info; int nid; if (mem_cgroup_is_root(memcg)) @@ -232,15 +232,15 @@ void free_shrinker_maps(struct mem_cgroup *memcg) for_each_node(nid) { pn = memcg->nodeinfo[nid]; - map = rcu_dereference_protected(pn->shrinker_map, true); - kvfree(map); - rcu_assign_pointer(pn->shrinker_map, NULL); + info = rcu_dereference_protected(pn->shrinker_info, true); + kvfree(info); + rcu_assign_pointer(pn->shrinker_info, NULL); } } -int alloc_shrinker_maps(struct mem_cgroup *memcg) +int alloc_shrinker_info(struct mem_cgroup *memcg) { - struct 
memcg_shrinker_map *map; + struct shrinker_info *info; int nid, size, ret = 0; if (mem_cgroup_is_root(memcg)) @@ -249,20 +249,20 @@ int alloc_shrinker_maps(struct mem_cgroup *memcg) down_write(&shrinker_rwsem); size = shrinker_map_size(shrinker_nr_max); for_each_node(nid) { - map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid); - if (!map) { - free_shrinker_maps(memcg); + info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid); + if (!info) { + free_shrinker_info(memcg); ret = -ENOMEM; break; } - rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); + rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info); } up_write(&shrinker_rwsem); return ret; } -static int expand_shrinker_maps(int new_id) +static int expand_shrinker_info(int new_id) { int size, old_size, ret = 0; int new_nr_max = new_id + 1; @@ -282,7 +282,7 @@ static int expand_shrinker_maps(int new_id) do { if (mem_cgroup_is_root(memcg)) continue; - ret = expand_one_shrinker_map(memcg, size, old_size); + ret = expand_one_shrinker_info(memcg, size, old_size); if (ret) { mem_cgroup_iter_break(NULL, memcg); goto out; @@ -298,13 +298,13 @@ out: void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) { if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { - struct memcg_shrinker_map *map; + struct shrinker_info *info; rcu_read_lock(); - map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); + info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); /* Pairs with smp mb in shrink_slab() */ smp_mb__before_atomic(); - set_bit(shrinker_id, map->map); + set_bit(shrinker_id, info->map); rcu_read_unlock(); } } @@ -335,7 +335,7 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker) goto unlock; if (id >= shrinker_nr_max) { - if (expand_shrinker_maps(id)) { + if (expand_shrinker_info(id)) { idr_remove(&shrinker_idr, id); goto unlock; } @@ -665,7 +665,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority) { - struct memcg_shrinker_map *map; + struct shrinker_info *info; unsigned long ret, freed = 0; int i; @@ -675,12 +675,12 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, if (!down_read_trylock(&shrinker_rwsem)) return 0; - map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map, - true); - if (unlikely(!map)) + info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info, + true); + if (unlikely(!info)) goto unlock; - for_each_set_bit(i, map->map, shrinker_nr_max) { + for_each_set_bit(i, info->map, shrinker_nr_max) { struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, @@ -691,7 +691,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, shrinker = idr_find(&shrinker_idr, i); if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) { if (!shrinker) - clear_bit(i, map->map); + clear_bit(i, info->map); continue; } @@ -702,7 +702,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, ret = do_shrink_slab(&sc, shrinker, priority); if (ret == SHRINK_EMPTY) { - clear_bit(i, map->map); + clear_bit(i, info->map); /* * After the shrinker reported that it had no objects to * free, but before we cleared the corresponding bit in -- cgit v1.2.3-71-gd317 From 41ca668a71e7b03743369a2c6d8b8edc1e943dc8 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Tue, 4 May 2021 18:36:29 -0700 Subject: mm: vmscan: use a new flag to indicate shrinker is registered Currently registered shrinker is indicated by 
non-NULL shrinker->nr_deferred. This approach is fine with nr_deferred at the shrinker level, but the following patches will move MEMCG_AWARE shrinkers' nr_deferred to memcg level, so their shrinker->nr_deferred would always be NULL. This would prevent the shrinkers from unregistering correctly. Remove SHRINKER_REGISTERING since we could check if shrinker is registered successfully by the new flag. Link: https://lkml.kernel.org/r/20210311190845.9708-9-shy828301@gmail.com Signed-off-by: Yang Shi Acked-by: Kirill Tkhai Acked-by: Vlastimil Babka Acked-by: Roman Gushchin Reviewed-by: Shakeel Butt Cc: Dave Chinner Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/shrinker.h | 7 ++++--- mm/vmscan.c | 40 +++++++++++++++------------------------- 2 files changed, 19 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 0f80123650e2..1eac79ce57d4 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -79,13 +79,14 @@ struct shrinker { #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ /* Flags */ -#define SHRINKER_NUMA_AWARE (1 << 0) -#define SHRINKER_MEMCG_AWARE (1 << 1) +#define SHRINKER_REGISTERED (1 << 0) +#define SHRINKER_NUMA_AWARE (1 << 1) +#define SHRINKER_MEMCG_AWARE (1 << 2) /* * It just makes sense when the shrinker is also MEMCG_AWARE for now, * non-MEMCG_AWARE shrinker should not have this flag set. */ -#define SHRINKER_NONSLAB (1 << 2) +#define SHRINKER_NONSLAB (1 << 3) extern int prealloc_shrinker(struct shrinker *shrinker); extern void register_shrinker_prepared(struct shrinker *shrinker); diff --git a/mm/vmscan.c b/mm/vmscan.c index 400f4a657b27..d1601163d895 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -316,19 +316,6 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) } } -/* - * We allow subsystems to populate their shrinker-related - * LRU lists before register_shrinker_prepared() is called - * for the shrinker, since we don't want to impose - * restrictions on their internal registration order. - * In this case shrink_slab_memcg() may find corresponding - * bit is set in the shrinkers map. - * - * This value is used by the function to detect registering - * shrinkers and to skip do_shrink_slab() calls for them. 
- */ -#define SHRINKER_REGISTERING ((struct shrinker *)~0UL) - static DEFINE_IDR(shrinker_idr); static int prealloc_memcg_shrinker(struct shrinker *shrinker) @@ -337,7 +324,7 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker) down_write(&shrinker_rwsem); /* This may call shrinker, so it must use down_read_trylock() */ - id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL); + id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL); if (id < 0) goto unlock; @@ -360,9 +347,9 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker) BUG_ON(id < 0); - down_write(&shrinker_rwsem); + lockdep_assert_held(&shrinker_rwsem); + idr_remove(&shrinker_idr, id); - up_write(&shrinker_rwsem); } static bool cgroup_reclaim(struct scan_control *sc) @@ -490,8 +477,11 @@ void free_prealloced_shrinker(struct shrinker *shrinker) if (!shrinker->nr_deferred) return; - if (shrinker->flags & SHRINKER_MEMCG_AWARE) + if (shrinker->flags & SHRINKER_MEMCG_AWARE) { + down_write(&shrinker_rwsem); unregister_memcg_shrinker(shrinker); + up_write(&shrinker_rwsem); + } kfree(shrinker->nr_deferred); shrinker->nr_deferred = NULL; @@ -501,10 +491,7 @@ void register_shrinker_prepared(struct shrinker *shrinker) { down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); -#ifdef CONFIG_MEMCG - if (shrinker->flags & SHRINKER_MEMCG_AWARE) - idr_replace(&shrinker_idr, shrinker, shrinker->id); -#endif + shrinker->flags |= SHRINKER_REGISTERED; up_write(&shrinker_rwsem); } @@ -524,13 +511,16 @@ EXPORT_SYMBOL(register_shrinker); */ void unregister_shrinker(struct shrinker *shrinker) { - if (!shrinker->nr_deferred) + if (!(shrinker->flags & SHRINKER_REGISTERED)) return; - if (shrinker->flags & SHRINKER_MEMCG_AWARE) - unregister_memcg_shrinker(shrinker); + down_write(&shrinker_rwsem); list_del(&shrinker->list); + shrinker->flags &= ~SHRINKER_REGISTERED; + if (shrinker->flags & SHRINKER_MEMCG_AWARE) + unregister_memcg_shrinker(shrinker); up_write(&shrinker_rwsem); + kfree(shrinker->nr_deferred); shrinker->nr_deferred = NULL; } @@ -695,7 +685,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, struct shrinker *shrinker; shrinker = idr_find(&shrinker_idr, i); - if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) { + if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) { if (!shrinker) clear_bit(i, info->map); continue; -- cgit v1.2.3-71-gd317 From 3c6f17e6c5d048c8029578c475dd037dd5db58af Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Tue, 4 May 2021 18:36:33 -0700 Subject: mm: vmscan: add per memcg shrinker nr_deferred Currently the number of deferred objects are per shrinker, but some slabs, for example, vfs inode/dentry cache are per memcg, this would result in poor isolation among memcgs. The deferred objects typically are generated by __GFP_NOFS allocations, one memcg with excessive __GFP_NOFS allocations may blow up deferred objects, then other innocent memcgs may suffer from over shrink, excessive reclaim latency, etc. For example, two workloads run in memcgA and memcgB respectively, workload in B is vfs heavy workload. Workload in A generates excessive deferred objects, then B's vfs cache might be hit heavily (drop half of caches) by B's limit reclaim or global reclaim. We observed this hit in our production environment which was running vfs heavy workload shown as the below tracing log: <...>-409454 [016] .... 
28286961.747146: mm_shrink_slab_start: super_cache_scan+0x0/0x1a0 ffff9a83046f3458: nid: 1 objects to shrink 3641681686040 gfp_flags GFP_HIGHUSER_MOVABLE|__GFP_ZERO pgs_scanned 1 lru_pgs 15721 cache items 246404277 delta 31345 total_scan 123202138 <...>-409454 [022] .... 28287105.928018: mm_shrink_slab_end: super_cache_scan+0x0/0x1a0 ffff9a83046f3458: nid: 1 unused scan count 3641681686040 new scan count 3641798379189 total_scan 602 last shrinker return val 123186855 The vfs cache and page cache ratio was 10:1 on this machine, and half of caches were dropped. This also resulted in significant amount of page caches were dropped due to inodes eviction. Make nr_deferred per memcg for memcg aware shrinkers would solve the unfairness and bring better isolation. The following patch will add nr_deferred to parent memcg when memcg offline. To preserve nr_deferred when reparenting memcgs to root, root memcg needs shrinker_info allocated too. When memcg is not enabled (!CONFIG_MEMCG or memcg disabled), the shrinker's nr_deferred would be used. And non memcg aware shrinkers use shrinker's nr_deferred all the time. Link: https://lkml.kernel.org/r/20210311190845.9708-10-shy828301@gmail.com Signed-off-by: Yang Shi Acked-by: Roman Gushchin Acked-by: Kirill Tkhai Reviewed-by: Shakeel Butt Cc: Dave Chinner Cc: Johannes Weiner Cc: Michal Hocko Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 7 +++--- mm/vmscan.c | 60 ++++++++++++++++++++++++++++++++-------------- 2 files changed, 46 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6cd800fe9a67..32bd62047238 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -114,12 +114,13 @@ struct batched_lruvec_stat { }; /* - * Bitmap of shrinker::id corresponding to memcg-aware shrinkers, - * which have elements charged to this memcg. + * Bitmap and deferred work of shrinker::id corresponding to memcg-aware + * shrinkers, which have elements charged to this memcg. 
*/ struct shrinker_info { struct rcu_head rcu; - unsigned long map[]; + atomic_long_t *nr_deferred; + unsigned long *map; }; /* diff --git a/mm/vmscan.c b/mm/vmscan.c index d1601163d895..db668c4c78f4 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -187,11 +187,17 @@ static DECLARE_RWSEM(shrinker_rwsem); #ifdef CONFIG_MEMCG static int shrinker_nr_max; +/* The shrinker_info is expanded in a batch of BITS_PER_LONG */ static inline int shrinker_map_size(int nr_items) { return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long)); } +static inline int shrinker_defer_size(int nr_items) +{ + return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t)); +} + static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg, int nid) { @@ -200,11 +206,13 @@ static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg, } static int expand_one_shrinker_info(struct mem_cgroup *memcg, - int size, int old_size) + int map_size, int defer_size, + int old_map_size, int old_defer_size) { struct shrinker_info *new, *old; struct mem_cgroup_per_node *pn; int nid; + int size = map_size + defer_size; for_each_node(nid) { pn = memcg->nodeinfo[nid]; @@ -217,9 +225,16 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg, if (!new) return -ENOMEM; - /* Set all old bits, clear all new bits */ - memset(new->map, (int)0xff, old_size); - memset((void *)new->map + old_size, 0, size - old_size); + new->nr_deferred = (atomic_long_t *)(new + 1); + new->map = (void *)new->nr_deferred + defer_size; + + /* map: set all old bits, clear all new bits */ + memset(new->map, (int)0xff, old_map_size); + memset((void *)new->map + old_map_size, 0, map_size - old_map_size); + /* nr_deferred: copy old values, clear all new values */ + memcpy(new->nr_deferred, old->nr_deferred, old_defer_size); + memset((void *)new->nr_deferred + old_defer_size, 0, + defer_size - old_defer_size); rcu_assign_pointer(pn->shrinker_info, new); kvfree_rcu(old, rcu); @@ -234,9 +249,6 @@ void free_shrinker_info(struct mem_cgroup *memcg) struct shrinker_info *info; int nid; - if (mem_cgroup_is_root(memcg)) - return; - for_each_node(nid) { pn = memcg->nodeinfo[nid]; info = rcu_dereference_protected(pn->shrinker_info, true); @@ -249,12 +261,12 @@ int alloc_shrinker_info(struct mem_cgroup *memcg) { struct shrinker_info *info; int nid, size, ret = 0; - - if (mem_cgroup_is_root(memcg)) - return 0; + int map_size, defer_size = 0; down_write(&shrinker_rwsem); - size = shrinker_map_size(shrinker_nr_max); + map_size = shrinker_map_size(shrinker_nr_max); + defer_size = shrinker_defer_size(shrinker_nr_max); + size = map_size + defer_size; for_each_node(nid) { info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid); if (!info) { @@ -262,6 +274,8 @@ int alloc_shrinker_info(struct mem_cgroup *memcg) ret = -ENOMEM; break; } + info->nr_deferred = (atomic_long_t *)(info + 1); + info->map = (void *)info->nr_deferred + defer_size; rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info); } up_write(&shrinker_rwsem); @@ -269,15 +283,21 @@ int alloc_shrinker_info(struct mem_cgroup *memcg) return ret; } +static inline bool need_expand(int nr_max) +{ + return round_up(nr_max, BITS_PER_LONG) > + round_up(shrinker_nr_max, BITS_PER_LONG); +} + static int expand_shrinker_info(int new_id) { - int size, old_size, ret = 0; + int ret = 0; int new_nr_max = new_id + 1; + int map_size, defer_size = 0; + int old_map_size, old_defer_size = 0; struct mem_cgroup *memcg; - size = shrinker_map_size(new_nr_max); - old_size = 
shrinker_map_size(shrinker_nr_max); - if (size <= old_size) + if (!need_expand(new_nr_max)) goto out; if (!root_mem_cgroup) @@ -285,11 +305,15 @@ static int expand_shrinker_info(int new_id) lockdep_assert_held(&shrinker_rwsem); + map_size = shrinker_map_size(new_nr_max); + defer_size = shrinker_defer_size(new_nr_max); + old_map_size = shrinker_map_size(shrinker_nr_max); + old_defer_size = shrinker_defer_size(shrinker_nr_max); + memcg = mem_cgroup_iter(NULL, NULL, NULL); do { - if (mem_cgroup_is_root(memcg)) - continue; - ret = expand_one_shrinker_info(memcg, size, old_size); + ret = expand_one_shrinker_info(memcg, map_size, defer_size, + old_map_size, old_defer_size); if (ret) { mem_cgroup_iter_break(NULL, memcg); goto out; -- cgit v1.2.3-71-gd317 From a178015cde69981cdcd8f109c5abc98703fead62 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Tue, 4 May 2021 18:36:42 -0700 Subject: mm: memcontrol: reparent nr_deferred when memcg offline Now shrinker's nr_deferred is per memcg for memcg aware shrinkers, add to parent's corresponding nr_deferred when memcg offline. Link: https://lkml.kernel.org/r/20210311190845.9708-13-shy828301@gmail.com Signed-off-by: Yang Shi Acked-by: Vlastimil Babka Acked-by: Kirill Tkhai Acked-by: Roman Gushchin Reviewed-by: Shakeel Butt Cc: Dave Chinner Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 1 + mm/memcontrol.c | 1 + mm/vmscan.c | 24 ++++++++++++++++++++++++ 3 files changed, 26 insertions(+) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 32bd62047238..c193be760709 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1614,6 +1614,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) int alloc_shrinker_info(struct mem_cgroup *memcg); void free_shrinker_info(struct mem_cgroup *memcg); void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); +void reparent_shrinker_deferred(struct mem_cgroup *memcg); #else #define mem_cgroup_sockets_enabled 0 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 36f31d611dea..3004afb6d090 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5154,6 +5154,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) page_counter_set_low(&memcg->memory, 0); memcg_offline_kmem(memcg); + reparent_shrinker_deferred(memcg); wb_memcg_offline(memcg); drain_all_stock(memcg); diff --git a/mm/vmscan.c b/mm/vmscan.c index 9617c5ff3e98..8c2d2003acbe 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -397,6 +397,30 @@ static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]); } +void reparent_shrinker_deferred(struct mem_cgroup *memcg) +{ + int i, nid; + long nr; + struct mem_cgroup *parent; + struct shrinker_info *child_info, *parent_info; + + parent = parent_mem_cgroup(memcg); + if (!parent) + parent = root_mem_cgroup; + + /* Prevent from concurrent shrinker_info expand */ + down_read(&shrinker_rwsem); + for_each_node(nid) { + child_info = shrinker_info_protected(memcg, nid); + parent_info = shrinker_info_protected(parent, nid); + for (i = 0; i < shrinker_nr_max; i++) { + nr = atomic_long_read(&child_info->nr_deferred[i]); + atomic_long_add(nr, &parent_info->nr_deferred[i]); + } + } + up_read(&shrinker_rwsem); +} + static bool cgroup_reclaim(struct scan_control *sc) { return 
sc->target_mem_cgroup; -- cgit v1.2.3-71-gd317 From ef4984384172e93cc95e0e8cd102536d67e8a787 Mon Sep 17 00:00:00 2001 From: Pintu Kumar Date: Tue, 4 May 2021 18:36:48 -0700 Subject: mm/compaction: remove unused variable sysctl_compact_memory The sysctl_compact_memory variable is mostly unused in mm/compaction.c. It just acts as a placeholder for sysctl to store .data. But the .data itself is not needed here. So we can get rid of this variable completely and make .data NULL. This will also eliminate the extern declaration from the header file. No functionality is broken or changed this way. Link: https://lkml.kernel.org/r/1614852224-14671-1-git-send-email-pintu@codeaurora.org Signed-off-by: Pintu Kumar Signed-off-by: Pintu Agarwal Reviewed-by: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 1 - kernel/sysctl.c | 2 +- mm/compaction.c | 3 --- 3 files changed, 1 insertion(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index ed4070ed41ef..4221888bdcd6 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -81,7 +81,6 @@ static inline unsigned long compact_gap(unsigned int order) } #ifdef CONFIG_COMPACTION -extern int sysctl_compact_memory; extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f91d327273c1..14edf84cc571 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2830,7 +2830,7 @@ static struct ctl_table vm_table[] = { #ifdef CONFIG_COMPACTION { .procname = "compact_memory", - .data = &sysctl_compact_memory, + .data = NULL, .maxlen = sizeof(int), .mode = 0200, .proc_handler = sysctl_compaction_handler, diff --git a/mm/compaction.c b/mm/compaction.c index 335862f1661c..027eb794e747 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2692,9 +2692,6 @@ static void compact_nodes(void) compact_node(nid); } -/* The written value is actually unused, all memory is compacted */ -int sysctl_compact_memory; - /* * Tunable for proactive compaction. It determines how * aggressively the kernel should compact memory in the -- cgit v1.2.3-71-gd317 From d479960e44f27e0e52ba31b21740b703c538027c Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 4 May 2021 18:36:54 -0700 Subject: mm: disable LRU pagevec during the migration temporarily The LRU pagevec holds a refcount on its pages until the pagevec is drained. It can prevent migration because the refcount of the page is then greater than the expectation in the migration logic. To mitigate the issue, callers of migrate_pages drain the LRU pagevec via migrate_prep or lru_add_drain_all before the migrate_pages call. However, that is not enough because pages coming into the pagevec after the draining call can still stay in the pagevec and keep preventing page migration. Since some callers of migrate_pages have retry logic with LRU draining, the page would migrate on the next trial, but this is still fragile in that it does not close the fundamental race between pages entering the pagevec and migration, so the migration failure could cause contiguous memory allocation failure in the end. To close the race, this patch disables the lru caches (i.e., pagevec) during ongoing migration until migration is done. Since it's really hard to reproduce, I measured how many times migrate_pages retried with force mode (i.e., a fallback to a sync migration) with the debug code below. 
int migrate_pages(struct list_head *from, new_page_t get_new_page, .. .. if (rc && reason == MR_CONTIG_RANGE && pass > 2) { printk(KERN_ERR, "pfn 0x%lx reason %d", page_to_pfn(page), rc); dump_page(page, "fail to migrate"); } The test was repeating android apps launching with cma allocation in background every five seconds. Total cma allocation count was about 500 during the testing. With this patch, the dump_page count was reduced from 400 to 30. The new interface is also useful for memory hotplug which currently drains lru pcp caches after each migration failure. This is rather suboptimal as it has to disrupt others running during the operation. With the new interface the operation happens only once. This is also in line with pcp allocator cache which are disabled for the offlining as well. Link: https://lkml.kernel.org/r/20210319175127.886124-1-minchan@kernel.org Signed-off-by: Minchan Kim Reviewed-by: Chris Goldsworthy Acked-by: Michal Hocko Cc: John Dias Cc: Suren Baghdasaryan Cc: Matthew Wilcox Cc: David Hildenbrand Cc: Vlastimil Babka Cc: Oliver Sang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 2 ++ include/linux/swap.h | 14 +++++++++++ mm/memory_hotplug.c | 3 ++- mm/mempolicy.c | 4 ++++ mm/migrate.c | 11 +++++---- mm/page_alloc.c | 2 ++ mm/swap.c | 64 ++++++++++++++++++++++++++++++++++++++++++------- 7 files changed, 86 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 3a389633b68f..9e4a2dc8622c 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -46,6 +46,7 @@ extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void putback_movable_page(struct page *page); extern void migrate_prep(void); +extern void migrate_finish(void); extern void migrate_prep_local(void); extern void migrate_page_states(struct page *newpage, struct page *page); extern void migrate_page_copy(struct page *newpage, struct page *page); @@ -67,6 +68,7 @@ static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } static inline int migrate_prep(void) { return -ENOSYS; } +static inline int migrate_finish(void) { return -ENOSYS; } static inline int migrate_prep_local(void) { return -ENOSYS; } static inline void migrate_page_states(struct page *newpage, struct page *page) diff --git a/include/linux/swap.h b/include/linux/swap.h index 42191da1bdc9..f69e0f67651d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -340,6 +340,20 @@ extern void lru_note_cost(struct lruvec *lruvec, bool file, extern void lru_note_cost_page(struct page *); extern void lru_cache_add(struct page *); extern void mark_page_accessed(struct page *); + +extern atomic_t lru_disable_count; + +static inline bool lru_cache_disabled(void) +{ + return atomic_read(&lru_disable_count); +} + +static inline void lru_cache_enable(void) +{ + atomic_dec(&lru_disable_count); +} + +extern void lru_cache_disable(void); extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_cpu_zone(struct zone *zone); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 0cdbbfbc5757..729fba144c71 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1611,6 +1611,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages) * in a way that pages from isolated pageblock are left on pcplists. 
*/ zone_pcp_disable(zone); + lru_cache_disable(); /* set above range as isolated */ ret = start_isolate_page_range(start_pfn, end_pfn, @@ -1642,7 +1643,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages) } cond_resched(); - lru_add_drain_all(); ret = scan_movable_pages(pfn, end_pfn, &pfn); if (!ret) { @@ -1687,6 +1687,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages) zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; spin_unlock_irqrestore(&zone->lock, flags); + lru_cache_enable(); zone_pcp_enable(zone); /* removal success */ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index cd0295567a04..3b95e169e97d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1208,6 +1208,8 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, break; } mmap_read_unlock(mm); + + migrate_finish(); if (err < 0) return err; return busy; @@ -1371,6 +1373,8 @@ up_out: mmap_write_unlock(mm); mpol_out: mpol_put(new); + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) + migrate_finish(); return err; } diff --git a/mm/migrate.c b/mm/migrate.c index 47df0df8f21a..5b09567dc293 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -66,11 +66,13 @@ void migrate_prep(void) { /* * Clear the LRU lists so pages can be isolated. - * Note that pages may be moved off the LRU after we have - * drained them. Those pages will fail to migrate like other - * pages that may be busy. */ - lru_add_drain_all(); + lru_cache_disable(); +} + +void migrate_finish(void) +{ + lru_cache_enable(); } /* Do the necessary work of migrate_prep but not if it involves other CPUs */ @@ -1838,6 +1840,7 @@ out_flush: if (err >= 0) err = err1; out: + migrate_finish(); return err; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 64d4aae2a78a..2cefb634e0d6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -8715,6 +8715,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, if (ret == -ENOMEM) break; } + + migrate_finish(); if (ret < 0) { alloc_contig_dump_pages(&cc->migratepages); putback_movable_pages(&cc->migratepages); diff --git a/mm/swap.c b/mm/swap.c index 31b844d4ed94..c94f55e7b649 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -235,6 +235,18 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec) } } +/* return true if pagevec needs to drain */ +static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page) +{ + bool ret = false; + + if (!pagevec_add(pvec, page) || PageCompound(page) || + lru_cache_disabled()) + ret = true; + + return ret; +} + /* * Writeback is about to end against a page which has been marked for immediate * reclaim. 
If it still appears to be reclaimable, move it to the tail of the @@ -252,7 +264,7 @@ void rotate_reclaimable_page(struct page *page) get_page(page); local_lock_irqsave(&lru_rotate.lock, flags); pvec = this_cpu_ptr(&lru_rotate.pvec); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) pagevec_lru_move_fn(pvec, pagevec_move_tail_fn); local_unlock_irqrestore(&lru_rotate.lock, flags); } @@ -343,7 +355,7 @@ static void activate_page(struct page *page) local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.activate_page); get_page(page); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) pagevec_lru_move_fn(pvec, __activate_page); local_unlock(&lru_pvecs.lock); } @@ -458,7 +470,7 @@ void lru_cache_add(struct page *page) get_page(page); local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.lru_add); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) __pagevec_lru_add(pvec); local_unlock(&lru_pvecs.lock); } @@ -654,7 +666,7 @@ void deactivate_file_page(struct page *page) local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) pagevec_lru_move_fn(pvec, lru_deactivate_file_fn); local_unlock(&lru_pvecs.lock); } @@ -676,7 +688,7 @@ void deactivate_page(struct page *page) local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate); get_page(page); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) pagevec_lru_move_fn(pvec, lru_deactivate_fn); local_unlock(&lru_pvecs.lock); } @@ -698,7 +710,7 @@ void mark_page_lazyfree(struct page *page) local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree); get_page(page); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) pagevec_lru_move_fn(pvec, lru_lazyfree_fn); local_unlock(&lru_pvecs.lock); } @@ -735,7 +747,7 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy) * Calling this function with cpu hotplug locks held can actually lead * to obscure indirect dependencies via WQ context. */ -void lru_add_drain_all(void) +inline void __lru_add_drain_all(bool force_all_cpus) { /* * lru_drain_gen - Global pages generation number @@ -780,7 +792,7 @@ void lru_add_drain_all(void) * (C) Exit the draining operation if a newer generation, from another * lru_add_drain_all(), was already scheduled for draining. Check (A). 
*/ - if (unlikely(this_gen != lru_drain_gen)) + if (unlikely(this_gen != lru_drain_gen && !force_all_cpus)) goto done; /* @@ -810,7 +822,8 @@ void lru_add_drain_all(void) for_each_online_cpu(cpu) { struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); - if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) || + if (force_all_cpus || + pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) || data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) || pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) || pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) || @@ -828,6 +841,11 @@ void lru_add_drain_all(void) done: mutex_unlock(&lock); } + +void lru_add_drain_all(void) +{ + __lru_add_drain_all(false); +} #else void lru_add_drain_all(void) { @@ -835,6 +853,34 @@ void lru_add_drain_all(void) } #endif /* CONFIG_SMP */ +atomic_t lru_disable_count = ATOMIC_INIT(0); + +/* + * lru_cache_disable() needs to be called before we start compiling + * a list of pages to be migrated using isolate_lru_page(). + * It drains pages on LRU cache and then disable on all cpus until + * lru_cache_enable is called. + * + * Must be paired with a call to lru_cache_enable(). + */ +void lru_cache_disable(void) +{ + atomic_inc(&lru_disable_count); +#ifdef CONFIG_SMP + /* + * lru_add_drain_all in the force mode will schedule draining on + * all online CPUs so any calls of lru_cache_disabled wrapped by + * local_lock or preemption disabled would be ordered by that. + * The atomic operation doesn't need to have stronger ordering + * requirements because that is enforeced by the scheduling + * guarantees. + */ + __lru_add_drain_all(true); +#else + lru_add_drain(); +#endif +} + /** * release_pages - batched put_page() * @pages: array of pages to release -- cgit v1.2.3-71-gd317 From 361a2a229fa31ab7f2b236b5946e434964d00762 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 4 May 2021 18:36:57 -0700 Subject: mm: replace migrate_[prep|finish] with lru_cache_[disable|enable] Currently, migrate_[prep|finish] is merely a wrapper of lru_cache_[disable|enable]. There is not much to gain from having additional abstraction. Use lru_cache_[disable|enable] instead of migrate_[prep|finish], which would be more descriptive. note: migrate_prep_local in compaction.c changed into lru_add_drain to avoid CPU schedule cost with involving many other CPUs to keep old behavior. 
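As a rough sketch of the resulting caller pattern (illustrative only; the surrounding isolation and error handling are hypothetical and not part of this patch):

	lru_cache_disable();	/* drain the LRU pagevecs and keep them disabled */
	/* ... isolate the pages and pass the list to migrate_pages() ... */
	lru_cache_enable();	/* resume per-CPU LRU batching */
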
Link: https://lkml.kernel.org/r/20210319175127.886124-2-minchan@kernel.org Signed-off-by: Minchan Kim Acked-by: Michal Hocko Reviewed-by: David Hildenbrand Cc: Chris Goldsworthy Cc: John Dias Cc: Matthew Wilcox Cc: Oliver Sang Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 7 ------- mm/compaction.c | 3 ++- mm/mempolicy.c | 8 ++++---- mm/migrate.c | 28 ++-------------------------- mm/page_alloc.c | 4 ++-- 5 files changed, 10 insertions(+), 40 deletions(-) (limited to 'include/linux') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 9e4a2dc8622c..6155d97ec76c 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -45,9 +45,6 @@ extern struct page *alloc_migration_target(struct page *page, unsigned long priv extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void putback_movable_page(struct page *page); -extern void migrate_prep(void); -extern void migrate_finish(void); -extern void migrate_prep_local(void); extern void migrate_page_states(struct page *newpage, struct page *page); extern void migrate_page_copy(struct page *newpage, struct page *page); extern int migrate_huge_page_move_mapping(struct address_space *mapping, @@ -67,10 +64,6 @@ static inline struct page *alloc_migration_target(struct page *page, static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } -static inline int migrate_prep(void) { return -ENOSYS; } -static inline int migrate_finish(void) { return -ENOSYS; } -static inline int migrate_prep_local(void) { return -ENOSYS; } - static inline void migrate_page_states(struct page *newpage, struct page *page) { } diff --git a/mm/compaction.c b/mm/compaction.c index 1be7928af62b..598dffbd5c8e 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2354,7 +2354,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn, sync); - migrate_prep_local(); + /* lru_add_drain_all could be expensive with involving other CPUs */ + lru_add_drain(); while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { int err; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3b95e169e97d..c0343c742bed 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1124,7 +1124,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, int err = 0; nodemask_t tmp; - migrate_prep(); + lru_cache_disable(); mmap_read_lock(mm); @@ -1209,7 +1209,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, } mmap_read_unlock(mm); - migrate_finish(); + lru_cache_enable(); if (err < 0) return err; return busy; @@ -1325,7 +1325,7 @@ static long do_mbind(unsigned long start, unsigned long len, if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { - migrate_prep(); + lru_cache_disable(); } { NODEMASK_SCRATCH(scratch); @@ -1374,7 +1374,7 @@ up_out: mpol_out: mpol_put(new); if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) - migrate_finish(); + lru_cache_enable(); return err; } diff --git a/mm/migrate.c b/mm/migrate.c index 5b09567dc293..b5fbeb4bf49a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -57,30 +57,6 @@ #include "internal.h" -/* - * migrate_prep() needs to be called before we start compiling a list of pages - * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is - * undesirable, use migrate_prep_local() - */ -void migrate_prep(void) -{ - /* - * Clear the LRU lists so pages can be isolated. 
- */ - lru_cache_disable(); -} - -void migrate_finish(void) -{ - lru_cache_enable(); -} - -/* Do the necessary work of migrate_prep but not if it involves other CPUs */ -void migrate_prep_local(void) -{ - lru_add_drain(); -} - int isolate_movable_page(struct page *page, isolate_mode_t mode) { struct address_space *mapping; @@ -1771,7 +1747,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, int start, i; int err = 0, err1; - migrate_prep(); + lru_cache_disable(); for (i = start = 0; i < nr_pages; i++) { const void __user *p; @@ -1840,7 +1816,7 @@ out_flush: if (err >= 0) err = err1; out: - migrate_finish(); + lru_cache_enable(); return err; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2cefb634e0d6..80754fc1f1ff 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -8681,7 +8681,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, }; - migrate_prep(); + lru_cache_disable(); while (pfn < end || !list_empty(&cc->migratepages)) { if (fatal_signal_pending(current)) { @@ -8716,7 +8716,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, break; } - migrate_finish(); + lru_cache_enable(); if (ret < 0) { alloc_contig_dump_pages(&cc->migratepages); putback_movable_pages(&cc->migratepages); -- cgit v1.2.3-71-gd317 From 8cc621d2f45ddd3dc664024a647ee7adf48d79a5 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 4 May 2021 18:37:00 -0700 Subject: mm: fs: invalidate BH LRU during page migration Pages containing buffer_heads that are in one of the per-CPU buffer_head LRU caches will be pinned and thus cannot be migrated. This can prevent CMA allocations from succeeding, which are often used on platforms with co-processors (such as a DSP) that can only use physically contiguous memory. It can also prevent memory hot-unplugging from succeeding, which involves migrating at least MIN_MEMORY_BLOCK_SIZE bytes of memory, which ranges from 8 MiB to 1 GiB based on the architecture in use. Correspondingly, invalidate the BH LRU caches before a migration starts and stop any buffer_head from being cached in the LRU caches, until migration has finished. Link: https://lkml.kernel.org/r/20210319175127.886124-3-minchan@kernel.org Signed-off-by: Minchan Kim Reported-by: Chris Goldsworthy Reported-by: Laura Abbott Tested-by: Oliver Sang Cc: David Hildenbrand Cc: John Dias Cc: Matthew Wilcox Cc: Michal Hocko Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/buffer.c | 36 ++++++++++++++++++++++++++++++------ include/linux/buffer_head.h | 4 ++++ mm/swap.c | 5 ++++- 3 files changed, 38 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/fs/buffer.c b/fs/buffer.c index 0cb7ffd4977c..e9872d0dcbf1 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1264,6 +1264,15 @@ static void bh_lru_install(struct buffer_head *bh) int i; check_irqs_on(); + /* + * the refcount of buffer_head in bh_lru prevents dropping the + * attached page(i.e., try_to_free_buffers) so it could cause + * failing page migration. + * Skip putting upcoming bh into bh_lru until migration is done. 
+ */ + if (lru_cache_disabled()) + return; + bh_lru_lock(); b = this_cpu_ptr(&bh_lrus); @@ -1404,6 +1413,15 @@ __bread_gfp(struct block_device *bdev, sector_t block, } EXPORT_SYMBOL(__bread_gfp); +static void __invalidate_bh_lrus(struct bh_lru *b) +{ + int i; + + for (i = 0; i < BH_LRU_SIZE; i++) { + brelse(b->bhs[i]); + b->bhs[i] = NULL; + } +} /* * invalidate_bh_lrus() is called rarely - but not only at unmount. * This doesn't race because it runs in each cpu either in irq @@ -1412,16 +1430,12 @@ EXPORT_SYMBOL(__bread_gfp); static void invalidate_bh_lru(void *arg) { struct bh_lru *b = &get_cpu_var(bh_lrus); - int i; - for (i = 0; i < BH_LRU_SIZE; i++) { - brelse(b->bhs[i]); - b->bhs[i] = NULL; - } + __invalidate_bh_lrus(b); put_cpu_var(bh_lrus); } -static bool has_bh_in_lru(int cpu, void *dummy) +bool has_bh_in_lru(int cpu, void *dummy) { struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); int i; @@ -1440,6 +1454,16 @@ void invalidate_bh_lrus(void) } EXPORT_SYMBOL_GPL(invalidate_bh_lrus); +void invalidate_bh_lrus_cpu(int cpu) +{ + struct bh_lru *b; + + bh_lru_lock(); + b = per_cpu_ptr(&bh_lrus, cpu); + __invalidate_bh_lrus(b); + bh_lru_unlock(); +} + void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset) { diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 6b47f94378c5..e7e99da31349 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -194,6 +194,8 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, struct buffer_head *__bread_gfp(struct block_device *, sector_t block, unsigned size, gfp_t gfp); void invalidate_bh_lrus(void); +void invalidate_bh_lrus_cpu(int cpu); +bool has_bh_in_lru(int cpu, void *dummy); struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); void unlock_buffer(struct buffer_head *bh); @@ -406,6 +408,8 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } +static inline void invalidate_bh_lrus_cpu(int cpu) {} +static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; } #define buffer_heads_over_limit 0 #endif /* CONFIG_BLOCK */ diff --git a/mm/swap.c b/mm/swap.c index c94f55e7b649..a75a8265302b 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "internal.h" @@ -641,6 +642,7 @@ void lru_add_drain_cpu(int cpu) pagevec_lru_move_fn(pvec, lru_lazyfree_fn); activate_page_drain(cpu); + invalidate_bh_lrus_cpu(cpu); } /** @@ -828,7 +830,8 @@ inline void __lru_add_drain_all(bool force_all_cpus) pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) || pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) || pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) || - need_activate_page_drain(cpu)) { + need_activate_page_drain(cpu) || + has_bh_in_lru(cpu, NULL)) { INIT_WORK(work, lru_add_drain_per_cpu); queue_work_on(cpu, mm_percpu_wq, work); __cpumask_set_cpu(cpu, &has_work); -- cgit v1.2.3-71-gd317 From 606a6f71a25accfc960a5063c23717ff07aa43a3 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 4 May 2021 18:37:04 -0700 Subject: mm/migrate.c: make putback_movable_page() static Patch series "Cleanup and fixup for mm/migrate.c", v3. 
This series contains cleanups to remove unnecessary VM_BUG_ON_PAGE and rc != MIGRATEPAGE_SUCCESS checks. Also use helper functions to remove some duplicated code. What's more, this fixes a potential deadlock in the NUMA balancing shared exec THP case, and so on. More details can be found in the respective changelogs. This patch (of 5): putback_movable_page() is only called by putback_movable_pages(), and we know the page is locked and that both PageMovable() and PageIsolated() are checked right before calling putback_movable_page(). So make it static and remove all three VM_BUG_ON_PAGE() checks. Link: https://lkml.kernel.org/r/20210325131524.48181-1-linmiaohe@huawei.com Link: https://lkml.kernel.org/r/20210325131524.48181-2-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: David Hildenbrand Reviewed-by: Yang Shi Cc: Jerome Glisse Cc: Rafael Aquini Cc: Alistair Popple Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 1 - mm/migrate.c | 7 +------ 2 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 6155d97ec76c..175ef15ae9e8 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -43,7 +43,6 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); -extern void putback_movable_page(struct page *page); extern void migrate_page_states(struct page *newpage, struct page *page); extern void migrate_page_copy(struct page *newpage, struct page *page); diff --git a/mm/migrate.c b/mm/migrate.c index b5fbeb4bf49a..1e1f8324cefe 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -118,15 +118,10 @@ out: return -EBUSY; } -/* It should be called on page which is PG_movable */ -void putback_movable_page(struct page *page) +static void putback_movable_page(struct page *page) { struct address_space *mapping; - VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE(!PageMovable(page), page); - VM_BUG_ON_PAGE(!PageIsolated(page), page); - mapping = page_mapping(page); mapping->a_ops->putback_page(page); __ClearPageIsolated(page); -- cgit v1.2.3-71-gd317 From bbb269206f3c914d4f23e023de4ec020abea6d1b Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 4 May 2021 18:37:19 -0700 Subject: mm: vmstat: add cma statistics Since CMA is used more widely, it's worth having CMA allocation statistics in vmstat. With them, we can see how aggressively the system uses CMA allocation and how often it fails. 
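Once the events are in place they show up in /proc/vmstat like any other vm event counter; an illustrative example with made-up values:

	cat /proc/vmstat | grep cma_alloc
	cma_alloc_success 3752
	cma_alloc_fail 21
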
Link: https://lkml.kernel.org/r/20210302183346.3707237-1-minchan@kernel.org Signed-off-by: Minchan Kim Reviewed-by: John Hubbard Cc: John Dias Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vm_event_item.h | 4 ++++ mm/cma.c | 12 +++++++++--- mm/vmstat.c | 4 ++++ 3 files changed, 17 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 18e75974d4e3..21d7c7f72f1c 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -70,6 +70,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #endif #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, +#endif +#ifdef CONFIG_CMA + CMA_ALLOC_SUCCESS, + CMA_ALLOC_FAIL, #endif UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ diff --git a/mm/cma.c b/mm/cma.c index acd6991f77a0..b44a71eb3174 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -435,13 +435,13 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, int ret = -ENOMEM; if (!cma || !cma->count || !cma->bitmap) - return NULL; + goto out; pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma, count, align); if (!count) - return NULL; + goto out; mask = cma_bitmap_aligned_mask(cma, align); offset = cma_bitmap_aligned_offset(cma, align); @@ -449,7 +449,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, bitmap_count = cma_bitmap_pages_to_bits(cma, count); if (bitmap_count > bitmap_maxno) - return NULL; + goto out; for (;;) { spin_lock_irq(&cma->lock); @@ -506,6 +506,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, } pr_debug("%s(): returned %p\n", __func__, page); +out: + if (page) + count_vm_event(CMA_ALLOC_SUCCESS); + else + count_vm_event(CMA_ALLOC_FAIL); + return page; } diff --git a/mm/vmstat.c b/mm/vmstat.c index 74b2c374b86c..49a8456ec079 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1312,6 +1312,10 @@ const char * const vmstat_text[] = { #ifdef CONFIG_HUGETLB_PAGE "htlb_buddy_alloc_success", "htlb_buddy_alloc_fail", +#endif +#ifdef CONFIG_CMA + "cma_alloc_success", + "cma_alloc_fail", #endif "unevictable_pgs_culled", "unevictable_pgs_scanned", -- cgit v1.2.3-71-gd317 From 78fa51503fdbe463c96eef4c3cf69ca54032647a Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 4 May 2021 18:37:34 -0700 Subject: mm: use proper type for cma_[alloc|release] size_t in cma_alloc is confusing since it makes people think it's byte count, not pages. Change it to unsigned long[1]. The unsigned int in cma_release is also not right so change it. Since we have unsigned long in cma_release, free_contig_range should also respect it. 
[1] 67a2e213e7e9, mm: cma: fix incorrect type conversion for size during dma allocation Link: https://lore.kernel.org/linux-mm/20210324043434.GP1719932@casper.infradead.org/ Link: https://lkml.kernel.org/r/20210331164018.710560-1-minchan@kernel.org Signed-off-by: Minchan Kim Reviewed-by: David Hildenbrand Cc: Matthew Wilcox Cc: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cma.h | 4 ++-- include/linux/gfp.h | 2 +- include/trace/events/cma.h | 22 +++++++++++----------- mm/cma.c | 17 +++++++++-------- mm/page_alloc.c | 6 +++--- 5 files changed, 26 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cma.h b/include/linux/cma.h index 217999c8a762..53fd8c3cdbd0 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -44,9 +44,9 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, const char *name, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, +extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align, bool no_warn); -extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); +extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count); extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); #endif diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 26f4d907254a..8a5f6c3d7dba 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -657,7 +657,7 @@ extern int alloc_contig_range(unsigned long start, unsigned long end, extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask); #endif -void free_contig_range(unsigned long pfn, unsigned int nr_pages); +void free_contig_range(unsigned long pfn, unsigned long nr_pages); #ifdef CONFIG_CMA /* CMA stuff */ diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h index 5cf385ae7c08..c3d354702cb0 100644 --- a/include/trace/events/cma.h +++ b/include/trace/events/cma.h @@ -11,7 +11,7 @@ DECLARE_EVENT_CLASS(cma_alloc_class, TP_PROTO(const char *name, unsigned long pfn, const struct page *page, - unsigned int count, unsigned int align), + unsigned long count, unsigned int align), TP_ARGS(name, pfn, page, count, align), @@ -19,7 +19,7 @@ DECLARE_EVENT_CLASS(cma_alloc_class, __string(name, name) __field(unsigned long, pfn) __field(const struct page *, page) - __field(unsigned int, count) + __field(unsigned long, count) __field(unsigned int, align) ), @@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(cma_alloc_class, __entry->align = align; ), - TP_printk("name=%s pfn=%lx page=%p count=%u align=%u", + TP_printk("name=%s pfn=%lx page=%p count=%lu align=%u", __get_str(name), __entry->pfn, __entry->page, @@ -42,7 +42,7 @@ DECLARE_EVENT_CLASS(cma_alloc_class, TRACE_EVENT(cma_release, TP_PROTO(const char *name, unsigned long pfn, const struct page *page, - unsigned int count), + unsigned long count), TP_ARGS(name, pfn, page, count), @@ -50,7 +50,7 @@ TRACE_EVENT(cma_release, __string(name, name) __field(unsigned long, pfn) __field(const struct page *, page) - __field(unsigned int, count) + __field(unsigned long, count) ), TP_fast_assign( @@ -60,7 +60,7 @@ TRACE_EVENT(cma_release, __entry->count = count; ), - TP_printk("name=%s pfn=%lx page=%p count=%u", + TP_printk("name=%s pfn=%lx page=%p count=%lu", __get_str(name), __entry->pfn, __entry->page, @@ -69,13 +69,13 @@ TRACE_EVENT(cma_release, 
TRACE_EVENT(cma_alloc_start, - TP_PROTO(const char *name, unsigned int count, unsigned int align), + TP_PROTO(const char *name, unsigned long count, unsigned int align), TP_ARGS(name, count, align), TP_STRUCT__entry( __string(name, name) - __field(unsigned int, count) + __field(unsigned long, count) __field(unsigned int, align) ), @@ -85,7 +85,7 @@ TRACE_EVENT(cma_alloc_start, __entry->align = align; ), - TP_printk("name=%s count=%u align=%u", + TP_printk("name=%s count=%lu align=%u", __get_str(name), __entry->count, __entry->align) @@ -94,7 +94,7 @@ TRACE_EVENT(cma_alloc_start, DEFINE_EVENT(cma_alloc_class, cma_alloc_finish, TP_PROTO(const char *name, unsigned long pfn, const struct page *page, - unsigned int count, unsigned int align), + unsigned long count, unsigned int align), TP_ARGS(name, pfn, page, count, align) ); @@ -102,7 +102,7 @@ DEFINE_EVENT(cma_alloc_class, cma_alloc_finish, DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry, TP_PROTO(const char *name, unsigned long pfn, const struct page *page, - unsigned int count, unsigned int align), + unsigned long count, unsigned int align), TP_ARGS(name, pfn, page, count, align) ); diff --git a/mm/cma.c b/mm/cma.c index cdad8c4de921..995e15480937 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -79,7 +79,7 @@ static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, } static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, - unsigned int count) + unsigned long count) { unsigned long bitmap_no, bitmap_count; unsigned long flags; @@ -423,21 +423,21 @@ static inline void cma_debug_show_areas(struct cma *cma) { } * This function allocates part of contiguous memory on specific * contiguous memory area. */ -struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, - bool no_warn) +struct page *cma_alloc(struct cma *cma, unsigned long count, + unsigned int align, bool no_warn) { unsigned long mask, offset; unsigned long pfn = -1; unsigned long start = 0; unsigned long bitmap_maxno, bitmap_no, bitmap_count; - size_t i; + unsigned long i; struct page *page = NULL; int ret = -ENOMEM; if (!cma || !cma->count || !cma->bitmap) goto out; - pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma, + pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma, count, align); if (!count) @@ -505,7 +505,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, } if (ret && !no_warn) { - pr_err_ratelimited("%s: %s: alloc failed, req-size: %zu pages, ret: %d\n", + pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n", __func__, cma->name, count, ret); cma_debug_show_areas(cma); } @@ -534,14 +534,15 @@ out: * It returns false when provided pages do not belong to contiguous area and * true otherwise. 
*/ -bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) +bool cma_release(struct cma *cma, const struct page *pages, + unsigned long count) { unsigned long pfn; if (!cma || !pages) return false; - pr_debug("%s(page %p, count %u)\n", __func__, (void *)pages, count); + pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count); pfn = page_to_pfn(pages); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 80754fc1f1ff..d12299c08b95 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -8973,9 +8973,9 @@ struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, } #endif /* CONFIG_CONTIG_ALLOC */ -void free_contig_range(unsigned long pfn, unsigned int nr_pages) +void free_contig_range(unsigned long pfn, unsigned long nr_pages) { - unsigned int count = 0; + unsigned long count = 0; for (; nr_pages--; pfn++) { struct page *page = pfn_to_page(pfn); @@ -8983,7 +8983,7 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages) count += page_count(page) != 1; __free_page(page); } - WARN(count != 0, "%d pages are still in use!\n", count); + WARN(count != 0, "%lu pages are still in use!\n", count); } EXPORT_SYMBOL(free_contig_range); -- cgit v1.2.3-71-gd317 From 575299ea18a8c0575d4c2ef6ad3fa4d41d529d1c Mon Sep 17 00:00:00 2001 From: Saravanan D Date: Tue, 4 May 2021 18:38:03 -0700 Subject: x86/mm: track linear mapping split events To help with debugging the sluggishness caused by TLB miss/reload, we introduce monotonic hugepage [direct mapped] split event counts since system state: SYSTEM_RUNNING to be displayed as part of /proc/vmstat in x86 servers The lifetime split event information will be displayed at the bottom of /proc/vmstat .... swap_ra 0 swap_ra_hit 0 direct_map_level2_splits 94 direct_map_level3_splits 4 nr_unstable 0 .... One of the many lasting sources of direct hugepage splits is kernel tracing (kprobes, tracepoints). Note that the kernel's code segment [512 MB] points to the same physical addresses that have been already mapped in the kernel's direct mapping range. Source : Documentation/x86/x86_64/mm.rst When we enable kernel tracing, the kernel has to modify attributes/permissions of the text segment hugepages that are direct mapped causing them to split. Kernel's direct mapped hugepages do not coalesce back after split and remain in place for the remainder of the lifetime. An instance of direct page splits when we turn on dynamic kernel tracing .... cat /proc/vmstat | grep -i direct_map_level direct_map_level2_splits 784 direct_map_level3_splits 12 bpftrace -e 'tracepoint:raw_syscalls:sys_enter { @ [pid, comm] = count(); }' cat /proc/vmstat | grep -i direct_map_level direct_map_level2_splits 789 direct_map_level3_splits 12 .... 
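For reference, a minimal userspace sketch (not part of this patch) that reads the two new counters back out of /proc/vmstat; the counter names are the strings added to vmstat_text in the hunk below:

	#include <stdio.h>
	#include <string.h>

	/* Print the direct-map split counters exposed via /proc/vmstat. */
	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f)) {
			if (!strncmp(line, "direct_map_level", 16))
				fputs(line, stdout); /* e.g. "direct_map_level2_splits 784" */
		}
		fclose(f);
		return 0;
	}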
Link: https://lkml.kernel.org/r/20210218235744.1040634-1-saravanand@fb.com Signed-off-by: Saravanan D Acked-by: Tejun Heo Acked-by: Johannes Weiner Acked-by: Dave Hansen Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/pat/set_memory.c | 8 ++++++++ include/linux/vm_event_item.h | 4 ++++ mm/vmstat.c | 4 ++++ 3 files changed, 16 insertions(+) (limited to 'include/linux')
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 427980617557..156cd235659f 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include @@ -91,6 +93,12 @@ static void split_page_count(int level) return; direct_pages_count[level]--; + if (system_state == SYSTEM_RUNNING) { + if (level == PG_LEVEL_2M) + count_vm_event(DIRECT_MAP_LEVEL2_SPLIT); + else if (level == PG_LEVEL_1G) + count_vm_event(DIRECT_MAP_LEVEL3_SPLIT); + } direct_pages_count[level - 1] += PTRS_PER_PTE; }
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 21d7c7f72f1c..ae0dd1948c2b 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -124,6 +124,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_SWAP SWAP_RA, SWAP_RA_HIT, +#endif +#ifdef CONFIG_X86 + DIRECT_MAP_LEVEL2_SPLIT, + DIRECT_MAP_LEVEL3_SPLIT, #endif NR_VM_EVENT_ITEMS };
diff --git a/mm/vmstat.c b/mm/vmstat.c index 06cd78dc914d..5ba118521ded 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1369,6 +1369,10 @@ const char * const vmstat_text[] = { "swap_ra", "swap_ra_hit", #endif +#ifdef CONFIG_X86 + "direct_map_level2_splits", + "direct_map_level3_splits", +#endif #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ }; #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */ -- cgit v1.2.3-71-gd317
From 1a08ae36cf8b5f26d0c64ebfe46f8eb07ea0b678 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Tue, 4 May 2021 18:38:53 -0700 Subject: mm cma: rename PF_MEMALLOC_NOCMA to PF_MEMALLOC_PIN PF_MEMALLOC_NOCMA is used to guarantee that the allocator will not return pages that might belong to a CMA region. This is currently used for long term gup to make sure that such pins are not going to be done on any CMA pages. When PF_MEMALLOC_NOCMA was introduced we hadn't realized that it focuses on CMA pages too much and that there is a larger class of pages that need the same treatment. The MOVABLE zone cannot contain any long term pins either, so it makes sense to reuse and redefine this flag for that usecase as well. Rename the flag to PF_MEMALLOC_PIN, which defines an allocation context which can only get pages suitable for long-term pins. Also rename memalloc_nocma_save()/memalloc_nocma_restore() to memalloc_pin_save()/memalloc_pin_restore() and make the new functions common.
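Illustrative usage of the renamed helpers, as a sketch only (the real caller is the FOLL_LONGTERM path in the gup.c hunk further down):

	unsigned int flags;

	flags = memalloc_pin_save();	/* sets PF_MEMALLOC_PIN on current */
	/* ... fault in / allocate the pages that will be pinned long term ... */
	memalloc_pin_restore(flags);	/* put the flag back as it was */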
[rppt@linux.ibm.com: fix renaming of PF_MEMALLOC_NOCMA to PF_MEMALLOC_PIN] Link: https://lkml.kernel.org/r/20210331163816.11517-1-rppt@kernel.org Link: https://lkml.kernel.org/r/20210215161349.246722-6-pasha.tatashin@soleen.com Signed-off-by: Pavel Tatashin Reviewed-by: John Hubbard Acked-by: Michal Hocko Signed-off-by: Mike Rapoport Cc: Dan Williams Cc: David Hildenbrand Cc: David Rientjes Cc: Ingo Molnar Cc: Ira Weiny Cc: James Morris Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: Joonsoo Kim Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Mike Kravetz Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Sasha Levin Cc: Steven Rostedt (VMware) Cc: Tyler Hicks Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 2 +- include/linux/sched/mm.h | 21 +++++---------------- mm/gup.c | 4 ++-- mm/hugetlb.c | 4 ++-- mm/page_alloc.c | 4 ++-- 5 files changed, 12 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 9c25c8e67030..d2c881384517 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1583,7 +1583,7 @@ extern struct pid *cad_pid; #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ -#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ +#define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 90b2a0bce11c..ae654819e8aa 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -271,29 +271,18 @@ static inline void memalloc_noreclaim_restore(unsigned int flags) current->flags = (current->flags & ~PF_MEMALLOC) | flags; } -#ifdef CONFIG_CMA -static inline unsigned int memalloc_nocma_save(void) +static inline unsigned int memalloc_pin_save(void) { - unsigned int flags = current->flags & PF_MEMALLOC_NOCMA; + unsigned int flags = current->flags & PF_MEMALLOC_PIN; - current->flags |= PF_MEMALLOC_NOCMA; + current->flags |= PF_MEMALLOC_PIN; return flags; } -static inline void memalloc_nocma_restore(unsigned int flags) +static inline void memalloc_pin_restore(unsigned int flags) { - current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags; + current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags; } -#else -static inline unsigned int memalloc_nocma_save(void) -{ - return 0; -} - -static inline void memalloc_nocma_restore(unsigned int flags) -{ -} -#endif #ifdef CONFIG_MEMCG DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg); diff --git a/mm/gup.c b/mm/gup.c index 3a00f6a8ffd6..a1eff7ad31da 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1722,7 +1722,7 @@ static long __gup_longterm_locked(struct mm_struct *mm, long rc; if (gup_flags & FOLL_LONGTERM) - flags = memalloc_nocma_save(); + flags = memalloc_pin_save(); rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL, gup_flags); @@ -1731,7 +1731,7 @@ static long __gup_longterm_locked(struct mm_struct *mm, if (rc > 0) rc = check_and_migrate_cma_pages(mm, start, rc, pages, vmas, gup_flags); - memalloc_nocma_restore(flags); + memalloc_pin_restore(flags); } return rc; } 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 533e5a26e437..60dc197a4417 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1079,11 +1079,11 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) { struct page *page; - bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA); + bool pin = !!(current->flags & PF_MEMALLOC_PIN); lockdep_assert_held(&hugetlb_lock); list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { - if (nocma && is_migrate_cma_page(page)) + if (pin && is_migrate_cma_page(page)) continue; if (PageHWPoison(page)) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d12299c08b95..c2fc6a64bef9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3865,8 +3865,8 @@ static inline unsigned int current_alloc_flags(gfp_t gfp_mask, #ifdef CONFIG_CMA unsigned int pflags = current->flags; - if (!(pflags & PF_MEMALLOC_NOCMA) && - gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) + if (!(pflags & PF_MEMALLOC_PIN) && + gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; #endif -- cgit v1.2.3-71-gd317 From 8e3560d963d22ba41857f48e4114ce80373144ea Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Tue, 4 May 2021 18:39:00 -0700 Subject: mm: honor PF_MEMALLOC_PIN for all movable pages PF_MEMALLOC_PIN is only honored for CMA pages, extend this flag to work for any allocations from ZONE_MOVABLE by removing __GFP_MOVABLE from gfp_mask when this flag is passed in the current context. Add is_pinnable_page() to return true if page is in a pinnable page. A pinnable page is not in ZONE_MOVABLE and not of MIGRATE_CMA type. Link: https://lkml.kernel.org/r/20210215161349.246722-8-pasha.tatashin@soleen.com Signed-off-by: Pavel Tatashin Acked-by: Michal Hocko Cc: Dan Williams Cc: David Hildenbrand Cc: David Rientjes Cc: Ingo Molnar Cc: Ira Weiny Cc: James Morris Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: Joonsoo Kim Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Mike Kravetz Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Sasha Levin Cc: Steven Rostedt (VMware) Cc: Tyler Hicks Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 18 ++++++++++++++++++ include/linux/sched/mm.h | 6 +++++- mm/hugetlb.c | 2 +- mm/page_alloc.c | 20 +++++++++----------- 4 files changed, 33 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 1dbb53c44243..d0e628f511e4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1141,6 +1141,11 @@ static inline bool is_zone_device_page(const struct page *page) } #endif +static inline bool is_zone_movable_page(const struct page *page) +{ + return page_zonenum(page) == ZONE_MOVABLE; +} + #ifdef CONFIG_DEV_PAGEMAP_OPS void free_devmap_managed_page(struct page *page); DECLARE_STATIC_KEY_FALSE(devmap_managed_key); @@ -1550,6 +1555,19 @@ static inline unsigned long page_to_section(const struct page *page) } #endif +/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ +#ifdef CONFIG_MIGRATION +static inline bool is_pinnable_page(struct page *page) +{ + return !is_zone_movable_page(page) && !is_migrate_cma_page(page); +} +#else +static inline bool is_pinnable_page(struct page *page) +{ + return true; +} +#endif + static inline void set_page_zone(struct page *page, enum zone_type zone) { page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index ae654819e8aa..e24b1fe348e3 100644 
--- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -151,12 +151,13 @@ static inline bool in_vfork(struct task_struct *tsk) * Applies per-task gfp context to the given allocation flags. * PF_MEMALLOC_NOIO implies GFP_NOIO * PF_MEMALLOC_NOFS implies GFP_NOFS + * PF_MEMALLOC_PIN implies !GFP_MOVABLE */ static inline gfp_t current_gfp_context(gfp_t flags) { unsigned int pflags = READ_ONCE(current->flags); - if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) { + if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) { /* * NOIO implies both NOIO and NOFS and it is a weaker context * so always make sure it makes precedence @@ -165,6 +166,9 @@ static inline gfp_t current_gfp_context(gfp_t flags) flags &= ~(__GFP_IO | __GFP_FS); else if (pflags & PF_MEMALLOC_NOFS) flags &= ~__GFP_FS; + + if (pflags & PF_MEMALLOC_PIN) + flags &= ~__GFP_MOVABLE; } return flags; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 60dc197a4417..629aa4c2259c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1083,7 +1083,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) lockdep_assert_held(&hugetlb_lock); list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { - if (pin && is_migrate_cma_page(page)) + if (pin && !is_pinnable_page(page)) continue; if (PageHWPoison(page)) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3c55eaafede1..81db38926266 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3859,16 +3859,13 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) return alloc_flags; } -static inline unsigned int current_alloc_flags(gfp_t gfp_mask, - unsigned int alloc_flags) +/* Must be called after current_gfp_context() which can change gfp_mask */ +static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, + unsigned int alloc_flags) { #ifdef CONFIG_CMA - unsigned int pflags = current->flags; - - if (!(pflags & PF_MEMALLOC_PIN) && - gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) + if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; - #endif return alloc_flags; } @@ -4526,7 +4523,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask) } else if (unlikely(rt_task(current)) && !in_interrupt()) alloc_flags |= ALLOC_HARDER; - alloc_flags = current_alloc_flags(gfp_mask, alloc_flags); + alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); return alloc_flags; } @@ -4828,7 +4825,7 @@ retry: reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); if (reserve_flags) - alloc_flags = current_alloc_flags(gfp_mask, reserve_flags); + alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags); /* * Reset the nodemask and zonelist iterators if memory policies can be @@ -4997,7 +4994,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, if (should_fail_alloc_page(gfp_mask, order)) return false; - *alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags); + *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); /* Dirty zone balancing only done in the fast path */ ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); @@ -5184,7 +5181,8 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, * Apply scoped allocation constraints. This is mainly about GFP_NOFS * resp. GFP_NOIO which has to be inherited for all allocation requests * from a particular context which has been marked by - * memalloc_no{fs,io}_{save,restore}. + * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures + * movable zones are not used during allocation. 
*/ gfp = current_gfp_context(gfp); alloc_gfp = gfp; -- cgit v1.2.3-71-gd317 From 9afaf30f7a1aab2022961715a66f644275b8daec Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Tue, 4 May 2021 18:39:04 -0700 Subject: mm/gup: do not migrate zero page On some platforms ZERO_PAGE(0) might end-up in a movable zone. Do not migrate zero page in gup during longterm pinning as migration of zero page is not allowed. For example, in x86 QEMU with 16G of memory and kernelcore=5G parameter, I see the following: Boot#1: zero_pfn 0x48a8d zero_pfn zone: ZONE_DMA32 Boot#2: zero_pfn 0x20168d zero_pfn zone: ZONE_MOVABLE On x86, empty_zero_page is declared in .bss and depending on the loader may end up in different physical locations during boots. Also, move is_zero_pfn() my_zero_pfn() functions under CONFIG_MMU, because zero_pfn that they are using is declared in memory.c which is compiled with CONFIG_MMU. Link: https://lkml.kernel.org/r/20210215161349.246722-9-pasha.tatashin@soleen.com Signed-off-by: Pavel Tatashin Cc: Dan Williams Cc: David Hildenbrand Cc: David Rientjes Cc: Ingo Molnar Cc: Ira Weiny Cc: James Morris Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: Joonsoo Kim Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Michal Hocko Cc: Mike Kravetz Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Sasha Levin Cc: Steven Rostedt (VMware) Cc: Tyler Hicks Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 3 ++- include/linux/mmzone.h | 4 ++++ include/linux/pgtable.h | 12 ++++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index d0e628f511e4..76e27ebb28a3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1559,7 +1559,8 @@ static inline unsigned long page_to_section(const struct page *page) #ifdef CONFIG_MIGRATION static inline bool is_pinnable_page(struct page *page) { - return !is_zone_movable_page(page) && !is_migrate_cma_page(page); + return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) || + is_zero_pfn(page_to_pfn(page)); } #else static inline bool is_pinnable_page(struct page *page) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3b2205741048..92b44149d5b9 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -427,6 +427,10 @@ enum zone_type { * techniques might use alloc_contig_range() to hide previously * exposed pages from the buddy again (e.g., to implement some sort * of memory unplug in virtio-mem). + * 6. ZERO_PAGE(0), kernelcore/movablecore setups might create + * situations where ZERO_PAGE(0) which is allocated differently + * on different platforms may end up in a movable zone. ZERO_PAGE(0) + * cannot be migrated. * * In general, no unmovable allocations that degrade memory offlining * should end up in ZONE_MOVABLE. 
Allocators (like alloc_contig_range()) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 5e772392a379..2194a9cd885c 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1111,6 +1111,7 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, extern void untrack_pfn_moved(struct vm_area_struct *vma); #endif +#ifdef CONFIG_MMU #ifdef __HAVE_COLOR_ZERO_PAGE static inline int is_zero_pfn(unsigned long pfn) { @@ -1134,6 +1135,17 @@ static inline unsigned long my_zero_pfn(unsigned long addr) return zero_pfn; } #endif +#else +static inline int is_zero_pfn(unsigned long pfn) +{ + return 0; +} + +static inline unsigned long my_zero_pfn(unsigned long addr) +{ + return 0; +} +#endif /* CONFIG_MMU */ #ifdef CONFIG_MMU -- cgit v1.2.3-71-gd317 From d1e153fea2a8940273174fc17733c44323d35cd5 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Tue, 4 May 2021 18:39:08 -0700 Subject: mm/gup: migrate pinned pages out of movable zone We should not pin pages in ZONE_MOVABLE. Currently, we do not pin only movable CMA pages. Generalize the function that migrates CMA pages to migrate all movable pages. Use is_pinnable_page() to check which pages need to be migrated Link: https://lkml.kernel.org/r/20210215161349.246722-10-pasha.tatashin@soleen.com Signed-off-by: Pavel Tatashin Reviewed-by: John Hubbard Cc: Dan Williams Cc: David Hildenbrand Cc: David Rientjes Cc: Ingo Molnar Cc: Ira Weiny Cc: James Morris Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: Joonsoo Kim Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Michal Hocko Cc: Mike Kravetz Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Sasha Levin Cc: Steven Rostedt (VMware) Cc: Tyler Hicks Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 1 + include/linux/mmzone.h | 9 ++++-- include/trace/events/migrate.h | 3 +- mm/gup.c | 67 +++++++++++++++++++++--------------------- 4 files changed, 44 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 175ef15ae9e8..4bb4e519e3f5 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -27,6 +27,7 @@ enum migrate_reason { MR_MEMPOLICY_MBIND, MR_NUMA_MISPLACED, MR_CONTIG_RANGE, + MR_LONGTERM_PIN, MR_TYPES }; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 92b44149d5b9..e8922a67d1a4 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -407,8 +407,13 @@ enum zone_type { * to increase the number of THP/huge pages. Notable special cases are: * * 1. Pinned pages: (long-term) pinning of movable pages might - * essentially turn such pages unmovable. Memory offlining might - * retry a long time. + * essentially turn such pages unmovable. Therefore, we do not allow + * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and + * faulted, they come from the right zone right away. However, it is + * still possible that address space already has pages in + * ZONE_MOVABLE at the time when pages are pinned (i.e. user has + * touches that memory before pinning). In such case we migrate them + * to a different zone. When migration fails - pinning fails. * 2. memblock allocations: kernelcore/movablecore setups might create * situations where ZONE_MOVABLE contains unmovable allocations * after boot. Memory offlining and allocations fail early. 
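A condensed sketch, not verbatim from the patch, of how the pin path uses the new predicate: pages that fail is_pinnable_page() are isolated and migrated out of ZONE_MOVABLE/CMA before the long-term pin is retried (the real logic is check_and_migrate_movable_pages() in the gup.c hunks below):

	/* Sketch only; isolation, error handling and huge page cases omitted. */
	for (i = 0; i < nr_pages; i++) {
		struct page *head = compound_head(pages[i]);

		if (is_pinnable_page(head))
			continue;	/* already outside ZONE_MOVABLE and CMA */

		/* otherwise isolate the page and queue it for migrate_pages(),
		   recording the move as MR_LONGTERM_PIN */
	}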
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index f2c990603888..9fb2a3bbcdfb 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -20,7 +20,8 @@ EM( MR_SYSCALL, "syscall_or_cpuset") \ EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \ EM( MR_NUMA_MISPLACED, "numa_misplaced") \ - EMe(MR_CONTIG_RANGE, "contig_range") + EM( MR_CONTIG_RANGE, "contig_range") \ + EMe(MR_LONGTERM_PIN, "longterm_pin") /* * First define the enums in the above macros to be exported to userspace diff --git a/mm/gup.c b/mm/gup.c index a1eff7ad31da..4bc57420f535 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -87,11 +87,12 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page, int orig_refs = refs; /* - * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast - * path, so fail and let the caller fall back to the slow path. + * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a + * right zone, so fail and let the caller fall back to the slow + * path. */ - if (unlikely(flags & FOLL_LONGTERM) && - is_migrate_cma_page(page)) + if (unlikely((flags & FOLL_LONGTERM) && + !is_pinnable_page(page))) return NULL; /* @@ -1600,17 +1601,17 @@ struct page *get_dump_page(unsigned long addr) } #endif /* CONFIG_ELF_CORE */ -#ifdef CONFIG_CMA -static long check_and_migrate_cma_pages(struct mm_struct *mm, - unsigned long start, - unsigned long nr_pages, - struct page **pages, - struct vm_area_struct **vmas, - unsigned int gup_flags) +#ifdef CONFIG_MIGRATION +static long check_and_migrate_movable_pages(struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + struct page **pages, + struct vm_area_struct **vmas, + unsigned int gup_flags) { unsigned long i, isolation_error_count; bool drain_allow; - LIST_HEAD(cma_page_list); + LIST_HEAD(movable_page_list); long ret = nr_pages; struct page *prev_head, *head; struct migration_target_control mtc = { @@ -1628,13 +1629,12 @@ check_again: continue; prev_head = head; /* - * If we get a page from the CMA zone, since we are going to - * be pinning these entries, we might as well move them out - * of the CMA zone if possible. + * If we get a movable page, since we are going to be pinning + * these entries, try to move them out if possible. */ - if (is_migrate_cma_page(head)) { + if (!is_pinnable_page(head)) { if (PageHuge(head)) { - if (!isolate_huge_page(head, &cma_page_list)) + if (!isolate_huge_page(head, &movable_page_list)) isolation_error_count++; } else { if (!PageLRU(head) && drain_allow) { @@ -1646,7 +1646,7 @@ check_again: isolation_error_count++; continue; } - list_add_tail(&head->lru, &cma_page_list); + list_add_tail(&head->lru, &movable_page_list); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_lru(head), @@ -1659,10 +1659,10 @@ check_again: * If list is empty, and no isolation errors, means that all pages are * in the correct zone. */ - if (list_empty(&cma_page_list) && !isolation_error_count) + if (list_empty(&movable_page_list) && !isolation_error_count) return ret; - if (!list_empty(&cma_page_list)) { + if (!list_empty(&movable_page_list)) { /* * drop the above get_user_pages reference. 
*/ @@ -1672,12 +1672,12 @@ check_again: for (i = 0; i < nr_pages; i++) put_page(pages[i]); - ret = migrate_pages(&cma_page_list, alloc_migration_target, + ret = migrate_pages(&movable_page_list, alloc_migration_target, NULL, (unsigned long)&mtc, MIGRATE_SYNC, - MR_CONTIG_RANGE); + MR_LONGTERM_PIN); if (ret) { - if (!list_empty(&cma_page_list)) - putback_movable_pages(&cma_page_list); + if (!list_empty(&movable_page_list)) + putback_movable_pages(&movable_page_list); return ret > 0 ? -ENOMEM : ret; } @@ -1696,16 +1696,16 @@ check_again: goto check_again; } #else -static long check_and_migrate_cma_pages(struct mm_struct *mm, - unsigned long start, - unsigned long nr_pages, - struct page **pages, - struct vm_area_struct **vmas, - unsigned int gup_flags) +static long check_and_migrate_movable_pages(struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + struct page **pages, + struct vm_area_struct **vmas, + unsigned int gup_flags) { return nr_pages; } -#endif /* CONFIG_CMA */ +#endif /* CONFIG_MIGRATION */ /* * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which @@ -1729,8 +1729,9 @@ static long __gup_longterm_locked(struct mm_struct *mm, if (gup_flags & FOLL_LONGTERM) { if (rc > 0) - rc = check_and_migrate_cma_pages(mm, start, rc, pages, - vmas, gup_flags); + rc = check_and_migrate_movable_pages(mm, start, rc, + pages, vmas, + gup_flags); memalloc_pin_restore(flags); } return rc; -- cgit v1.2.3-71-gd317 From a08a2ae3461383c2d50d0997dcc6cd1dd1fefb08 Mon Sep 17 00:00:00 2001 From: Oscar Salvador Date: Tue, 4 May 2021 18:39:42 -0700 Subject: mm,memory_hotplug: allocate memmap from the added memory range Physical memory hotadd has to allocate a memmap (struct page array) for the newly added memory section. Currently, alloc_pages_node() is used for those allocations. This has some disadvantages: a) an existing memory is consumed for that purpose (eg: ~2MB per 128MB memory section on x86_64) This can even lead to extreme cases where system goes OOM because the physically hotplugged memory depletes the available memory before it is onlined. b) if the whole node is movable then we have off-node struct pages which has performance drawbacks. c) It might be there are no PMD_ALIGNED chunks so memmap array gets populated with base pages. This can be improved when CONFIG_SPARSEMEM_VMEMMAP is enabled. Vmemap page tables can map arbitrary memory. That means that we can reserve a part of the physically hotadded memory to back vmemmap page tables. This implementation uses the beginning of the hotplugged memory for that purpose. There are some non-obviously things to consider though. Vmemmap pages are allocated/freed during the memory hotplug events (add_memory_resource(), try_remove_memory()) when the memory is added/removed. This means that the reserved physical range is not online although it is used. The most obvious side effect is that pfn_to_online_page() returns NULL for those pfns. The current design expects that this should be OK as the hotplugged memory is considered a garbage until it is onlined. For example hibernation wouldn't save the content of those vmmemmaps into the image so it wouldn't be restored on resume but this should be OK as there no real content to recover anyway while metadata is reachable from other data structures (e.g. vmemmap page tables). The reserved space is therefore (de)initialized during the {on,off}line events (mhp_{de}init_memmap_on_memory). 
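As a rough worked example of the size check introduced later in mhp_supports_memmap_on_memory() (assuming x86_64 defaults of 4 KiB pages, a 64-byte struct page and 128 MiB memory blocks), the vmemmap for one block is 2 MiB, which is PMD-aligned, so the block can self-host its own memmap:

	/* Arithmetic sketch only; the constants are assumed x86_64 defaults,
	 * not taken from this patch. */
	unsigned long size = 128UL << 20;		/* one memory block        */
	unsigned long nr_pages = size / 4096;		/* 32768 pages to describe */
	unsigned long vmemmap_size = nr_pages * 64;	/* 2 MiB of struct pages   */
	/* usable iff vmemmap_size is PMD-aligned and size - vmemmap_size is
	 * pageblock-aligned; both hold here (2 MiB and 126 MiB).             */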
That is done by extracting page allocator independent initialization from the regular onlining path. The primary reason to handle the reserved space outside of {on,off}line_pages is to make each initialization specific to the purpose rather than special case them in a single function. As per above, the functions that are introduced are: - mhp_init_memmap_on_memory: Initializes vmemmap pages by calling move_pfn_range_to_zone(), calls kasan_add_zero_shadow(), and onlines as many sections as vmemmap pages fully span. - mhp_deinit_memmap_on_memory: Offlines as many sections as vmemmap pages fully span, removes the range from the zone by remove_pfn_range_from_zone(), and calls kasan_remove_zero_shadow() for the range. The new function memory_block_online() calls mhp_init_memmap_on_memory() before doing the actual online_pages(). Should online_pages() fail, we clean up by calling mhp_deinit_memmap_on_memory(). Adjusting present_pages is done at the end once we know that online_pages() succeeded. On offline, memory_block_offline() needs to unaccount vmemmap pages from present_pages() before calling offline_pages(). This is necessary because offline_pages() tears down some structures based on whether the node or the zone becomes empty. If offline_pages() fails, we account back vmemmap pages. If it succeeds, we call mhp_deinit_memmap_on_memory(). Hot-remove: We need to be careful when removing memory, as adding and removing memory needs to be done with the same granularity. To check that this assumption is not violated, we check the memory range we want to remove and if a) any memory block has vmemmap pages and b) the range spans more than a single memory block, we scream out loud and refuse to proceed. If all is good and the range was using memmap on memory (aka vmemmap pages), we construct an altmap structure so free_hugepage_table does the right thing and calls vmem_altmap_free instead of free_pagetable. Link: https://lkml.kernel.org/r/20210421102701.25051-5-osalvador@suse.de Signed-off-by: Oscar Salvador Reviewed-by: David Hildenbrand Acked-by: Michal Hocko Cc: Anshuman Khandual Cc: Pavel Tatashin Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 72 ++++++++++++++++-- include/linux/memory.h | 8 +- include/linux/memory_hotplug.h | 15 +++- include/linux/memremap.h | 2 +- include/linux/mmzone.h | 7 +- mm/Kconfig | 5 ++ mm/memory_hotplug.c | 161 ++++++++++++++++++++++++++++++++++++++--- mm/sparse.c | 2 - 8 files changed, 250 insertions(+), 22 deletions(-) (limited to 'include/linux')
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index f209925a5d4e..b31b3af5c490 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -173,16 +173,73 @@ static int memory_block_online(struct memory_block *mem) { unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; + unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages; + struct zone *zone; + int ret; + + zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages); + + /* + * Although vmemmap pages have a different lifecycle than the pages + * they describe (they remain until the memory is unplugged), doing + * their initialization and accounting at memory onlining/offlining + * stage helps to keep accounting easier to follow - e.g vmemmaps + * belong to the same zone as the memory they backed.
+ */ + if (nr_vmemmap_pages) { + ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone); + if (ret) + return ret; + } + + ret = online_pages(start_pfn + nr_vmemmap_pages, + nr_pages - nr_vmemmap_pages, zone); + if (ret) { + if (nr_vmemmap_pages) + mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages); + return ret; + } + + /* + * Account once onlining succeeded. If the zone was unpopulated, it is + * now already properly populated. + */ + if (nr_vmemmap_pages) + adjust_present_page_count(zone, nr_vmemmap_pages); - return online_pages(start_pfn, nr_pages, mem->online_type, mem->nid); + return ret; } static int memory_block_offline(struct memory_block *mem) { unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; + unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages; + struct zone *zone; + int ret; + + zone = page_zone(pfn_to_page(start_pfn)); + + /* + * Unaccount before offlining, such that unpopulated zone and kthreads + * can properly be torn down in offline_pages(). + */ + if (nr_vmemmap_pages) + adjust_present_page_count(zone, -nr_vmemmap_pages); - return offline_pages(start_pfn, nr_pages); + ret = offline_pages(start_pfn + nr_vmemmap_pages, + nr_pages - nr_vmemmap_pages); + if (ret) { + /* offline_pages() failed. Account back. */ + if (nr_vmemmap_pages) + adjust_present_page_count(zone, nr_vmemmap_pages); + return ret; + } + + if (nr_vmemmap_pages) + mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages); + + return ret; } /* @@ -576,7 +633,8 @@ int register_memory(struct memory_block *memory) return ret; } -static int init_memory_block(unsigned long block_id, unsigned long state) +static int init_memory_block(unsigned long block_id, unsigned long state, + unsigned long nr_vmemmap_pages) { struct memory_block *mem; int ret = 0; @@ -593,6 +651,7 @@ static int init_memory_block(unsigned long block_id, unsigned long state) mem->start_section_nr = block_id * sections_per_block; mem->state = state; mem->nid = NUMA_NO_NODE; + mem->nr_vmemmap_pages = nr_vmemmap_pages; ret = register_memory(mem); @@ -612,7 +671,7 @@ static int add_memory_block(unsigned long base_section_nr) if (section_count == 0) return 0; return init_memory_block(memory_block_id(base_section_nr), - MEM_ONLINE); + MEM_ONLINE, 0); } static void unregister_memory(struct memory_block *memory) @@ -634,7 +693,8 @@ static void unregister_memory(struct memory_block *memory) * * Called under device_hotplug_lock. */ -int create_memory_block_devices(unsigned long start, unsigned long size) +int create_memory_block_devices(unsigned long start, unsigned long size, + unsigned long vmemmap_pages) { const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start)); unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); @@ -647,7 +707,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size) return -EINVAL; for (block_id = start_block_id; block_id != end_block_id; block_id++) { - ret = init_memory_block(block_id, MEM_OFFLINE); + ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages); if (ret) break; } diff --git a/include/linux/memory.h b/include/linux/memory.h index 4da95e684e20..97e92e8b556a 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -29,6 +29,11 @@ struct memory_block { int online_type; /* for passing data to online routine */ int nid; /* NID for this memory block */ struct device dev; + /* + * Number of vmemmap pages. 
These pages + * lay at the beginning of the memory block. + */ + unsigned long nr_vmemmap_pages; }; int arch_get_memory_phys_device(unsigned long start_pfn); @@ -80,7 +85,8 @@ static inline int memory_notify(unsigned long val, void *v) #else extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); -int create_memory_block_devices(unsigned long start, unsigned long size); +int create_memory_block_devices(unsigned long start, unsigned long size, + unsigned long vmemmap_pages); void remove_memory_block_devices(unsigned long start, unsigned long size); extern void memory_dev_init(void); extern int memory_notify(unsigned long val, void *v); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 7288aa5ef73b..28f32fd00fe9 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -55,6 +55,14 @@ typedef int __bitwise mhp_t; */ #define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) +/* + * We want memmap (struct page array) to be self contained. + * To do so, we will use the beginning of the hot-added range to build + * the page tables for the memmap array that describes the entire range. + * Only selected architectures support it with SPARSE_VMEMMAP. + */ +#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1)) + /* * Extended parameters for memory hotplug: * altmap: alternative allocator for memmap array (optional) @@ -99,9 +107,13 @@ static inline void zone_seqlock_init(struct zone *zone) extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); +extern void adjust_present_page_count(struct zone *zone, long nr_pages); /* VM interface that may be used by firmware interface */ +extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, + struct zone *zone); +extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); extern int online_pages(unsigned long pfn, unsigned long nr_pages, - int online_type, int nid); + struct zone *zone); extern struct zone *test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn); extern void __offline_isolated_pages(unsigned long start_pfn, @@ -359,6 +371,7 @@ extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_ extern int arch_create_linear_mapping(int nid, u64 start, u64 size, struct mhp_params *params); void arch_remove_linear_mapping(u64 start, u64 size); +extern bool mhp_supports_memmap_on_memory(unsigned long size); #endif /* CONFIG_MEMORY_HOTPLUG */ #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/memremap.h b/include/linux/memremap.h index f5b464daeeca..45a79da89c5f 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -17,7 +17,7 @@ struct device; * @alloc: track pages consumed, private to vmemmap_populate() */ struct vmem_altmap { - const unsigned long base_pfn; + unsigned long base_pfn; const unsigned long end_pfn; const unsigned long reserve; unsigned long free; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index e8922a67d1a4..917bd6c604d5 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -436,6 +436,11 @@ enum zone_type { * situations where ZERO_PAGE(0) which is allocated differently * on different platforms may end up in a movable zone. ZERO_PAGE(0) * cannot be migrated. + * 7. 
Memory-hotplug: when using memmap_on_memory and onlining the + * memory to the MOVABLE zone, the vmemmap pages are also placed in + * such zone. Such pages cannot be really moved around as they are + * self-stored in the range, but they are treated as movable when + * the range they describe is about to be offlined. * * In general, no unmovable allocations that degrade memory offlining * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range()) @@ -1392,10 +1397,8 @@ static inline int online_section_nr(unsigned long nr) #ifdef CONFIG_MEMORY_HOTPLUG void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn); -#ifdef CONFIG_MEMORY_HOTREMOVE void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn); #endif -#endif static inline struct mem_section *__pfn_to_section(unsigned long pfn) { diff --git a/mm/Kconfig b/mm/Kconfig index fe4897c3c81b..02d44e3420f5 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -188,6 +188,11 @@ config MEMORY_HOTREMOVE depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE depends on MIGRATION +config MHP_MEMMAP_ON_MEMORY + def_bool y + depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP + depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE + # Heavily threaded applications may benefit from splitting the mm-wide # page_table_lock, so that faults on different parts of the user address # space can be handled with less contention: split it at this NR_CPUS. diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 04f01fabc150..0b3157836814 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -42,6 +42,8 @@ #include "internal.h" #include "shuffle.h" +static bool memmap_on_memory; + /* * online_page_callback contains pointer to current page onlining function. * Initially it is generic_online_page(). If it is required it could be @@ -648,9 +650,16 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages) * decide to not expose all pages to the buddy (e.g., expose them * later). We account all pages as being online and belonging to this * zone ("present"). + * When using memmap_on_memory, the range might not be aligned to + * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect + * this and the first chunk to online will be pageblock_nr_pages. */ - for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES) - (*online_page_callback)(pfn_to_page(pfn), MAX_ORDER - 1); + for (pfn = start_pfn; pfn < end_pfn;) { + int order = min(MAX_ORDER - 1UL, __ffs(pfn)); + + (*online_page_callback)(pfn_to_page(pfn), order); + pfn += (1UL << order); + } /* mark all involved sections as online */ online_mem_sections(start_pfn, end_pfn); @@ -829,7 +838,11 @@ struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, return default_zone_for_pfn(nid, start_pfn, nr_pages); } -static void adjust_present_page_count(struct zone *zone, long nr_pages) +/* + * This function should only be called by memory_block_{online,offline}, + * and {online,offline}_pages. 
+ */ +void adjust_present_page_count(struct zone *zone, long nr_pages) { unsigned long flags; @@ -839,12 +852,54 @@ static void adjust_present_page_count(struct zone *zone, long nr_pages) pgdat_resize_unlock(zone->zone_pgdat, &flags); } -int __ref online_pages(unsigned long pfn, unsigned long nr_pages, - int online_type, int nid) +int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, + struct zone *zone) +{ + unsigned long end_pfn = pfn + nr_pages; + int ret; + + ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); + if (ret) + return ret; + + move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE); + + /* + * It might be that the vmemmap_pages fully span sections. If that is + * the case, mark those sections online here as otherwise they will be + * left offline. + */ + if (nr_pages >= PAGES_PER_SECTION) + online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION)); + + return ret; +} + +void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages) +{ + unsigned long end_pfn = pfn + nr_pages; + + /* + * It might be that the vmemmap_pages fully span sections. If that is + * the case, mark those sections offline here as otherwise they will be + * left online. + */ + if (nr_pages >= PAGES_PER_SECTION) + offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION)); + + /* + * The pages associated with this vmemmap have been offlined, so + * we can reset its state here. + */ + remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages); + kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); +} + +int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone) { unsigned long flags; - struct zone *zone; int need_zonelists_rebuild = 0; + const int nid = zone_to_nid(zone); int ret; struct memory_notify arg; @@ -863,7 +918,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, mem_hotplug_begin(); /* associate pfn range with the zone */ - zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages); move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE); arg.start_pfn = pfn; @@ -1077,6 +1131,45 @@ static int online_memory_block(struct memory_block *mem, void *arg) return device_online(&mem->dev); } +bool mhp_supports_memmap_on_memory(unsigned long size) +{ + unsigned long nr_vmemmap_pages = size / PAGE_SIZE; + unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page); + unsigned long remaining_size = size - vmemmap_size; + + /* + * Besides having arch support and the feature enabled at runtime, we + * need a few more assumptions to hold true: + * + * a) We span a single memory block: memory onlining/offlinin;g happens + * in memory block granularity. We don't want the vmemmap of online + * memory blocks to reside on offline memory blocks. In the future, + * we might want to support variable-sized memory blocks to make the + * feature more versatile. + * + * b) The vmemmap pages span complete PMDs: We don't want vmemmap code + * to populate memory from the altmap for unrelated parts (i.e., + * other memory blocks) + * + * c) The vmemmap pages (and thereby the pages that will be exposed to + * the buddy) have to cover full pageblocks: memory onlining/offlining + * code requires applicable ranges to be page-aligned, for example, to + * set the migratetypes properly. + * + * TODO: Although we have a check here to make sure that vmemmap pages + * fully populate a PMD, it is not the right place to check for + * this. 
A much better solution involves improving vmemmap code + * to fallback to base pages when trying to populate vmemmap using + * altmap as an alternative source of memory, and we do not exactly + * populate a single PMD. + */ + return memmap_on_memory && + IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) && + size == memory_block_size_bytes() && + IS_ALIGNED(vmemmap_size, PMD_SIZE) && + IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT)); +} + /* * NOTE: The caller must call lock_device_hotplug() to serialize hotplug * and online/offline operations (triggered e.g. by sysfs). @@ -1086,6 +1179,7 @@ static int online_memory_block(struct memory_block *mem, void *arg) int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) { struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) }; + struct vmem_altmap mhp_altmap = {}; u64 start, size; bool new_node = false; int ret; @@ -1112,13 +1206,26 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) goto error; new_node = ret; + /* + * Self hosted memmap array + */ + if (mhp_flags & MHP_MEMMAP_ON_MEMORY) { + if (!mhp_supports_memmap_on_memory(size)) { + ret = -EINVAL; + goto error; + } + mhp_altmap.free = PHYS_PFN(size); + mhp_altmap.base_pfn = PHYS_PFN(start); + params.altmap = &mhp_altmap; + } + /* call arch's memory hotadd */ ret = arch_add_memory(nid, start, size, ¶ms); if (ret < 0) goto error; /* create memory block devices after memory was added */ - ret = create_memory_block_devices(start, size); + ret = create_memory_block_devices(start, size, mhp_altmap.alloc); if (ret) { arch_remove_memory(nid, start, size, NULL); goto error; @@ -1767,6 +1874,14 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg) return 0; } +static int get_nr_vmemmap_pages_cb(struct memory_block *mem, void *arg) +{ + /* + * If not set, continue with the next block. + */ + return mem->nr_vmemmap_pages; +} + static int check_cpu_on_node(pg_data_t *pgdat) { int cpu; @@ -1841,6 +1956,9 @@ EXPORT_SYMBOL(try_offline_node); static int __ref try_remove_memory(int nid, u64 start, u64 size) { int rc = 0; + struct vmem_altmap mhp_altmap = {}; + struct vmem_altmap *altmap = NULL; + unsigned long nr_vmemmap_pages; BUG_ON(check_hotplug_memory_range(start, size)); @@ -1853,6 +1971,31 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) if (rc) return rc; + /* + * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in + * the same granularity it was added - a single memory block. + */ + if (memmap_on_memory) { + nr_vmemmap_pages = walk_memory_blocks(start, size, NULL, + get_nr_vmemmap_pages_cb); + if (nr_vmemmap_pages) { + if (size != memory_block_size_bytes()) { + pr_warn("Refuse to remove %#llx - %#llx," + "wrong granularity\n", + start, start + size); + return -EINVAL; + } + + /* + * Let remove_pmd_table->free_hugepage_table do the + * right thing if we used vmem_altmap when hot-adding + * the range. 
+ */ + mhp_altmap.alloc = nr_vmemmap_pages; + altmap = &mhp_altmap; + } + } + /* remove memmap entry */ firmware_map_remove(start, start + size, "System RAM"); @@ -1864,7 +2007,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) mem_hotplug_begin(); - arch_remove_memory(nid, start, size, NULL); + arch_remove_memory(nid, start, size, altmap); if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { memblock_free(start, size); diff --git a/mm/sparse.c b/mm/sparse.c index 33406ea2ecc4..d3fbed26e64e 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -624,7 +624,6 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn) } } -#ifdef CONFIG_MEMORY_HOTREMOVE /* Mark all memory sections within the pfn range as offline */ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) { @@ -645,7 +644,6 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) ms->section_mem_map &= ~SECTION_IS_ONLINE; } } -#endif #ifdef CONFIG_SPARSEMEM_VMEMMAP static struct page * __meminit populate_section_memmap(unsigned long pfn, -- cgit v1.2.3-71-gd317 From 28961998f858114e51d2ae862065b858afcfa2b2 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Tue, 4 May 2021 18:40:03 -0700 Subject: iov_iter: lift memzero_page() to highmem.h Patch series "btrfs: Convert kmap/memset/kunmap to memzero_user()". Lifting memzero_user(), convert it to kmap_local_page() and then use it in btrfs. This patch (of 3): memzero_page() can replace the kmap/memset/kunmap pattern in other places in the code. While zero_user() has the same interface it is not the same call and its use should be limited and some of those calls may be better converted from zero_user() to memzero_page().[1] But that is not addressed in this series. Lift memzero_page() to highmem. 
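The conversion pattern this series enables, sketched for illustration (the helper itself is the one added to highmem.h below):

	/* Before: open-coded zeroing of part of a (possibly highmem) page. */
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);

	/* After: the same effect through the lifted helper. */
	memzero_page(page, offset, len);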
[1] https://lore.kernel.org/lkml/CAHk-=wijdojzo56FzYqE5TOYw2Vws7ik3LEMGj9SPQaJJ+Z73Q@mail.gmail.com/ Link: https://lkml.kernel.org/r/20210309212137.2610186-1-ira.weiny@intel.com Link: https://lkml.kernel.org/r/20210309212137.2610186-2-ira.weiny@intel.com Signed-off-by: Ira Weiny Cc: Alexander Viro Cc: David Sterba Cc: Chris Mason Cc: Josef Bacik Cc: Chaitanya Kulkarni Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/highmem.h | 7 +++++++ lib/iov_iter.c | 8 +------- 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 44170f312ae7..832b49b50c7b 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -332,4 +332,11 @@ static inline void memcpy_to_page(struct page *page, size_t offset, kunmap_local(to); } +static inline void memzero_page(struct page *page, size_t offset, size_t len) +{ + char *addr = kmap_atomic(page); + memset(addr + offset, 0, len); + kunmap_atomic(addr); +} + #endif /* _LINUX_HIGHMEM_H */ diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 61228a6c69f8..c701b7a187f2 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -507,13 +508,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_init); -static void memzero_page(struct page *page, size_t offset, size_t len) -{ - char *addr = kmap_atomic(page); - memset(addr + offset, 0, len); - kunmap_atomic(addr); -} - static inline bool allocated(struct pipe_buffer *buf) { return buf->ops == &default_pipe_buf_ops; -- cgit v1.2.3-71-gd317 From 866a6dadbb027b2955a7ae00bab9705d382def12 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 4 May 2021 17:27:28 -0700 Subject: context_tracking: Move guest exit context tracking to separate helpers Provide separate context tracking helpers for guest exit, the standalone helpers will be called separately by KVM x86 in later patches to fix tick-based accounting. 
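A sketch of how a later KVM x86 caller is expected to use the standalone helper; handle_exit_reason() is hypothetical, and vtime_account_guest_exit() is only introduced by the following patch in this series:

	static void vcpu_after_hw_exit(void)
	{
		context_tracking_guest_exit();	/* leave CONTEXT_GUEST right after the VM-exit */

		handle_exit_reason();		/* hypothetical exit handling; ticks may fire  */

		vtime_account_guest_exit();	/* account guest time once handling is done    */
	}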
Suggested-by: Thomas Gleixner Signed-off-by: Wanpeng Li Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20210505002735.1684165-2-seanjc@google.com --- include/linux/context_tracking.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index bceb06498521..b8c7313495a7 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -131,10 +131,15 @@ static __always_inline void guest_enter_irqoff(void) } } -static __always_inline void guest_exit_irqoff(void) +static __always_inline void context_tracking_guest_exit(void) { if (context_tracking_enabled()) __context_tracking_exit(CONTEXT_GUEST); +} + +static __always_inline void guest_exit_irqoff(void) +{ + context_tracking_guest_exit(); instrumentation_begin(); if (vtime_accounting_enabled_this_cpu()) @@ -159,6 +164,8 @@ static __always_inline void guest_enter_irqoff(void) instrumentation_end(); } +static __always_inline void context_tracking_guest_exit(void) { } + static __always_inline void guest_exit_irqoff(void) { instrumentation_begin(); -- cgit v1.2.3-71-gd317 From 88d8220bbf06dd8045b2ac4be1046290eaa7773a Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 4 May 2021 17:27:29 -0700 Subject: context_tracking: Move guest exit vtime accounting to separate helpers Provide separate vtime accounting functions for guest exit instead of open coding the logic within the context tracking code. This will allow KVM x86 to handle vtime accounting slightly differently when using tick-based accounting. Suggested-by: Thomas Gleixner Signed-off-by: Wanpeng Li Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson Signed-off-by: Thomas Gleixner Reviewed-by: Christian Borntraeger Link: https://lore.kernel.org/r/20210505002735.1684165-3-seanjc@google.com --- include/linux/context_tracking.h | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index b8c7313495a7..4f4556232dcf 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -137,15 +137,20 @@ static __always_inline void context_tracking_guest_exit(void) __context_tracking_exit(CONTEXT_GUEST); } -static __always_inline void guest_exit_irqoff(void) +static __always_inline void vtime_account_guest_exit(void) { - context_tracking_guest_exit(); - - instrumentation_begin(); if (vtime_accounting_enabled_this_cpu()) vtime_guest_exit(current); else current->flags &= ~PF_VCPU; +} + +static __always_inline void guest_exit_irqoff(void) +{ + context_tracking_guest_exit(); + + instrumentation_begin(); + vtime_account_guest_exit(); instrumentation_end(); } @@ -166,12 +171,17 @@ static __always_inline void guest_enter_irqoff(void) static __always_inline void context_tracking_guest_exit(void) { } +static __always_inline void vtime_account_guest_exit(void) +{ + vtime_account_kernel(current); + current->flags &= ~PF_VCPU; +} + static __always_inline void guest_exit_irqoff(void) { instrumentation_begin(); /* Flush the guest cputime we spent on the guest */ - vtime_account_kernel(current); - current->flags &= ~PF_VCPU; + vtime_account_guest_exit(); instrumentation_end(); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ -- cgit v1.2.3-71-gd317 From b41c723b203e19480c26f2ec8f04eedc03d34b34 Mon Sep 17 00:00:00 2001 From: 
Sean Christopherson Date: Tue, 4 May 2021 17:27:31 -0700 Subject: sched/vtime: Move vtime accounting external declarations above inlines Move the blob of external declarations (and their stubs) above the set of inline definitions (and their stubs) for vtime accounting. This will allow a future patch to bring in more inline definitions without also having to shuffle large chunks of code. No functional change intended. Signed-off-by: Sean Christopherson Signed-off-by: Thomas Gleixner Reviewed-by: Christian Borntraeger Link: https://lore.kernel.org/r/20210505002735.1684165-5-seanjc@google.com --- include/linux/vtime.h | 74 +++++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 041d6524d144..6a4317560539 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -10,6 +10,43 @@ struct task_struct; +/* + * Common vtime APIs + */ +#ifdef CONFIG_VIRT_CPU_ACCOUNTING +extern void vtime_account_kernel(struct task_struct *tsk); +extern void vtime_account_idle(struct task_struct *tsk); +#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ +static inline void vtime_account_kernel(struct task_struct *tsk) { } +#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +extern void arch_vtime_task_switch(struct task_struct *tsk); +extern void vtime_user_enter(struct task_struct *tsk); +extern void vtime_user_exit(struct task_struct *tsk); +extern void vtime_guest_enter(struct task_struct *tsk); +extern void vtime_guest_exit(struct task_struct *tsk); +extern void vtime_init_idle(struct task_struct *tsk, int cpu); +#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ +static inline void vtime_user_enter(struct task_struct *tsk) { } +static inline void vtime_user_exit(struct task_struct *tsk) { } +static inline void vtime_guest_enter(struct task_struct *tsk) { } +static inline void vtime_guest_exit(struct task_struct *tsk) { } +static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } +#endif + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset); +extern void vtime_account_softirq(struct task_struct *tsk); +extern void vtime_account_hardirq(struct task_struct *tsk); +extern void vtime_flush(struct task_struct *tsk); +#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ +static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { } +static inline void vtime_account_softirq(struct task_struct *tsk) { } +static inline void vtime_account_hardirq(struct task_struct *tsk) { } +static inline void vtime_flush(struct task_struct *tsk) { } +#endif + /* * vtime_accounting_enabled_this_cpu() definitions/declarations */ @@ -57,43 +94,6 @@ static inline void vtime_task_switch(struct task_struct *prev) { } #endif -/* - * Common vtime APIs - */ -#ifdef CONFIG_VIRT_CPU_ACCOUNTING -extern void vtime_account_kernel(struct task_struct *tsk); -extern void vtime_account_idle(struct task_struct *tsk); -#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ -static inline void vtime_account_kernel(struct task_struct *tsk) { } -#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void arch_vtime_task_switch(struct task_struct *tsk); -extern void vtime_user_enter(struct task_struct *tsk); -extern void vtime_user_exit(struct task_struct *tsk); -extern void vtime_guest_enter(struct task_struct *tsk); -extern void vtime_guest_exit(struct task_struct *tsk); -extern void 
vtime_init_idle(struct task_struct *tsk, int cpu); -#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ -static inline void vtime_user_enter(struct task_struct *tsk) { } -static inline void vtime_user_exit(struct task_struct *tsk) { } -static inline void vtime_guest_enter(struct task_struct *tsk) { } -static inline void vtime_guest_exit(struct task_struct *tsk) { } -static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } -#endif - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset); -extern void vtime_account_softirq(struct task_struct *tsk); -extern void vtime_account_hardirq(struct task_struct *tsk); -extern void vtime_flush(struct task_struct *tsk); -#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ -static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { } -static inline void vtime_account_softirq(struct task_struct *tsk) { } -static inline void vtime_account_hardirq(struct task_struct *tsk) { } -static inline void vtime_flush(struct task_struct *tsk) { } -#endif - #ifdef CONFIG_IRQ_TIME_ACCOUNTING extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset); -- cgit v1.2.3-71-gd317 From 6f922b89e5518143920b10e3643e556d9df58d94 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 4 May 2021 17:27:32 -0700 Subject: sched/vtime: Move guest enter/exit vtime accounting to vtime.h Provide separate helpers for guest enter vtime accounting (in addition to the existing guest exit helpers), and move all vtime accounting helpers to vtime.h where the existing #ifdef infrastructure can be leveraged to better delineate the different types of accounting. This will also allow future cleanups via deduplication of context tracking code. Opportunstically delete the vtime_account_kernel() stub now that all callers are wrapped with CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y. No functional change intended. Signed-off-by: Sean Christopherson Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20210505002735.1684165-6-seanjc@google.com --- include/linux/context_tracking.h | 17 +-------------- include/linux/vtime.h | 46 ++++++++++++++++++++++++++++++++++------ 2 files changed, 41 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 4f4556232dcf..56c648bdbde8 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -137,14 +137,6 @@ static __always_inline void context_tracking_guest_exit(void) __context_tracking_exit(CONTEXT_GUEST); } -static __always_inline void vtime_account_guest_exit(void) -{ - if (vtime_accounting_enabled_this_cpu()) - vtime_guest_exit(current); - else - current->flags &= ~PF_VCPU; -} - static __always_inline void guest_exit_irqoff(void) { context_tracking_guest_exit(); @@ -163,20 +155,13 @@ static __always_inline void guest_enter_irqoff(void) * to flush. 
*/ instrumentation_begin(); - vtime_account_kernel(current); - current->flags |= PF_VCPU; + vtime_account_guest_enter(); rcu_virt_note_context_switch(smp_processor_id()); instrumentation_end(); } static __always_inline void context_tracking_guest_exit(void) { } -static __always_inline void vtime_account_guest_exit(void) -{ - vtime_account_kernel(current); - current->flags &= ~PF_VCPU; -} - static __always_inline void guest_exit_irqoff(void) { instrumentation_begin(); diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 6a4317560539..3684487d01e1 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -3,21 +3,18 @@ #define _LINUX_KERNEL_VTIME_H #include +#include + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #include #endif - -struct task_struct; - /* * Common vtime APIs */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING extern void vtime_account_kernel(struct task_struct *tsk); extern void vtime_account_idle(struct task_struct *tsk); -#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ -static inline void vtime_account_kernel(struct task_struct *tsk) { } #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN @@ -55,6 +52,18 @@ static inline void vtime_flush(struct task_struct *tsk) { } static inline bool vtime_accounting_enabled_this_cpu(void) { return true; } extern void vtime_task_switch(struct task_struct *prev); +static __always_inline void vtime_account_guest_enter(void) +{ + vtime_account_kernel(current); + current->flags |= PF_VCPU; +} + +static __always_inline void vtime_account_guest_exit(void) +{ + vtime_account_kernel(current); + current->flags &= ~PF_VCPU; +} + #elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN) /* @@ -86,12 +95,37 @@ static inline void vtime_task_switch(struct task_struct *prev) vtime_task_switch_generic(prev); } +static __always_inline void vtime_account_guest_enter(void) +{ + if (vtime_accounting_enabled_this_cpu()) + vtime_guest_enter(current); + else + current->flags |= PF_VCPU; +} + +static __always_inline void vtime_account_guest_exit(void) +{ + if (vtime_accounting_enabled_this_cpu()) + vtime_guest_exit(current); + else + current->flags &= ~PF_VCPU; +} + #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ -static inline bool vtime_accounting_enabled_cpu(int cpu) {return false; } static inline bool vtime_accounting_enabled_this_cpu(void) { return false; } static inline void vtime_task_switch(struct task_struct *prev) { } +static __always_inline void vtime_account_guest_enter(void) +{ + current->flags |= PF_VCPU; +} + +static __always_inline void vtime_account_guest_exit(void) +{ + current->flags &= ~PF_VCPU; +} + #endif -- cgit v1.2.3-71-gd317 From 14296e0c447885d6c7b326e059fb528eb00526ed Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 4 May 2021 17:27:33 -0700 Subject: context_tracking: Consolidate guest enter/exit wrappers Consolidate the guest enter/exit wrappers, providing and tweaking stubs as needed. This will allow moving the wrappers under KVM without having to bleed #ifdefs into the soon-to-be KVM code. No functional change intended. 
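As an illustration only (not part of this series), a minimal sketch of how a hypervisor's vcpu-run path is expected to use the consolidated wrappers with interrupts disabled; vcpu_run_once() and enter_guest_hw() are invented placeholder names, while local_irq_*() and guest_{enter,exit}_irqoff() are the real interfaces:

	/*
	 * Illustrative caller, not from this series: enter_guest_hw() stands in
	 * for the arch-specific world switch.
	 */
	static void vcpu_run_once(struct kvm_vcpu *vcpu)
	{
		local_irq_disable();
		guest_enter_irqoff();	/* vtime accounting + context tracking + RCU note */
		enter_guest_hw(vcpu);	/* placeholder for the actual guest entry */
		guest_exit_irqoff();	/* leave CONTEXT_GUEST, flush guest cputime */
		local_irq_enable();
	}
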
Signed-off-by: Sean Christopherson Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20210505002735.1684165-7-seanjc@google.com --- include/linux/context_tracking.h | 65 +++++++++++++++------------------------- 1 file changed, 24 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 56c648bdbde8..aa58c2ac67ca 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -71,6 +71,19 @@ static inline void exception_exit(enum ctx_state prev_ctx) } } +static __always_inline bool context_tracking_guest_enter(void) +{ + if (context_tracking_enabled()) + __context_tracking_enter(CONTEXT_GUEST); + + return context_tracking_enabled_this_cpu(); +} + +static __always_inline void context_tracking_guest_exit(void) +{ + if (context_tracking_enabled()) + __context_tracking_exit(CONTEXT_GUEST); +} /** * ct_state() - return the current context tracking state if known @@ -92,6 +105,9 @@ static inline void user_exit_irqoff(void) { } static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } +static inline bool context_tracking_guest_enter(void) { return false; } +static inline void context_tracking_guest_exit(void) { } + #endif /* !CONFIG_CONTEXT_TRACKING */ #define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond)) @@ -102,74 +118,41 @@ extern void context_tracking_init(void); static inline void context_tracking_init(void) { } #endif /* CONFIG_CONTEXT_TRACKING_FORCE */ - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN /* must be called with irqs disabled */ static __always_inline void guest_enter_irqoff(void) { + /* + * This is running in ioctl context so its safe to assume that it's the + * stime pending cputime to flush. + */ instrumentation_begin(); - if (vtime_accounting_enabled_this_cpu()) - vtime_guest_enter(current); - else - current->flags |= PF_VCPU; + vtime_account_guest_enter(); instrumentation_end(); - if (context_tracking_enabled()) - __context_tracking_enter(CONTEXT_GUEST); - - /* KVM does not hold any references to rcu protected data when it + /* + * KVM does not hold any references to rcu protected data when it * switches CPU into a guest mode. In fact switching to a guest mode * is very similar to exiting to userspace from rcu point of view. In * addition CPU may stay in a guest mode for quite a long time (up to * one time slice). Lets treat guest mode as quiescent state, just like * we do with user-mode execution. */ - if (!context_tracking_enabled_this_cpu()) { + if (!context_tracking_guest_enter()) { instrumentation_begin(); rcu_virt_note_context_switch(smp_processor_id()); instrumentation_end(); } } -static __always_inline void context_tracking_guest_exit(void) -{ - if (context_tracking_enabled()) - __context_tracking_exit(CONTEXT_GUEST); -} - static __always_inline void guest_exit_irqoff(void) { context_tracking_guest_exit(); - instrumentation_begin(); - vtime_account_guest_exit(); - instrumentation_end(); -} - -#else -static __always_inline void guest_enter_irqoff(void) -{ - /* - * This is running in ioctl context so its safe - * to assume that it's the stime pending cputime - * to flush. 
- */ - instrumentation_begin(); - vtime_account_guest_enter(); - rcu_virt_note_context_switch(smp_processor_id()); - instrumentation_end(); -} - -static __always_inline void context_tracking_guest_exit(void) { } - -static __always_inline void guest_exit_irqoff(void) -{ instrumentation_begin(); /* Flush the guest cputime we spent on the guest */ vtime_account_guest_exit(); instrumentation_end(); } -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ static inline void guest_exit(void) { -- cgit v1.2.3-71-gd317 From 1ca0016c149be35fe19a6b75fce95c25807b7159 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 4 May 2021 17:27:34 -0700 Subject: context_tracking: KVM: Move guest enter/exit wrappers to KVM's domain Move the guest enter/exit wrappers to kvm_host.h so that KVM can manage its context tracking vs. vtime accounting without bleeding too many KVM details into the context tracking code. No functional change intended. Signed-off-by: Sean Christopherson Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20210505002735.1684165-8-seanjc@google.com --- include/linux/context_tracking.h | 45 ---------------------------------------- include/linux/kvm_host.h | 45 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index aa58c2ac67ca..4d7fced3a39f 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -118,49 +118,4 @@ extern void context_tracking_init(void); static inline void context_tracking_init(void) { } #endif /* CONFIG_CONTEXT_TRACKING_FORCE */ -/* must be called with irqs disabled */ -static __always_inline void guest_enter_irqoff(void) -{ - /* - * This is running in ioctl context so its safe to assume that it's the - * stime pending cputime to flush. - */ - instrumentation_begin(); - vtime_account_guest_enter(); - instrumentation_end(); - - /* - * KVM does not hold any references to rcu protected data when it - * switches CPU into a guest mode. In fact switching to a guest mode - * is very similar to exiting to userspace from rcu point of view. In - * addition CPU may stay in a guest mode for quite a long time (up to - * one time slice). Lets treat guest mode as quiescent state, just like - * we do with user-mode execution. - */ - if (!context_tracking_guest_enter()) { - instrumentation_begin(); - rcu_virt_note_context_switch(smp_processor_id()); - instrumentation_end(); - } -} - -static __always_inline void guest_exit_irqoff(void) -{ - context_tracking_guest_exit(); - - instrumentation_begin(); - /* Flush the guest cputime we spent on the guest */ - vtime_account_guest_exit(); - instrumentation_end(); -} - -static inline void guest_exit(void) -{ - unsigned long flags; - - local_irq_save(flags); - guest_exit_irqoff(); - local_irq_restore(flags); -} - #endif diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 8895b95b6a22..2f34487e21f2 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -338,6 +338,51 @@ struct kvm_vcpu { struct kvm_dirty_ring dirty_ring; }; +/* must be called with irqs disabled */ +static __always_inline void guest_enter_irqoff(void) +{ + /* + * This is running in ioctl context so its safe to assume that it's the + * stime pending cputime to flush. 
+ */ + instrumentation_begin(); + vtime_account_guest_enter(); + instrumentation_end(); + + /* + * KVM does not hold any references to rcu protected data when it + * switches CPU into a guest mode. In fact switching to a guest mode + * is very similar to exiting to userspace from rcu point of view. In + * addition CPU may stay in a guest mode for quite a long time (up to + * one time slice). Lets treat guest mode as quiescent state, just like + * we do with user-mode execution. + */ + if (!context_tracking_guest_enter()) { + instrumentation_begin(); + rcu_virt_note_context_switch(smp_processor_id()); + instrumentation_end(); + } +} + +static __always_inline void guest_exit_irqoff(void) +{ + context_tracking_guest_exit(); + + instrumentation_begin(); + /* Flush the guest cputime we spent on the guest */ + vtime_account_guest_exit(); + instrumentation_end(); +} + +static inline void guest_exit(void) +{ + unsigned long flags; + + local_irq_save(flags); + guest_exit_irqoff(); + local_irq_restore(flags); +} + static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) { /* -- cgit v1.2.3-71-gd317 From 1139aeb1c521eb4a050920ce6c64c36c4f2a3ab7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 5 May 2021 23:12:42 +0200 Subject: smp: Fix smp_call_function_single_async prototype As of commit 966a967116e6 ("smp: Avoid using two cache lines for struct call_single_data"), the smp code prefers 32-byte aligned call_single_data objects for performance reasons, but the block layer includes an instance of this structure in the main 'struct request' that is more senstive to size than to performance here, see 4ccafe032005 ("block: unalign call_single_data in struct request"). The result is a violation of the calling conventions that clang correctly points out: block/blk-mq.c:630:39: warning: passing 8-byte aligned argument to 32-byte aligned parameter 2 of 'smp_call_function_single_async' may result in an unaligned pointer access [-Walign-mismatch] smp_call_function_single_async(cpu, &rq->csd); It does seem that the usage of the call_single_data without cache line alignment should still be allowed by the smp code, so just change the function prototype so it accepts both, but leave the default alignment unchanged for the other users. This seems better to me than adding a local hack to shut up an otherwise correct warning in the caller. Signed-off-by: Arnd Bergmann Signed-off-by: Peter Zijlstra (Intel) Acked-by: Jens Axboe Link: https://lkml.kernel.org/r/20210505211300.3174456-1-arnd@kernel.org --- include/linux/smp.h | 2 +- kernel/smp.c | 26 +++++++++++++------------- kernel/up.c | 2 +- 3 files changed, 15 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/smp.h b/include/linux/smp.h index 84a0b4828f66..f0d3ef654207 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -53,7 +53,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, void *info, bool wait, const struct cpumask *mask); -int smp_call_function_single_async(int cpu, call_single_data_t *csd); +int smp_call_function_single_async(int cpu, struct __call_single_data *csd); /* * Call a function on all processors diff --git a/kernel/smp.c b/kernel/smp.c index e21074900006..52bf159ec400 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -211,7 +211,7 @@ static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type) } while (0) /* Record current CSD work for current CPU, NULL to erase. 
*/ -static void __csd_lock_record(call_single_data_t *csd) +static void __csd_lock_record(struct __call_single_data *csd) { if (!csd) { smp_mb(); /* NULL cur_csd after unlock. */ @@ -226,13 +226,13 @@ static void __csd_lock_record(call_single_data_t *csd) /* Or before unlock, as the case may be. */ } -static __always_inline void csd_lock_record(call_single_data_t *csd) +static __always_inline void csd_lock_record(struct __call_single_data *csd) { if (static_branch_unlikely(&csdlock_debug_enabled)) __csd_lock_record(csd); } -static int csd_lock_wait_getcpu(call_single_data_t *csd) +static int csd_lock_wait_getcpu(struct __call_single_data *csd) { unsigned int csd_type; @@ -282,7 +282,7 @@ static const char *csd_lock_get_type(unsigned int type) return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type]; } -static void csd_lock_print_extended(call_single_data_t *csd, int cpu) +static void csd_lock_print_extended(struct __call_single_data *csd, int cpu) { struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu); unsigned int srccpu = csd->node.src; @@ -321,7 +321,7 @@ static void csd_lock_print_extended(call_single_data_t *csd, int cpu) * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU, * so waiting on other types gets much less information. */ -static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id) +static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id) { int cpu = -1; int cpux; @@ -387,7 +387,7 @@ static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, in * previous function call. For multi-cpu calls its even more interesting * as we'll have to ensure no other cpu is observing our csd. */ -static void __csd_lock_wait(call_single_data_t *csd) +static void __csd_lock_wait(struct __call_single_data *csd) { int bug_id = 0; u64 ts0, ts1; @@ -401,7 +401,7 @@ static void __csd_lock_wait(call_single_data_t *csd) smp_acquire__after_ctrl_dep(); } -static __always_inline void csd_lock_wait(call_single_data_t *csd) +static __always_inline void csd_lock_wait(struct __call_single_data *csd) { if (static_branch_unlikely(&csdlock_debug_enabled)) { __csd_lock_wait(csd); @@ -431,17 +431,17 @@ static void __smp_call_single_queue_debug(int cpu, struct llist_node *node) #else #define cfd_seq_store(var, src, dst, type) -static void csd_lock_record(call_single_data_t *csd) +static void csd_lock_record(struct __call_single_data *csd) { } -static __always_inline void csd_lock_wait(call_single_data_t *csd) +static __always_inline void csd_lock_wait(struct __call_single_data *csd) { smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); } #endif -static __always_inline void csd_lock(call_single_data_t *csd) +static __always_inline void csd_lock(struct __call_single_data *csd) { csd_lock_wait(csd); csd->node.u_flags |= CSD_FLAG_LOCK; @@ -454,7 +454,7 @@ static __always_inline void csd_lock(call_single_data_t *csd) smp_wmb(); } -static __always_inline void csd_unlock(call_single_data_t *csd) +static __always_inline void csd_unlock(struct __call_single_data *csd) { WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK)); @@ -501,7 +501,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node) * for execution on the given CPU. data must already have * ->func, ->info, and ->flags set. 
*/ -static int generic_exec_single(int cpu, call_single_data_t *csd) +static int generic_exec_single(int cpu, struct __call_single_data *csd) { if (cpu == smp_processor_id()) { smp_call_func_t func = csd->func; @@ -784,7 +784,7 @@ EXPORT_SYMBOL(smp_call_function_single); * NOTE: Be careful, there is unfortunately no current debugging facility to * validate the correctness of this serialization. */ -int smp_call_function_single_async(int cpu, call_single_data_t *csd) +int smp_call_function_single_async(int cpu, struct __call_single_data *csd) { int err = 0; diff --git a/kernel/up.c b/kernel/up.c index bf20b4a9af60..c7321307923a 100644 --- a/kernel/up.c +++ b/kernel/up.c @@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, } EXPORT_SYMBOL(smp_call_function_single); -int smp_call_function_single_async(int cpu, call_single_data_t *csd) +int smp_call_function_single_async(int cpu, struct __call_single_data *csd) { unsigned long flags; -- cgit v1.2.3-71-gd317 From d4455faccd6cbe11ddfdbe28723a2122453b4f4e Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Thu, 6 May 2021 18:02:16 -0700 Subject: proc: mandate ->proc_lseek in "struct proc_ops" Now that proc_ops are separate from file_operations and other operations it easy to check all instances to have ->proc_lseek hook and remove check in main code. Note: nonseekable_open() files naturally don't require ->proc_lseek. Garbage collect pde_lseek() function. [adobriyan@gmail.com: smoke test lseek()] Link: https://lkml.kernel.org/r/YG4OIhChOrVTPgdN@localhost.localdomain Link: https://lkml.kernel.org/r/YFYX0Bzwxlc7aBa/@localhost.localdomain Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/isdn/capi/kcapi_proc.c | 1 + drivers/net/wireless/intersil/hostap/hostap_proc.c | 1 + drivers/scsi/esas2r/esas2r_main.c | 1 + fs/proc/inode.c | 14 ++------------ include/linux/proc_fs.h | 1 + tools/testing/selftests/proc/read.c | 4 +++- 6 files changed, 9 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c index b5ed4ea145cb..77e951206809 100644 --- a/drivers/isdn/capi/kcapi_proc.c +++ b/drivers/isdn/capi/kcapi_proc.c @@ -201,6 +201,7 @@ static ssize_t empty_read(struct file *file, char __user *buf, static const struct proc_ops empty_proc_ops = { .proc_read = empty_read, + .proc_lseek = default_llseek, }; // --------------------------------------------------------------------------- diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c index 97c270845fd1..51c847d98755 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_proc.c +++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c @@ -227,6 +227,7 @@ static ssize_t prism2_aux_dump_proc_no_read(struct file *file, char __user *buf, static const struct proc_ops prism2_aux_dump_proc_ops = { .proc_read = prism2_aux_dump_proc_no_read, + .proc_lseek = default_llseek, }; diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 5d9eeac6717a..45ec9f16c085 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -616,6 +616,7 @@ static const struct file_operations esas2r_proc_fops = { }; static const struct proc_ops esas2r_proc_ops = { + .proc_lseek = default_llseek, .proc_ioctl = esas2r_proc_ioctl, #ifdef CONFIG_COMPAT .proc_compat_ioctl = compat_ptr_ioctl, diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 
bde6b6f69852..2ea8aaa7206e 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -273,25 +273,15 @@ void proc_entry_rundown(struct proc_dir_entry *de) spin_unlock(&de->pde_unload_lock); } -static loff_t pde_lseek(struct proc_dir_entry *pde, struct file *file, loff_t offset, int whence) -{ - typeof_member(struct proc_ops, proc_lseek) lseek; - - lseek = pde->proc_ops->proc_lseek; - if (!lseek) - lseek = default_llseek; - return lseek(file, offset, whence); -} - static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence) { struct proc_dir_entry *pde = PDE(file_inode(file)); loff_t rv = -EINVAL; if (pde_is_permanent(pde)) { - return pde_lseek(pde, file, offset, whence); + return pde->proc_ops->proc_lseek(file, offset, whence); } else if (use_pde(pde)) { - rv = pde_lseek(pde, file, offset, whence); + rv = pde->proc_ops->proc_lseek(file, offset, whence); unuse_pde(pde); } return rv; diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 000cc0533c33..069c7fd95396 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -32,6 +32,7 @@ struct proc_ops { ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *); ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *); + /* mandatory unless nonseekable_open() or equivalent is used */ loff_t (*proc_lseek)(struct file *, loff_t, int); int (*proc_release)(struct inode *, struct file *); __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); diff --git a/tools/testing/selftests/proc/read.c b/tools/testing/selftests/proc/read.c index b3ef9e14d6cc..35ee78dff144 100644 --- a/tools/testing/selftests/proc/read.c +++ b/tools/testing/selftests/proc/read.c @@ -14,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // Test -// 1) read of every file in /proc +// 1) read and lseek on every file in /proc // 2) readlink of every symlink in /proc // 3) recursively (1) + (2) for every directory in /proc // 4) write to /proc/*/clear_refs and /proc/*/task/*/clear_refs @@ -45,6 +45,8 @@ static void f_reg(DIR *d, const char *filename) fd = openat(dirfd(d), filename, O_RDONLY|O_NONBLOCK); if (fd == -1) return; + /* struct proc_ops::proc_lseek is mandatory if file is seekable. */ + (void)lseek(fd, 0, SEEK_SET); rv = read(fd, buf, sizeof(buf)); assert((0 <= rv && rv <= sizeof(buf)) || rv == -1); close(fd); -- cgit v1.2.3-71-gd317 From 4ee60ec156d91c315d1f62dfc1bc5799dcc6b473 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 6 May 2021 18:02:27 -0700 Subject: include: remove pagemap.h from blkdev.h My UEK-derived config has 1030 files depending on pagemap.h before this change. Afterwards, just 326 files need to be rebuilt when I touch pagemap.h. I think blkdev.h is probably included too widely, but untangling that dependency is harder and this solves my problem. x86 allmodconfig builds, but there may be implicit include problems on other architectures. Link: https://lkml.kernel.org/r/20210309195747.283796-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Dan Williams [nvdimm] Acked-by: Jens Axboe [block] Reviewed-by: Christoph Hellwig Acked-by: Coly Li [bcache] Acked-by: Martin K. 
Petersen [scsi] Reviewed-by: William Kucharski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- block/blk-settings.c | 1 + drivers/block/brd.c | 1 + drivers/block/loop.c | 1 + drivers/md/bcache/super.c | 1 + drivers/nvdimm/btt.c | 1 + drivers/nvdimm/pmem.c | 1 + drivers/scsi/scsicam.c | 1 + include/linux/blkdev.h | 1 - include/linux/swap.h | 1 + 9 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/block/blk-settings.c b/block/blk-settings.c index 9c009090c4b5..902c40d67120 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 6e622c1327ee..7562cf30b14e 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/block/loop.c b/drivers/block/loop.c index a370cde3ddd4..d58d68f3c7cd 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 2b6d6e9cd680..bea8c4429ae8 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -16,6 +16,7 @@ #include "features.h" #include +#include #include #include #include diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 41aa1f01fc07..18a267d5073f 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 7daac795db39..ed10a8b66068 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c index f1553a453616..0ffdb8f2995f 100644 --- a/drivers/scsi/scsicam.c +++ b/drivers/scsi/scsicam.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b91ba6207365..1255823b2bc0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/include/linux/swap.h b/include/linux/swap.h index f69e0f67651d..144727041e78 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3-71-gd317 From 08c5188ef40ff82aed559123dc0ab2d2254b1b1c Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 6 May 2021 18:02:30 -0700 Subject: kernel.h: drop inclusion in bitmap.h The bitmap.h header is used in a lot of code around the kernel. Besides that it includes kernel.h which sometimes makes a loop. The problem here is many unneeded loops that make header hell dependencies. For example, how may you move bitmap_zalloc() from C-file to the header? Currently it's impossible. And bitmap.h here is only the tip of an iceberg. kerne.h is a dump of everything that even has nothing in common at all. We may still have it, but in my new code I prefer to include only the headers that I want to use, without the bulk of unneeded kernel code. Break the loop by introducing align.h, including it in kernel.h and bitmap.h followed by replacing kernel.h with limits.h. 
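For illustration only, a standalone userspace restatement of the moved macros and their expected results; it mirrors the semantics of the in-kernel definitions (which are built on __ALIGN_KERNEL() from const.h) but is not the kernel header itself:

	#include <assert.h>
	#include <stdio.h>

	/* Userspace restatement of the align.h semantics, for illustration only. */
	#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
	#define ALIGN(x, a)		__ALIGN_MASK((x), (__typeof__(x))(a) - 1)
	#define ALIGN_DOWN(x, a)	ALIGN((x) - ((a) - 1), (a))
	#define IS_ALIGNED(x, a)	(((x) & ((__typeof__(x))(a) - 1)) == 0)

	int main(void)
	{
		assert(ALIGN(5UL, 8) == 8);		/* round up to the next multiple of 8 */
		assert(ALIGN_DOWN(5UL, 8) == 0);	/* round down to the previous multiple */
		assert(IS_ALIGNED(16UL, 8));
		assert(!IS_ALIGNED(5UL, 8));
		puts("align macros behave as expected");
		return 0;
	}
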
Link: https://lkml.kernel.org/r/20210326170347.37441-1-andriy.shevchenko@linux.intel.com Signed-off-by: Andy Shevchenko Acked-by: Yury Norov Cc: Rasmus Villemoes Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/align.h | 15 +++++++++++++++ include/linux/bitmap.h | 3 ++- include/linux/kernel.h | 9 +-------- 3 files changed, 18 insertions(+), 9 deletions(-) create mode 100644 include/linux/align.h (limited to 'include/linux') diff --git a/include/linux/align.h b/include/linux/align.h new file mode 100644 index 000000000000..2b4acec7b95a --- /dev/null +++ b/include/linux/align.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ALIGN_H +#define _LINUX_ALIGN_H + +#include + +/* @a is a power of 2 value */ +#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a))) +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) + +#endif /* _LINUX_ALIGN_H */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 70a932470b2d..6cbcd9d9edd2 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -4,10 +4,11 @@ #ifndef __ASSEMBLY__ +#include #include #include +#include #include -#include /* * bitmaps provide bit arrays that consume one or more unsigned diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 5b7ed6dc99ac..09035ac67d4b 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -3,6 +3,7 @@ #define _LINUX_KERNEL_H #include +#include #include #include #include @@ -30,14 +31,6 @@ */ #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) -/* @a is a power of 2 value */ -#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) -#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) -#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) -#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) -#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a))) -#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) - /* generic data direction definitions */ #define READ 0 #define WRITE 1 -- cgit v1.2.3-71-gd317 From 112dfce8f29798192eb0be8066b54f4a68f4eb36 Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Thu, 6 May 2021 18:02:33 -0700 Subject: linux/profile.h: remove unnecessary declaration Declaring struct pt_regs is unnecessary. On the one hand, there is no function using it; on the other hand, struct pt_regs has been declared in linux/kernel.h. Remove them. 
Link: https://lkml.kernel.org/r/20210401104834.1009157-1-wanjiabing@vivo.com Signed-off-by: Wan Jiabing Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/profile.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/profile.h b/include/linux/profile.h index bad18ca43150..fd18ca96f557 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -15,7 +15,6 @@ #define KVM_PROFILING 4 struct proc_dir_entry; -struct pt_regs; struct notifier_block; #if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) @@ -84,8 +83,6 @@ int task_handoff_unregister(struct notifier_block * n); int profile_event_register(enum profile_type, struct notifier_block * n); int profile_event_unregister(enum profile_type, struct notifier_block * n); -struct pt_regs; - #else #define prof_on 0 -- cgit v1.2.3-71-gd317 From 32c93976ac2ee7ecb4b09cc032efe1445d37bd7e Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Thu, 6 May 2021 18:02:39 -0700 Subject: kernel/cred.c: make init_groups static init_groups is declared in both cred.h and init_task.h, but it is not actually referenced anywhere outside of cred.c where it is defined. So make it static and remove the declarations. Link: https://lkml.kernel.org/r/20210310220102.2484201-1-linux@rasmusvillemoes.dk Signed-off-by: Rasmus Villemoes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cred.h | 1 - include/linux/init_task.h | 1 - kernel/cred.c | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cred.h b/include/linux/cred.h index ac0e5f97d7d8..14971322e1a0 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -53,7 +53,6 @@ do { \ groups_free(group_info); \ } while (0) -extern struct group_info init_groups; #ifdef CONFIG_MULTIUSER extern struct group_info *groups_alloc(int); extern void groups_free(struct group_info *); diff --git a/include/linux/init_task.h b/include/linux/init_task.h index b2412b4d4c20..40fc5813cf93 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -25,7 +25,6 @@ extern struct files_struct init_files; extern struct fs_struct init_fs; extern struct nsproxy init_nsproxy; -extern struct group_info init_groups; extern struct cred init_cred; #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE diff --git a/kernel/cred.c b/kernel/cred.c index 421b1149c651..e1d274cd741b 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -33,7 +33,7 @@ do { \ static struct kmem_cache *cred_jar; /* init to 2 - one for init_task, one to ensure it is never freed */ -struct group_info init_groups = { .usage = ATOMIC_INIT(2) }; +static struct group_info init_groups = { .usage = ATOMIC_INIT(2) }; /* * The initial credentials for the initial task -- cgit v1.2.3-71-gd317 From 586eaebea5988302c5a8b018096dd6c6f4564940 Mon Sep 17 00:00:00 2001 From: Yury Norov Date: Thu, 6 May 2021 18:02:56 -0700 Subject: lib: extend the scope of small_const_nbits() macro find_bit would also benefit from small_const_nbits() optimizations. The detailed comment is provided by Rasmus Villemoes. 
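For illustration only, a userspace sketch of the pattern this macro enables: a compile-time-constant size that fits in one word takes an inline single-word path, everything else falls back to an out-of-line helper. bitmap_zero_sketch() and big_bitmap_zero() are invented names, not kernel APIs:

	#include <limits.h>
	#include <stdio.h>
	#include <string.h>

	#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
	#define small_const_nbits(nbits) \
		(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)

	/* Stand-in for the real out-of-line slow path. */
	static void big_bitmap_zero(unsigned long *dst, unsigned int nbits)
	{
		memset(dst, 0, ((nbits + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(long));
	}

	static inline void bitmap_zero_sketch(unsigned long *dst, unsigned int nbits)
	{
		if (small_const_nbits(nbits))
			*dst = 0UL;		/* single store, no function call */
		else
			big_bitmap_zero(dst, nbits);
	}

	int main(void)
	{
		unsigned long small[1] = { ~0UL };
		unsigned long large[4] = { ~0UL, ~0UL, ~0UL, ~0UL };

		bitmap_zero_sketch(small, 32);	/* constant and <= BITS_PER_LONG: inline path */
		bitmap_zero_sketch(large, 200);	/* more than one word: out-of-line path */
		printf("%lx %lx\n", small[0], large[0]);
		return 0;
	}

With optimization enabled, __builtin_constant_p() still folds after the inline function is expanded with a constant argument, which is the property the kernel macro relies on; when it does not fold, the fallback path is taken and the result is the same.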
Link: https://lkml.kernel.org/r/20210401003153.97325-6-yury.norov@gmail.com Suggested-by: Rasmus Villemoes Signed-off-by: Yury Norov Acked-by: Rasmus Villemoes Acked-by: Andy Shevchenko Cc: Alexey Klimov Cc: Arnd Bergmann Cc: David Sterba Cc: Dennis Zhou Cc: Geert Uytterhoeven Cc: Jianpeng Ma Cc: Joe Perches Cc: John Paul Adrian Glaubitz Cc: Josh Poimboeuf Cc: Rich Felker Cc: Stefano Brivio Cc: Wei Yang Cc: Wolfram Sang Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/bitsperlong.h | 12 ++++++++++++ include/linux/bitmap.h | 8 -------- 2 files changed, 12 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h index 3905c1c93dc2..1023e2a4bd37 100644 --- a/include/asm-generic/bitsperlong.h +++ b/include/asm-generic/bitsperlong.h @@ -23,4 +23,16 @@ #define BITS_PER_LONG_LONG 64 #endif +/* + * small_const_nbits(n) is true precisely when it is known at compile-time + * that BITMAP_SIZE(n) is 1, i.e. 1 <= n <= BITS_PER_LONG. This allows + * various bit/bitmap APIs to provide a fast inline implementation. Bitmaps + * of size 0 are very rare, and a compile-time-known-size 0 is most likely + * a sign of error. They will be handled correctly by the bit/bitmap APIs, + * but using the out-of-line functions, so that the inline implementations + * can unconditionally dereference the pointer(s). + */ +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0) + #endif /* __ASM_GENERIC_BITS_PER_LONG */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 6cbcd9d9edd2..f57f4473bbe4 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -223,14 +223,6 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf, #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) -/* - * The static inlines below do not handle constant nbits==0 correctly, - * so make such users (should any ever turn up) call the out-of-line - * versions. - */ -#define small_const_nbits(nbits) \ - (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0) - static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) { unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); -- cgit v1.2.3-71-gd317 From 2cc7b6a44ac21d31b398b03f4845c53152070416 Mon Sep 17 00:00:00 2001 From: Yury Norov Date: Thu, 6 May 2021 18:03:14 -0700 Subject: lib: add fast path for find_first_*_bit() and find_last_bit() Similarly to bitmap functions, users would benefit if we'll handle a case of small-size bitmaps that fit into a single word. While here, move the find_last_bit() declaration to bitops/find.h where other find_*_bit() functions sit. 
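For illustration only, a userspace restatement of the new single-word fast path; __ffs() is modelled with __builtin_ctzl() and GENMASK() is re-declared locally, so this is a sketch of the logic rather than the kernel code itself:

	#include <assert.h>
	#include <limits.h>

	#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
	#define GENMASK(h, l) \
		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

	/* Single-word fast path: mask off bits beyond @size, then take the lowest set bit. */
	static unsigned long find_first_bit_word(unsigned long word, unsigned long size)
	{
		unsigned long val = word & GENMASK(size - 1, 0);

		return val ? (unsigned long)__builtin_ctzl(val) : size;
	}

	int main(void)
	{
		assert(find_first_bit_word(0x0UL, 16) == 16);	/* no bit set: returns size */
		assert(find_first_bit_word(0x8UL, 16) == 3);	/* bit 3 is the first set bit */
		assert(find_first_bit_word(0x100UL, 8) == 8);	/* bit 8 is outside the region */
		return 0;
	}
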
Link: https://lkml.kernel.org/r/20210401003153.97325-11-yury.norov@gmail.com Signed-off-by: Yury Norov Acked-by: Rasmus Villemoes Acked-by: Andy Shevchenko Cc: Alexey Klimov Cc: Arnd Bergmann Cc: David Sterba Cc: Dennis Zhou Cc: Geert Uytterhoeven Cc: Jianpeng Ma Cc: Joe Perches Cc: John Paul Adrian Glaubitz Cc: Josh Poimboeuf Cc: Rich Felker Cc: Stefano Brivio Cc: Wei Yang Cc: Wolfram Sang Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/bitops/find.h | 50 +++++++++++++++++++++++++++++++++++---- include/linux/bitops.h | 12 ---------- lib/find_bit.c | 12 +++++----- 3 files changed, 52 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 4148c74a1e4d..0d132ee2a291 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h @@ -5,6 +5,9 @@ extern unsigned long _find_next_bit(const unsigned long *addr1, const unsigned long *addr2, unsigned long nbits, unsigned long start, unsigned long invert, unsigned long le); +extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size); +extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size); +extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size); #ifndef find_next_bit /** @@ -102,8 +105,17 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, * Returns the bit number of the first set bit. * If no bits are set, returns @size. */ -extern unsigned long find_first_bit(const unsigned long *addr, - unsigned long size); +static inline +unsigned long find_first_bit(const unsigned long *addr, unsigned long size) +{ + if (small_const_nbits(size)) { + unsigned long val = *addr & GENMASK(size - 1, 0); + + return val ? __ffs(val) : size; + } + + return _find_first_bit(addr, size); +} /** * find_first_zero_bit - find the first cleared bit in a memory region @@ -113,8 +125,17 @@ extern unsigned long find_first_bit(const unsigned long *addr, * Returns the bit number of the first cleared bit. * If no bits are zero, returns @size. */ -extern unsigned long find_first_zero_bit(const unsigned long *addr, - unsigned long size); +static inline +unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) +{ + if (small_const_nbits(size)) { + unsigned long val = *addr | ~GENMASK(size - 1, 0); + + return val == ~0UL ? size : ffz(val); + } + + return _find_first_zero_bit(addr, size); +} #else /* CONFIG_GENERIC_FIND_FIRST_BIT */ #ifndef find_first_bit @@ -126,6 +147,27 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr, #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ +#ifndef find_last_bit +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The number of bits to search + * + * Returns the bit number of the last set bit, or size. + */ +static inline +unsigned long find_last_bit(const unsigned long *addr, unsigned long size) +{ + if (small_const_nbits(size)) { + unsigned long val = *addr & GENMASK(size - 1, 0); + + return val ? 
__fls(val) : size; + } + + return _find_last_bit(addr, size); +} +#endif + /** * find_next_clump8 - find next 8-bit clump with set bits in a memory region * @clump: location to store copy of found clump diff --git a/include/linux/bitops.h b/include/linux/bitops.h index a5a48303b0f1..26bf15e6cd35 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -286,17 +286,5 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, }) #endif -#ifndef find_last_bit -/** - * find_last_bit - find the last set bit in a memory region - * @addr: The address to start the search at - * @size: The number of bits to search - * - * Returns the bit number of the last set bit, or size. - */ -extern unsigned long find_last_bit(const unsigned long *addr, - unsigned long size); -#endif - #endif /* __KERNEL__ */ #endif diff --git a/lib/find_bit.c b/lib/find_bit.c index b03a101367f8..0f8e2e369b1d 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -75,7 +75,7 @@ EXPORT_SYMBOL(_find_next_bit); /* * Find the first set bit in a memory region. */ -unsigned long find_first_bit(const unsigned long *addr, unsigned long size) +unsigned long _find_first_bit(const unsigned long *addr, unsigned long size) { unsigned long idx; @@ -86,14 +86,14 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size) return size; } -EXPORT_SYMBOL(find_first_bit); +EXPORT_SYMBOL(_find_first_bit); #endif #ifndef find_first_zero_bit /* * Find the first cleared bit in a memory region. */ -unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) +unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size) { unsigned long idx; @@ -104,11 +104,11 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) return size; } -EXPORT_SYMBOL(find_first_zero_bit); +EXPORT_SYMBOL(_find_first_zero_bit); #endif #ifndef find_last_bit -unsigned long find_last_bit(const unsigned long *addr, unsigned long size) +unsigned long _find_last_bit(const unsigned long *addr, unsigned long size) { if (size) { unsigned long val = BITMAP_LAST_WORD_MASK(size); @@ -124,7 +124,7 @@ unsigned long find_last_bit(const unsigned long *addr, unsigned long size) } return size; } -EXPORT_SYMBOL(find_last_bit); +EXPORT_SYMBOL(_find_last_bit); #endif unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr, -- cgit v1.2.3-71-gd317 From e18baa7cc3598999317d6c2fe255756f6b3b7562 Mon Sep 17 00:00:00 2001 From: Richard Fitzgerald Date: Thu, 6 May 2021 18:03:37 -0700 Subject: lib: crc8: pointer to data block should be const crc8() does not change the data passed to it, so the pointer argument should be declared const. This avoids callers that receive const data having to cast it to a non-const pointer to call crc8(). Link: https://lkml.kernel.org/r/20210329122409.3291-1-rf@opensource.cirrus.com Signed-off-by: Richard Fitzgerald Acked-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/crc8.h | 2 +- lib/crc8.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/crc8.h b/include/linux/crc8.h index 13c8dabb0441..674045c59a04 100644 --- a/include/linux/crc8.h +++ b/include/linux/crc8.h @@ -96,6 +96,6 @@ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); * Williams, Ross N., rossross.net * (see URL http://www.ross.net/crc/download/crc_v3.txt). 
*/ -u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc); +u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc); #endif /* __CRC8_H_ */ diff --git a/lib/crc8.c b/lib/crc8.c index 595a5a75e3cd..1ad8e501d9b6 100644 --- a/lib/crc8.c +++ b/lib/crc8.c @@ -71,7 +71,7 @@ EXPORT_SYMBOL(crc8_populate_lsb); * @nbytes: number of bytes in data buffer. * @crc: previous returned crc8 value. */ -u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc) +u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc) { /* loop over the buffer data */ while (nbytes-- > 0) -- cgit v1.2.3-71-gd317 From e13d04ec45b07388d3c38c0e18a4d0aa4841b0c3 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 6 May 2021 18:03:52 -0700 Subject: include/linux/compat.h: remove unneeded declaration from COMPAT_SYSCALL_DEFINEx() compat_sys##name is declared twice, just one line below. With this removal SYSCALL_DEFINEx() (defined in ) and COMPAT_SYSCALL_DEFINEx() look symmetrical. Link: https://lkml.kernel.org/r/20210223114924.854794-1-masahiroy@kernel.org Signed-off-by: Masahiro Yamada Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compat.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/compat.h b/include/linux/compat.h index f0d2dd35d408..cfac4ec9a7df 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -75,7 +75,6 @@ __diag_push(); \ __diag_ignore(GCC, 8, "-Wattribute-alias", \ "Type aliasing is used to sanitize syscall arguments");\ - asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ __attribute__((alias(__stringify(__se_compat_sys##name)))); \ ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \ -- cgit v1.2.3-71-gd317 From 6f1f942cd5fbbe308f912fc84e3f10fbc8113a68 Mon Sep 17 00:00:00 2001 From: He Ying Date: Thu, 6 May 2021 18:04:57 -0700 Subject: smp: kernel/panic.c - silence warnings We found these warnings in kernel/panic.c by using sparse tool: warning: symbol 'panic_smp_self_stop' was not declared. warning: symbol 'nmi_panic_self_stop' was not declared. warning: symbol 'crash_smp_send_stop' was not declared. To avoid them, add declarations for these three functions in include/linux/smp.h. Link: https://lkml.kernel.org/r/20210316084150.75201-1-heying24@huawei.com Signed-off-by: He Ying Reported-by: Hulk Robot Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/smp.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include/linux') diff --git a/include/linux/smp.h b/include/linux/smp.h index 84a0b4828f66..669e35c03be2 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -55,6 +55,14 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, int smp_call_function_single_async(int cpu, call_single_data_t *csd); +/* + * Cpus stopping functions in panic. All have default weak definitions. + * Architecture-dependent code may override them. 
+ */ +void panic_smp_self_stop(void); +void nmi_panic_self_stop(struct pt_regs *regs); +void crash_smp_send_stop(void); + /* * Call a function on all processors */ -- cgit v1.2.3-71-gd317 From 3d1c7fd97e4c5e54034231cd11319079dfaed60e Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Thu, 6 May 2021 18:05:00 -0700 Subject: delayacct: clear right task's flag after blkio completes When I was implementing a latency analyzer tool by using task->delays and other things, I found an issue in delayacct. The issue is it should clear the target's flag instead of current's in delayacct_blkio_end(). When I git blame delayacct, I found there're some similar issues we have fixed in delayacct_blkio_end(). - Commit c96f5471ce7d ("delayacct: Account blkio completion on the correct task") fixed the issue that it should account blkio completion on the target task instead of current. - Commit b512719f771a ("delayacct: fix crash in delayacct_blkio_end() after delayacct init failure") fixed the issue that it should check target task's delays instead of current task'. It seems that delayacct_blkio_{begin, end} are error prone. So I introduce a new paratmeter - the target task 'p' - to these helpers. After that change, the callsite will specifilly set the right task, which should make it less error prone. Link: https://lkml.kernel.org/r/20210414083720.24083-1-laoar.shao@gmail.com Signed-off-by: Yafang Shao Cc: Tejun Heo Cc: Josh Snyder Cc: Jens Axboe Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/delayacct.h | 20 ++++++++++---------- mm/memory.c | 8 ++++---- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 2d3bdcccf5eb..21651f946751 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -82,16 +82,16 @@ static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) return 0; } -static inline void delayacct_set_flag(int flag) +static inline void delayacct_set_flag(struct task_struct *p, int flag) { - if (current->delays) - current->delays->flags |= flag; + if (p->delays) + p->delays->flags |= flag; } -static inline void delayacct_clear_flag(int flag) +static inline void delayacct_clear_flag(struct task_struct *p, int flag) { - if (current->delays) - current->delays->flags &= ~flag; + if (p->delays) + p->delays->flags &= ~flag; } static inline void delayacct_tsk_init(struct task_struct *tsk) @@ -114,7 +114,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk) static inline void delayacct_blkio_start(void) { - delayacct_set_flag(DELAYACCT_PF_BLKIO); + delayacct_set_flag(current, DELAYACCT_PF_BLKIO); if (current->delays) __delayacct_blkio_start(); } @@ -123,7 +123,7 @@ static inline void delayacct_blkio_end(struct task_struct *p) { if (p->delays) __delayacct_blkio_end(p); - delayacct_clear_flag(DELAYACCT_PF_BLKIO); + delayacct_clear_flag(p, DELAYACCT_PF_BLKIO); } static inline int delayacct_add_tsk(struct taskstats *d, @@ -166,9 +166,9 @@ static inline void delayacct_thrashing_end(void) } #else -static inline void delayacct_set_flag(int flag) +static inline void delayacct_set_flag(struct task_struct *p, int flag) {} -static inline void delayacct_clear_flag(int flag) +static inline void delayacct_clear_flag(struct task_struct *p, int flag) {} static inline void delayacct_init(void) {} diff --git a/mm/memory.c b/mm/memory.c index cbdc2cd9cedb..8c491f813687 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3339,7 +3339,7 @@ vm_fault_t 
do_swap_page(struct vm_fault *vmf) } - delayacct_set_flag(DELAYACCT_PF_SWAPIN); + delayacct_set_flag(current, DELAYACCT_PF_SWAPIN); page = lookup_swap_cache(entry, vma, vmf->address); swapcache = page; @@ -3388,7 +3388,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) vmf->address, &vmf->ptl); if (likely(pte_same(*vmf->pte, vmf->orig_pte))) ret = VM_FAULT_OOM; - delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN); goto unlock; } @@ -3402,13 +3402,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * owner processes (which may be unknown at hwpoison time) */ ret = VM_FAULT_HWPOISON; - delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN); goto out_release; } locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); - delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN); if (!locked) { ret |= VM_FAULT_RETRY; goto out_release; -- cgit v1.2.3-71-gd317 From a065c0faacb1e472cd4e048986407d1b177373a2 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Thu, 6 May 2021 18:05:39 -0700 Subject: kernel/async.c: remove async_unregister_domain() No callers in the tree. Link: https://lkml.kernel.org/r/20210309151723.1907838-2-linux@rasmusvillemoes.dk Signed-off-by: Rasmus Villemoes Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/async.h | 1 - kernel/async.c | 18 ------------------ 2 files changed, 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/async.h b/include/linux/async.h index 0a17cd27f348..cce4ad31e8fc 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -112,7 +112,6 @@ async_schedule_dev_domain(async_func_t func, struct device *dev, return async_schedule_node_domain(func, dev, dev_to_node(dev), domain); } -void async_unregister_domain(struct async_domain *domain); extern void async_synchronize_full(void); extern void async_synchronize_full_domain(struct async_domain *domain); extern void async_synchronize_cookie(async_cookie_t cookie); diff --git a/kernel/async.c b/kernel/async.c index 4b5971142922..b8d7a663497f 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -245,24 +245,6 @@ void async_synchronize_full(void) } EXPORT_SYMBOL_GPL(async_synchronize_full); -/** - * async_unregister_domain - ensure no more anonymous waiters on this domain - * @domain: idle domain to flush out of any async_synchronize_full instances - * - * async_synchronize_{cookie|full}_domain() are not flushed since callers - * of these routines should know the lifetime of @domain - * - * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing - */ -void async_unregister_domain(struct async_domain *domain) -{ - spin_lock_irq(&async_lock); - WARN_ON(!domain->registered || !list_empty(&domain->pending)); - domain->registered = 0; - spin_unlock_irq(&async_lock); -} -EXPORT_SYMBOL_GPL(async_unregister_domain); - /** * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain * @domain: the domain to synchronize -- cgit v1.2.3-71-gd317 From e7cb072eb988e46295512617c39d004f9e1c26f8 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Thu, 6 May 2021 18:05:42 -0700 Subject: init/initramfs.c: do unpacking asynchronously Patch series "background initramfs unpacking, and CONFIG_MODPROBE_PATH", v3. These two patches are independent, but better-together. The second is a rather trivial patch that simply allows the developer to change "/sbin/modprobe" to something else - e.g. 
the empty string, so that all request_module() during early boot return -ENOENT early, without even spawning a usermode helper, needlessly synchronizing with the initramfs unpacking. The first patch delegates decompressing the initramfs to a worker thread, allowing do_initcalls() in main.c to proceed to the device_ and late_ initcalls without waiting for that decompression (and populating of rootfs) to finish. Obviously, some of those later calls may rely on the initramfs being available, so I've added synchronization points in the firmware loader and usermodehelper paths - there might be other places that would need this, but so far no one has been able to think of any places I have missed. There's not much to win if most of the functionality needed during boot is only available as modules. But systems with a custom-made .config and initramfs can boot faster, partly due to utilizing more than one cpu earlier, partly by avoiding known-futile modprobe calls (which would still trigger synchronization with the initramfs unpacking, thus eliminating most of the first benefit). This patch (of 2): Most of the boot process doesn't actually need anything from the initramfs, until of course PID1 is to be executed. So instead of doing the decompressing and populating of the initramfs synchronously in populate_rootfs() itself, push that off to a worker thread. This is primarily motivated by an embedded ppc target, where unpacking even the rather modest sized initramfs takes 0.6 seconds, which is long enough that the external watchdog becomes unhappy that it doesn't get attention soon enough. By doing the initramfs decompression in a worker thread, we get to do the device_initcalls and hence start petting the watchdog much sooner. Normal desktops might benefit as well. On my mostly stock Ubuntu kernel, my initramfs is a 26M xz-compressed blob, decompressing to around 126M. That takes almost two seconds: [ 0.201454] Trying to unpack rootfs image as initramfs... [ 1.976633] Freeing initrd memory: 29416K Before this patch, these lines occur consecutively in dmesg. With this patch, the timestamps on these two lines is roughly the same as above, but with 172 lines inbetween - so more than one cpu has been kept busy doing work that would otherwise only happen after the populate_rootfs() finished. Should one of the initcalls done after rootfs_initcall time (i.e., device_ and late_ initcalls) need something from the initramfs (say, a kernel module or a firmware blob), it will simply wait for the initramfs unpacking to be done before proceeding, which should in theory make this completely safe. But if some driver pokes around in the filesystem directly and not via one of the official kernel interfaces (i.e. request_firmware*(), call_usermodehelper*) that theory may not hold - also, I certainly might have missed a spot when sprinkling wait_for_initramfs(). So there is an escape hatch in the form of an initramfs_async= command line parameter. 
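For illustration only, a sketch of how a late initcall that needs a file from the initramfs is expected to synchronize with the background unpacking; my_late_setup() and the file path are invented, while wait_for_initramfs(), filp_open() and filp_close() are the real interfaces:

	#include <linux/initrd.h>
	#include <linux/fs.h>
	#include <linux/err.h>
	#include <linux/init.h>

	static int __init my_late_setup(void)
	{
		struct file *f;

		/*
		 * Block until the worker has populated rootfs instead of
		 * racing with it and getting a spurious -ENOENT.
		 */
		wait_for_initramfs();

		f = filp_open("/etc/my-config", O_RDONLY, 0);	/* placeholder path */
		if (IS_ERR(f))
			return PTR_ERR(f);
		/* ... parse the file ... */
		filp_close(f, NULL);
		return 0;
	}
	late_initcall(my_late_setup);
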
Link: https://lkml.kernel.org/r/20210313212528.2956377-1-linux@rasmusvillemoes.dk Link: https://lkml.kernel.org/r/20210313212528.2956377-2-linux@rasmusvillemoes.dk Signed-off-by: Rasmus Villemoes Reviewed-by: Luis Chamberlain Cc: Jessica Yu Cc: Borislav Petkov Cc: Jonathan Corbet Cc: Greg Kroah-Hartman Cc: Nick Desaulniers Cc: Takashi Iwai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/admin-guide/kernel-parameters.txt | 12 ++++++++ drivers/base/firmware_loader/main.c | 2 ++ include/linux/initrd.h | 2 ++ init/initramfs.c | 38 ++++++++++++++++++++++++- init/main.c | 1 + kernel/umh.c | 2 ++ 6 files changed, 56 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d93fbc1c1917..7866cc1bd4a9 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1833,6 +1833,18 @@ initcall functions. Useful for debugging built-in modules and initcalls. + initramfs_async= [KNL] + Format: + Default: 1 + This parameter controls whether the initramfs + image is unpacked asynchronously, concurrently + with devices being probed and + initialized. This should normally just work, + but as a debugging aid, one can get the + historical behaviour of the initramfs + unpacking being completed before device_ and + late_ initcalls. + initrd= [BOOT] Specify the location of the initial ramdisk initrdmem= [KNL] Specify a physical address and size from which to diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 78355095e00d..4fdb8219cd08 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -504,6 +505,7 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv, if (!path) return -ENOMEM; + wait_for_initramfs(); for (i = 0; i < ARRAY_SIZE(fw_path); i++) { size_t file_size = 0; size_t *file_size_ptr = NULL; diff --git a/include/linux/initrd.h b/include/linux/initrd.h index 85c15717af34..1bbe9af48dc3 100644 --- a/include/linux/initrd.h +++ b/include/linux/initrd.h @@ -20,8 +20,10 @@ extern void free_initrd_mem(unsigned long, unsigned long); #ifdef CONFIG_BLK_DEV_INITRD extern void __init reserve_initrd_mem(void); +extern void wait_for_initramfs(void); #else static inline void __init reserve_initrd_mem(void) {} +static inline void wait_for_initramfs(void) {} #endif extern phys_addr_t phys_initrd_start; diff --git a/init/initramfs.c b/init/initramfs.c index d677e8e717f1..af27abc59643 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include #include @@ -541,6 +542,14 @@ static int __init keepinitrd_setup(char *__unused) __setup("keepinitrd", keepinitrd_setup); #endif +static bool __initdata initramfs_async = true; +static int __init initramfs_async_setup(char *str) +{ + strtobool(str, &initramfs_async); + return 1; +} +__setup("initramfs_async=", initramfs_async_setup); + extern char __initramfs_start[]; extern unsigned long __initramfs_size; #include @@ -658,7 +667,7 @@ static void __init populate_initrd_image(char *err) } #endif /* CONFIG_BLK_DEV_RAM */ -static int __init populate_rootfs(void) +static void __init do_populate_rootfs(void *unused, async_cookie_t cookie) { /* Load the built in initramfs */ char *err = unpack_to_rootfs(__initramfs_start, 
__initramfs_size); @@ -693,6 +702,33 @@ done: initrd_end = 0; flush_delayed_fput(); +} + +static ASYNC_DOMAIN_EXCLUSIVE(initramfs_domain); +static async_cookie_t initramfs_cookie; + +void wait_for_initramfs(void) +{ + if (!initramfs_cookie) { + /* + * Something before rootfs_initcall wants to access + * the filesystem/initramfs. Probably a bug. Make a + * note, avoid deadlocking the machine, and let the + * caller's access fail as it used to. + */ + pr_warn_once("wait_for_initramfs() called before rootfs_initcalls\n"); + return; + } + async_synchronize_cookie_domain(initramfs_cookie + 1, &initramfs_domain); +} +EXPORT_SYMBOL_GPL(wait_for_initramfs); + +static int __init populate_rootfs(void) +{ + initramfs_cookie = async_schedule_domain(do_populate_rootfs, NULL, + &initramfs_domain); + if (!initramfs_async) + wait_for_initramfs(); return 0; } rootfs_initcall(populate_rootfs); diff --git a/init/main.c b/init/main.c index dd11bfd10ead..11d34ccf5786 100644 --- a/init/main.c +++ b/init/main.c @@ -1561,6 +1561,7 @@ static noinline void __init kernel_init_freeable(void) kunit_run_all_tests(); + wait_for_initramfs(); console_on_rootfs(); /* diff --git a/kernel/umh.c b/kernel/umh.c index 3f646613a9d3..61f6b82c354b 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -107,6 +108,7 @@ static int call_usermodehelper_exec_async(void *data) commit_creds(new); + wait_for_initramfs(); retval = kernel_execve(sub_info->path, (const char *const *)sub_info->argv, (const char *const *)sub_info->envp); -- cgit v1.2.3-71-gd317 From cb152a1a95606aadd81df7a537dde9ef16da4b80 Mon Sep 17 00:00:00 2001 From: Shijie Luo Date: Thu, 6 May 2021 18:05:51 -0700 Subject: mm: fix some typos and code style problems fix some typos and code style problems in mm. gfp.h: s/MAXNODES/MAX_NUMNODES mmzone.h: s/then/than rmap.c: s/__vma_split()/__vma_adjust() swap.c: s/__mod_zone_page_stat/__mod_zone_page_state, s/is is/is swap_state.c: s/whoes/whose z3fold.c: code style problem fix in z3fold_unregister_migration zsmalloc.c: s/of/or, s/give/given Link: https://lkml.kernel.org/r/20210419083057.64820-1-luoshijie1@huawei.com Signed-off-by: Shijie Luo Signed-off-by: Miaohe Lin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 2 +- include/linux/mmzone.h | 2 +- mm/rmap.c | 2 +- mm/swap.c | 4 ++-- mm/swap_state.c | 2 +- mm/z3fold.c | 2 +- mm/zsmalloc.c | 4 ++-- 7 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 8a5f6c3d7dba..11da8af06704 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -490,7 +490,7 @@ static inline int gfp_zonelist(gfp_t flags) /* * We get the zone list from the current node and the gfp_mask. - * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. + * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones. * There are two zonelists per node, one for all zones with memory and * one containing just zones from the node the zonelist belongs to. * diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 917bd6c604d5..0d53eba1c383 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -55,7 +55,7 @@ enum migratetype { * pageblocks to MIGRATE_CMA which can be done by * __free_pageblock_cma() function. 
What is important though * is that a range of pageblocks must be aligned to - * MAX_ORDER_NR_PAGES should biggest page be bigger then + * MAX_ORDER_NR_PAGES should biggest page be bigger than * a single pageblock. */ MIGRATE_CMA, diff --git a/mm/rmap.c b/mm/rmap.c index b0fc27e77d6d..693a610e181d 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -257,7 +257,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root) * Attach the anon_vmas from src to dst. * Returns 0 on success, -ENOMEM on failure. * - * anon_vma_clone() is called by __vma_split(), __split_vma(), copy_vma() and + * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and * anon_vma_fork(). The first three want an exact copy of src, while the last * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call, diff --git a/mm/swap.c b/mm/swap.c index a75a8265302b..dfb48cf9c2c9 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -496,7 +496,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page, if (unlikely(unevictable) && !TestSetPageMlocked(page)) { int nr_pages = thp_nr_pages(page); /* - * We use the irq-unsafe __mod_zone_page_stat because this + * We use the irq-unsafe __mod_zone_page_state because this * counter is not modified from interrupt context, and the pte * lock is held(spinlock), which implies preemption disabled. */ @@ -808,7 +808,7 @@ inline void __lru_add_drain_all(bool force_all_cpus) * below which drains the page vectors. * * Let x, y, and z represent some system CPU numbers, where x < y < z. - * Assume CPU #z is is in the middle of the for_each_online_cpu loop + * Assume CPU #z is in the middle of the for_each_online_cpu loop * below and has already reached CPU #y's per-cpu data. CPU #x comes * along, adds some pages to its per-cpu vectors, then calls * lru_add_drain_all(). diff --git a/mm/swap_state.c b/mm/swap_state.c index 3a1259c13f3b..272ea2108c9d 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -792,7 +792,7 @@ static void swap_ra_info(struct vm_fault *vmf, * * Returns the struct page for entry and addr, after queueing swapin. * - * Primitive swap readahead code. We simply read in a few pages whoes + * Primitive swap readahead code. We simply read in a few pages whose * virtual addresses are around the fault address in the same vma. * * Caller must hold read mmap_lock if vmf->vma is not NULL. diff --git a/mm/z3fold.c b/mm/z3fold.c index 9d889ad2bb86..7fe7adaaad01 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -391,7 +391,7 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool) { if (pool->inode) iput(pool->inode); - } +} /* Initializes the z3fold header of a newly allocated z3fold page */ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless, diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 58697f7a43f8..5004c176b045 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -61,7 +61,7 @@ #define ZSPAGE_MAGIC 0x58 /* - * This must be power of 2 and greater than of equal to sizeof(link_free). + * This must be power of 2 and greater than or equal to sizeof(link_free). * These two conditions ensure that any 'struct link_free' itself doesn't * span more than 1 page which avoids complex case of mapping 2 pages simply * to restore link_free pointer values. @@ -530,7 +530,7 @@ static void set_zspage_mapping(struct zspage *zspage, * class maintains a list of zspages where each zspage is divided * into equal sized chunks. 
Each allocation falls into one of these * classes depending on its size. This function returns index of the - * size class which has chunk size big enough to hold the give size. + * size class which has chunk size big enough to hold the given size. */ static int get_size_class_index(int size) { -- cgit v1.2.3-71-gd317 From bbcd53c960713507ae764bf81970651b5577b95a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 6 May 2021 18:05:55 -0700 Subject: drivers/char: remove /dev/kmem for good Patch series "drivers/char: remove /dev/kmem for good". Exploring /dev/kmem and /dev/mem in the context of memory hot(un)plug and memory ballooning, I started questioning the existence of /dev/kmem. Comparing it with the /proc/kcore implementation, it does not seem to be able to deal with things like a) Pages unmapped from the direct mapping (e.g., to be used by secretmem) -> kern_addr_valid(). virt_addr_valid() is not sufficient. b) Special cases like gart aperture memory that is not to be touched -> mem_pfn_is_ram() Unless I am missing something, it's at least broken in some cases and might fault/crash the machine. Looks like its existence has been questioned before in 2005 and 2010 [1]; after ~11 additional years, it might make sense to revive the discussion. CONFIG_DEVKMEM is only enabled in a single defconfig (on purpose or by mistake?). All distributions disable it: in Ubuntu it has been disabled for more than 10 years, in Debian since 2.6.31, in Fedora at least starting with FC3, in RHEL starting with RHEL4, in SUSE starting from 15sp2, and OpenSUSE has it disabled as well. 1) /dev/kmem was popular for rootkits [2] before it got disabled basically everywhere. Ubuntu documents [3] "There is no modern user of /dev/kmem any more beyond attackers using it to load kernel rootkits.". RHEL documents in a BZ [5] "it served no practical purpose other than to serve as a potential security problem or to enable binary module drivers to access structures/functions they shouldn't be touching" 2) /proc/kcore is a decent interface to have a controlled way to read kernel memory for debugging purposes. (will need some extensions to deal with memory offlining/unplug, memory ballooning, and poisoned pages, though) 3) It might be useful for corner case debugging [1]. KDB/KGDB might be a better fit, especially for writing random memory; it is harder to shoot yourself in the foot. 4) "Kernel Memory Editor" [4] hasn't seen any updates since 2000 and seems to be incompatible with 64bit [1]. For educational purposes, /proc/kcore might be used to monitor value updates -- or older kernels can be used. 5) It's broken on arm64, and therefore, completely disabled there. Looks like it's essentially unused and has been replaced by better suited interfaces for individual tasks (/proc/kcore, KDB/KGDB). Let's just remove it. [1] https://lwn.net/Articles/147901/ [2] https://www.linuxjournal.com/article/10505 [3] https://wiki.ubuntu.com/Security/Features#A.2Fdev.2Fkmem_disabled [4] https://sourceforge.net/projects/kme/ [5] https://bugzilla.redhat.com/show_bug.cgi?id=154796 Link: https://lkml.kernel.org/r/20210324102351.6932-1-david@redhat.com Link: https://lkml.kernel.org/r/20210324102351.6932-2-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Michal Hocko Acked-by: Kees Cook Cc: Linus Torvalds Cc: Greg Kroah-Hartman Cc: "Alexander A.
Klimov" Cc: Alexander Viro Cc: Alexandre Belloni Cc: Andrew Lunn Cc: Andrey Zhizhikin Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Brian Cain Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Chris Zankel Cc: Corentin Labbe Cc: "David S. Miller" Cc: "Eric W. Biederman" Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Greentime Hu Cc: Gregory Clement Cc: Heiko Carstens Cc: Helge Deller Cc: Hillf Danton Cc: huang ying Cc: Ingo Molnar Cc: Ivan Kokshaysky Cc: "James E.J. Bottomley" Cc: James Troup Cc: Jiaxun Yang Cc: Jonas Bonn Cc: Jonathan Corbet Cc: Kairui Song Cc: Krzysztof Kozlowski Cc: Kuninori Morimoto Cc: Liviu Dudau Cc: Lorenzo Pieralisi Cc: Luc Van Oostenryck Cc: Luis Chamberlain Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Mike Rapoport Cc: Mikulas Patocka Cc: Minchan Kim Cc: Niklas Schnelle Cc: Oleksiy Avramchenko Cc: openrisc@lists.librecores.org Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: "Pavel Machek (CIP)" Cc: Pavel Machek Cc: "Peter Zijlstra (Intel)" Cc: Pierre Morel Cc: Randy Dunlap Cc: Richard Henderson Cc: Rich Felker Cc: Robert Richter Cc: Rob Herring Cc: Russell King Cc: Sam Ravnborg Cc: Sebastian Andrzej Siewior Cc: Sebastian Hesselbarth Cc: sparclinux@vger.kernel.org Cc: Stafford Horne Cc: Stefan Kristiansson Cc: Steven Rostedt Cc: Sudeep Holla Cc: Theodore Dubois Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Viresh Kumar Cc: William Cohen Cc: Xiaoming Ni Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/admin-guide/devices.txt | 2 +- arch/arm/configs/dove_defconfig | 1 - arch/arm/configs/magician_defconfig | 1 - arch/arm/configs/moxart_defconfig | 1 - arch/arm/configs/mps2_defconfig | 1 - arch/arm/configs/mvebu_v5_defconfig | 1 - arch/arm/configs/xcep_defconfig | 1 - arch/hexagon/configs/comet_defconfig | 1 - arch/m68k/configs/amcore_defconfig | 1 - arch/openrisc/configs/or1ksim_defconfig | 1 - arch/sh/configs/edosk7705_defconfig | 1 - arch/sh/configs/se7206_defconfig | 1 - arch/sh/configs/sh2007_defconfig | 1 - arch/sh/configs/sh7724_generic_defconfig | 1 - arch/sh/configs/sh7770_generic_defconfig | 1 - arch/sh/configs/sh7785lcr_32bit_defconfig | 1 - arch/sparc/configs/sparc64_defconfig | 1 - arch/xtensa/configs/xip_kc705_defconfig | 1 - drivers/char/Kconfig | 10 -- drivers/char/mem.c | 231 ------------------------------ include/linux/fs.h | 2 +- include/linux/vmalloc.h | 2 +- kernel/configs/android-base.config | 1 - mm/ksm.c | 2 +- mm/vmalloc.c | 2 +- 25 files changed, 5 insertions(+), 264 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt index ef41f77cb979..9c2be821c225 100644 --- a/Documentation/admin-guide/devices.txt +++ b/Documentation/admin-guide/devices.txt @@ -4,7 +4,7 @@ 1 char Memory devices 1 = /dev/mem Physical memory access - 2 = /dev/kmem Kernel virtual memory access + 2 = /dev/kmem OBSOLETE - replaced by /proc/kcore 3 = /dev/null Null device 4 = /dev/port I/O port access 5 = /dev/zero Null byte source diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig index e70c997d5f4c..b935162a8bba 100644 --- a/arch/arm/configs/dove_defconfig +++ b/arch/arm/configs/dove_defconfig @@ -63,7 +63,6 @@ CONFIG_INPUT_EVDEV=y # CONFIG_MOUSE_PS2 is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=16 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_RUNTIME_UARTS=2 diff --git a/arch/arm/configs/magician_defconfig 
b/arch/arm/configs/magician_defconfig index b4670d42f378..abde1fb23b20 100644 --- a/arch/arm/configs/magician_defconfig +++ b/arch/arm/configs/magician_defconfig @@ -72,7 +72,6 @@ CONFIG_INPUT_TOUCHSCREEN=y CONFIG_INPUT_MISC=y CONFIG_INPUT_UINPUT=m # CONFIG_SERIO is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_PXA=y # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig index 6834e97af348..eacc089d86c5 100644 --- a/arch/arm/configs/moxart_defconfig +++ b/arch/arm/configs/moxart_defconfig @@ -79,7 +79,6 @@ CONFIG_INPUT_EVBUG=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig index 1d923dbb9928..89f4a6ff30bd 100644 --- a/arch/arm/configs/mps2_defconfig +++ b/arch/arm/configs/mps2_defconfig @@ -69,7 +69,6 @@ CONFIG_SMSC911X=y # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_MPS2_UART_CONSOLE=y CONFIG_SERIAL_MPS2_UART=y # CONFIG_HW_RANDOM is not set diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig index 4f16716bfc32..d57ff30dabff 100644 --- a/arch/arm/configs/mvebu_v5_defconfig +++ b/arch/arm/configs/mvebu_v5_defconfig @@ -100,7 +100,6 @@ CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set CONFIG_LEGACY_PTY_COUNT=16 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_RUNTIME_UARTS=2 diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig index f1fbdfc5c8c6..4d8e7f2eaef7 100644 --- a/arch/arm/configs/xcep_defconfig +++ b/arch/arm/configs/xcep_defconfig @@ -53,7 +53,6 @@ CONFIG_NET_ETHERNET=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_PXA=y CONFIG_SERIAL_PXA_CONSOLE=y # CONFIG_LEGACY_PTYS is not set diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig index f19ae2ab0aaa..c5a214716a38 100644 --- a/arch/hexagon/configs/comet_defconfig +++ b/arch/hexagon/configs/comet_defconfig @@ -34,7 +34,6 @@ CONFIG_NET_ETHERNET=y # CONFIG_SERIO is not set # CONFIG_CONSOLE_TRANSLATIONS is not set CONFIG_LEGACY_PTY_COUNT=64 -# CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set CONFIG_SPI=y CONFIG_SPI_DEBUG=y diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig index 3a84f24d41c8..6d9ed2198170 100644 --- a/arch/m68k/configs/amcore_defconfig +++ b/arch/m68k/configs/amcore_defconfig @@ -60,7 +60,6 @@ CONFIG_DM9000=y # CONFIG_VT is not set # CONFIG_UNIX98_PTYS is not set # CONFIG_DEVMEM is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF_BAUDRATE=115200 CONFIG_SERIAL_MCF_CONSOLE=y diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig index 75f2da324d0e..6e1e004047c7 100644 --- a/arch/openrisc/configs/or1ksim_defconfig +++ b/arch/openrisc/configs/or1ksim_defconfig @@ -43,7 +43,6 @@ CONFIG_MICREL_PHY=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig 
index ef7cc31997b1..9ee35269bee2 100644 --- a/arch/sh/configs/edosk7705_defconfig +++ b/arch/sh/configs/edosk7705_defconfig @@ -23,7 +23,6 @@ CONFIG_SH_PCLK_FREQ=31250000 # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set # CONFIG_UNIX98_PTYS is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig index 315b04a8dd2f..601d062250d1 100644 --- a/arch/sh/configs/se7206_defconfig +++ b/arch/sh/configs/se7206_defconfig @@ -71,7 +71,6 @@ CONFIG_SMC91X=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=4 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index 99975db461d8..79f02f1c0dc8 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig @@ -75,7 +75,6 @@ CONFIG_INPUT_FF_MEMLESS=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_LEGACY_PTYS is not set diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig index 2c46c0004780..cbc9389a89a8 100644 --- a/arch/sh/configs/sh7724_generic_defconfig +++ b/arch/sh/configs/sh7724_generic_defconfig @@ -18,7 +18,6 @@ CONFIG_CPU_IDLE=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig index 88193153e51b..ee2357deba0f 100644 --- a/arch/sh/configs/sh7770_generic_defconfig +++ b/arch/sh/configs/sh7770_generic_defconfig @@ -20,7 +20,6 @@ CONFIG_CPU_IDLE=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig index 9b885c14c400..5c725c75fcef 100644 --- a/arch/sh/configs/sh7785lcr_32bit_defconfig +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig @@ -66,7 +66,6 @@ CONFIG_INPUT_FF_MEMLESS=m CONFIG_INPUT_EVDEV=y CONFIG_INPUT_EVBUG=m CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 12a4fb0bd52a..18099099583e 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -122,7 +122,6 @@ CONFIG_INPUT_SPARCSPKR=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_PCIPS2=m CONFIG_SERIO_RAW=m -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SUNSU=y CONFIG_SERIAL_SUNSU_CONSOLE=y CONFIG_SERIAL_SUNSAB=y diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig index 4f1ff9531f6a..062148e17135 100644 --- a/arch/xtensa/configs/xip_kc705_defconfig +++ b/arch/xtensa/configs/xip_kc705_defconfig @@ -72,7 +72,6 @@ CONFIG_MARVELL_PHY=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set -CONFIG_DEVKMEM=y CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y diff --git 
a/drivers/char/Kconfig b/drivers/char/Kconfig index d229a2d0c017..b151e0fcdeb5 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -334,16 +334,6 @@ config DEVMEM memory. When in doubt, say "Y". -config DEVKMEM - bool "/dev/kmem virtual device support" - # On arm64, VMALLOC_START < PAGE_OFFSET, which confuses kmem read/write - depends on !ARM64 - help - Say Y here if you want to support the /dev/kmem device. The - /dev/kmem device is rarely used, but can be used for certain - kind of kernel debugging operations. - When in doubt, say "N". - config NVRAM tristate "/dev/nvram support" depends on X86 || HAVE_ARCH_NVRAM_OPS diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 869b9f5e8e03..15dc54fa1d47 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -403,221 +403,6 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma) return 0; } -static int mmap_kmem(struct file *file, struct vm_area_struct *vma) -{ - unsigned long pfn; - - /* Turn a kernel-virtual address into a physical page frame */ - pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT; - - /* - * RED-PEN: on some architectures there is more mapped memory than - * available in mem_map which pfn_valid checks for. Perhaps should add a - * new macro here. - * - * RED-PEN: vmalloc is not supported right now. - */ - if (!pfn_valid(pfn)) - return -EIO; - - vma->vm_pgoff = pfn; - return mmap_mem(file, vma); -} - -/* - * This function reads the *virtual* memory as seen by the kernel. - */ -static ssize_t read_kmem(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - unsigned long p = *ppos; - ssize_t low_count, read, sz; - char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ - int err = 0; - - read = 0; - if (p < (unsigned long) high_memory) { - low_count = count; - if (count > (unsigned long)high_memory - p) - low_count = (unsigned long)high_memory - p; - -#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED - /* we don't have page 0 mapped on sparc and m68k.. */ - if (p < PAGE_SIZE && low_count > 0) { - sz = size_inside_page(p, low_count); - if (clear_user(buf, sz)) - return -EFAULT; - buf += sz; - p += sz; - read += sz; - low_count -= sz; - count -= sz; - } -#endif - while (low_count > 0) { - sz = size_inside_page(p, low_count); - - /* - * On ia64 if a page has been mapped somewhere as - * uncached, then it must also be accessed uncached - * by the kernel or data corruption may occur - */ - kbuf = xlate_dev_kmem_ptr((void *)p); - if (!virt_addr_valid(kbuf)) - return -ENXIO; - - if (copy_to_user(buf, kbuf, sz)) - return -EFAULT; - buf += sz; - p += sz; - read += sz; - low_count -= sz; - count -= sz; - if (should_stop_iteration()) { - count = 0; - break; - } - } - } - - if (count > 0) { - kbuf = (char *)__get_free_page(GFP_KERNEL); - if (!kbuf) - return -ENOMEM; - while (count > 0) { - sz = size_inside_page(p, count); - if (!is_vmalloc_or_module_addr((void *)p)) { - err = -ENXIO; - break; - } - sz = vread(kbuf, (char *)p, sz); - if (!sz) - break; - if (copy_to_user(buf, kbuf, sz)) { - err = -EFAULT; - break; - } - count -= sz; - buf += sz; - read += sz; - p += sz; - if (should_stop_iteration()) - break; - } - free_page((unsigned long)kbuf); - } - *ppos = p; - return read ? read : err; -} - - -static ssize_t do_write_kmem(unsigned long p, const char __user *buf, - size_t count, loff_t *ppos) -{ - ssize_t written, sz; - unsigned long copied; - - written = 0; -#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED - /* we don't have page 0 mapped on sparc and m68k.. 
*/ - if (p < PAGE_SIZE) { - sz = size_inside_page(p, count); - /* Hmm. Do something? */ - buf += sz; - p += sz; - count -= sz; - written += sz; - } -#endif - - while (count > 0) { - void *ptr; - - sz = size_inside_page(p, count); - - /* - * On ia64 if a page has been mapped somewhere as uncached, then - * it must also be accessed uncached by the kernel or data - * corruption may occur. - */ - ptr = xlate_dev_kmem_ptr((void *)p); - if (!virt_addr_valid(ptr)) - return -ENXIO; - - copied = copy_from_user(ptr, buf, sz); - if (copied) { - written += sz - copied; - if (written) - break; - return -EFAULT; - } - buf += sz; - p += sz; - count -= sz; - written += sz; - if (should_stop_iteration()) - break; - } - - *ppos += written; - return written; -} - -/* - * This function writes to the *virtual* memory as seen by the kernel. - */ -static ssize_t write_kmem(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - unsigned long p = *ppos; - ssize_t wrote = 0; - ssize_t virtr = 0; - char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ - int err = 0; - - if (p < (unsigned long) high_memory) { - unsigned long to_write = min_t(unsigned long, count, - (unsigned long)high_memory - p); - wrote = do_write_kmem(p, buf, to_write, ppos); - if (wrote != to_write) - return wrote; - p += wrote; - buf += wrote; - count -= wrote; - } - - if (count > 0) { - kbuf = (char *)__get_free_page(GFP_KERNEL); - if (!kbuf) - return wrote ? wrote : -ENOMEM; - while (count > 0) { - unsigned long sz = size_inside_page(p, count); - unsigned long n; - - if (!is_vmalloc_or_module_addr((void *)p)) { - err = -ENXIO; - break; - } - n = copy_from_user(kbuf, buf, sz); - if (n) { - err = -EFAULT; - break; - } - vwrite(kbuf, (char *)p, sz); - count -= sz; - buf += sz; - virtr += sz; - p += sz; - if (should_stop_iteration()) - break; - } - free_page((unsigned long)kbuf); - } - - *ppos = p; - return virtr + wrote ? : err; -} - static ssize_t read_port(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -855,7 +640,6 @@ static int open_port(struct inode *inode, struct file *filp) #define write_zero write_null #define write_iter_zero write_iter_null #define open_mem open_port -#define open_kmem open_mem static const struct file_operations __maybe_unused mem_fops = { .llseek = memory_lseek, @@ -869,18 +653,6 @@ static const struct file_operations __maybe_unused mem_fops = { #endif }; -static const struct file_operations __maybe_unused kmem_fops = { - .llseek = memory_lseek, - .read = read_kmem, - .write = write_kmem, - .mmap = mmap_kmem, - .open = open_kmem, -#ifndef CONFIG_MMU - .get_unmapped_area = get_unmapped_area_mem, - .mmap_capabilities = memory_mmap_capabilities, -#endif -}; - static const struct file_operations null_fops = { .llseek = null_lseek, .read = read_null, @@ -924,9 +696,6 @@ static const struct memdev { } devlist[] = { #ifdef CONFIG_DEVMEM [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, -#endif -#ifdef CONFIG_DEVKMEM - [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET }, #endif [3] = { "null", 0666, &null_fops, 0 }, #ifdef CONFIG_DEVPORT diff --git a/include/linux/fs.h b/include/linux/fs.h index acef282b97c6..c3c88fdb9b2a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -145,7 +145,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* Expect random access pattern */ #define FMODE_RANDOM ((__force fmode_t)0x1000) -/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */ +/* File is huge (eg. 
/dev/mem): treat loff_t as unsigned */ #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) /* File is opened with O_PATH; almost nothing can be done with it */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 394d03cc0e92..f31ba59fb1ef 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -227,7 +227,7 @@ static inline void set_vm_flush_reset_perms(void *addr) } #endif -/* for /dev/kmem */ +/* for /proc/kcore */ extern long vread(char *buf, char *addr, unsigned long count); extern long vwrite(char *buf, char *addr, unsigned long count); diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config index d3fd428f4b92..eb701b2ac72f 100644 --- a/kernel/configs/android-base.config +++ b/kernel/configs/android-base.config @@ -1,5 +1,4 @@ # KEEP ALPHABETICALLY SORTED -# CONFIG_DEVKMEM is not set # CONFIG_DEVMEM is not set # CONFIG_FHANDLE is not set # CONFIG_INET_LRO is not set diff --git a/mm/ksm.c b/mm/ksm.c index b321a67ebaa9..b7cbcc7d4977 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -459,7 +459,7 @@ static inline bool ksm_test_exit(struct mm_struct *mm) * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma, * in case the application has unmapped and remapped mm,addr meanwhile. * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP - * mmap of /dev/mem or /dev/kmem, where we would not want to touch it. + * mmap of /dev/mem, where we would not want to touch it. * * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context * of the process that owns 'vma'. We also do not want to enforce diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9c539f0730a5..2868692c6807 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3219,7 +3219,7 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count) * Note: In usual ops, vread() is never necessary because the caller * should know vmalloc() area is valid and can use memcpy(). * This is for routines which have to access vmalloc area without - * any information, as /dev/kmem. + * any information, as /proc/kcore. * * Return: number of bytes for which addr and buf should be increased * (same number as @count) or %0 if [addr...addr+count) doesn't -- cgit v1.2.3-71-gd317 From f7c8ce44ebb113b83135ada6e496db33d8a535e3 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 6 May 2021 18:06:06 -0700 Subject: mm/vmalloc: remove vwrite() The last user (/dev/kmem) is gone. Let's drop it. Link: https://lkml.kernel.org/r/20210324102351.6932-4-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Michal Hocko Cc: Linus Torvalds Cc: Greg Kroah-Hartman Cc: Hillf Danton Cc: Matthew Wilcox Cc: Oleksiy Avramchenko Cc: Steven Rostedt Cc: Minchan Kim Cc: huang ying Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmalloc.h | 1 - mm/nommu.c | 10 ----- mm/vmalloc.c | 116 +----------------------------------------------- 3 files changed, 1 insertion(+), 126 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index f31ba59fb1ef..b6ff16393bf6 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -229,7 +229,6 @@ static inline void set_vm_flush_reset_perms(void *addr) /* for /proc/kcore */ extern long vread(char *buf, char *addr, unsigned long count); -extern long vwrite(char *buf, char *addr, unsigned long count); /* * Internals. Dont't use.. 
diff --git a/mm/nommu.c b/mm/nommu.c index 5c9ab799c0e6..85a3a68dffb6 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -210,16 +210,6 @@ long vread(char *buf, char *addr, unsigned long count) return count; } -long vwrite(char *buf, char *addr, unsigned long count) -{ - /* Don't allow overflow */ - if ((unsigned long) addr + count < count) - count = -(unsigned long) addr; - - memcpy(addr, buf, count); - return count; -} - /* * vmalloc - allocate virtually contiguous memory * diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 2868692c6807..a7f318c9e426 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3146,10 +3146,7 @@ static int aligned_vread(char *buf, char *addr, unsigned long count) * kmap() and get small overhead in this access function. */ if (p) { - /* - * we can expect USER0 is not used (see vread/vwrite's - * function description) - */ + /* We can expect USER0 is not used -- see vread() */ void *map = kmap_atomic(p); memcpy(buf, map + offset, length); kunmap_atomic(map); @@ -3164,43 +3161,6 @@ static int aligned_vread(char *buf, char *addr, unsigned long count) return copied; } -static int aligned_vwrite(char *buf, char *addr, unsigned long count) -{ - struct page *p; - int copied = 0; - - while (count) { - unsigned long offset, length; - - offset = offset_in_page(addr); - length = PAGE_SIZE - offset; - if (length > count) - length = count; - p = vmalloc_to_page(addr); - /* - * To do safe access to this _mapped_ area, we need - * lock. But adding lock here means that we need to add - * overhead of vmalloc()/vfree() calles for this _debug_ - * interface, rarely used. Instead of that, we'll use - * kmap() and get small overhead in this access function. - */ - if (p) { - /* - * we can expect USER0 is not used (see vread/vwrite's - * function description) - */ - void *map = kmap_atomic(p); - memcpy(map + offset, buf, length); - kunmap_atomic(map); - } - addr += length; - buf += length; - copied += length; - count -= length; - } - return copied; -} - /** * vread() - read vmalloc area in a safe way. * @buf: buffer for reading data @@ -3283,80 +3243,6 @@ finished: return buflen; } -/** - * vwrite() - write vmalloc area in a safe way. - * @buf: buffer for source data - * @addr: vm address. - * @count: number of bytes to be read. - * - * This function checks that addr is a valid vmalloc'ed area, and - * copy data from a buffer to the given addr. If specified range of - * [addr...addr+count) includes some valid address, data is copied from - * proper area of @buf. If there are memory holes, no copy to hole. - * IOREMAP area is treated as memory hole and no copy is done. - * - * If [addr...addr+count) doesn't includes any intersects with alive - * vm_struct area, returns 0. @buf should be kernel's buffer. - * - * Note: In usual ops, vwrite() is never necessary because the caller - * should know vmalloc() area is valid and can use memcpy(). - * This is for routines which have to access vmalloc area without - * any information, as /dev/kmem. 
- * - * Return: number of bytes for which addr and buf should be - * increased (same number as @count) or %0 if [addr...addr+count) - * doesn't include any intersection with valid vmalloc area - */ -long vwrite(char *buf, char *addr, unsigned long count) -{ - struct vmap_area *va; - struct vm_struct *vm; - char *vaddr; - unsigned long n, buflen; - int copied = 0; - - /* Don't allow overflow */ - if ((unsigned long) addr + count < count) - count = -(unsigned long) addr; - buflen = count; - - spin_lock(&vmap_area_lock); - list_for_each_entry(va, &vmap_area_list, list) { - if (!count) - break; - - if (!va->vm) - continue; - - vm = va->vm; - vaddr = (char *) vm->addr; - if (addr >= vaddr + get_vm_area_size(vm)) - continue; - while (addr < vaddr) { - if (count == 0) - goto finished; - buf++; - addr++; - count--; - } - n = vaddr + get_vm_area_size(vm) - addr; - if (n > count) - n = count; - if (!(vm->flags & VM_IOREMAP)) { - aligned_vwrite(buf, addr, n); - copied++; - } - buf += n; - addr += n; - count -= n; - } -finished: - spin_unlock(&vmap_area_lock); - if (!copied) - return 0; - return buflen; -} - /** * remap_vmalloc_range_partial - map vmalloc pages to userspace * @vma: vma to cover -- cgit v1.2.3-71-gd317 From 2eb70aab25dd9b0013a0035b416dbe0e81e6ad48 Mon Sep 17 00:00:00 2001 From: Bhaskar Chowdhury Date: Thu, 6 May 2021 18:06:24 -0700 Subject: include/linux/pgtable.h: few spelling fixes Few spelling fixes throughout the file. Link: https://lkml.kernel.org/r/20210318201404.6380-1-unixbhaskar@gmail.com Signed-off-by: Bhaskar Chowdhury Acked-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pgtable.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 2194a9cd885c..46b13780c2c8 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -426,7 +426,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres /* * On some architectures hardware does not set page access bit when accessing - * memory page, it is responsibilty of software setting this bit. It brings + * memory page, it is responsibility of software setting this bit. It brings * out extra page fault penalty to track page access bit. For optimization page * access bit can be set during all page fault flow on these arches. * To be differentiate with macro pte_mkyoung, this macro is used on platforms @@ -519,7 +519,7 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); /* * This is an implementation of pmdp_establish() that is only suitable for an * architecture that doesn't have hardware dirty/accessed bits. In this case we - * can't race with CPU which sets these bits and non-atomic aproach is fine. + * can't race with CPU which sets these bits and non-atomic approach is fine. */ static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t pmd) @@ -852,7 +852,7 @@ static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma, * updates, but to prevent any updates it may make from being lost. * * This does not protect against other software modifications of the - * pte; the appropriate pte lock must be held over the transation. + * pte; the appropriate pte lock must be held over the transaction. 
* * Note that this interface is intended to be batchable, meaning that * ptep_modify_prot_commit may not actually update the pte, but merely @@ -1281,13 +1281,13 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) * * The complete check uses is_pmd_migration_entry() in linux/swapops.h * But using that requires moving current function and pmd_trans_unstable() - * to linux/swapops.h to resovle dependency, which is too much code move. + * to linux/swapops.h to resolve dependency, which is too much code move. * * !pmd_present() is equivalent to is_pmd_migration_entry() currently, * because !pmd_present() pages can only be under migration not swapped * out. * - * pmd_none() is preseved for future condition checks on pmd migration + * pmd_none() is preserved for future condition checks on pmd migration * entries and not confusing with this function name, although it is * redundant with !pmd_present(). */ -- cgit v1.2.3-71-gd317 From fa60ce2cb4506701c43bd4cf3ca23d970daf1b9c Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 6 May 2021 18:06:44 -0700 Subject: treewide: remove editor modelines and cruft The section "19) Editor modelines and other cruft" in Documentation/process/coding-style.rst clearly says, "Do not include any of these in source files." I recently received a patch to explicitly add a new one. Let's do treewide cleanups; otherwise some people follow the existing code and attempt to upstream their favorite editor setups. It would be even nicer if scripts/checkpatch.pl could check it. If we want to impose coding style in an editor-independent manner, I think editorconfig (patch [1]) is a saner solution. [1] https://lore.kernel.org/lkml/20200703073143.423557-1-danny@kdrag0n.dev/ Link: https://lkml.kernel.org/r/20210324054457.1477489-1-masahiroy@kernel.org Signed-off-by: Masahiro Yamada Acked-by: Geert Uytterhoeven Reviewed-by: Miguel Ojeda [auxdisplay] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/m68k/atari/time.c | 7 ------- arch/parisc/include/asm/pdc_chassis.h | 1 - arch/um/drivers/cow.h | 7 ------- drivers/auxdisplay/panel.c | 7 ------- drivers/gpu/drm/qxl/qxl_drv.c | 1 - drivers/media/usb/pwc/pwc-uncompress.c | 3 --- drivers/net/ethernet/adaptec/starfire.c | 8 -------- drivers/net/ethernet/amd/atarilance.c | 8 -------- drivers/net/ethernet/amd/pcnet32.c | 7 ------- .../net/wireless/intersil/orinoco/orinoco_nortel.c | 8 -------- drivers/net/wireless/intersil/orinoco/orinoco_pci.c | 8 -------- drivers/net/wireless/intersil/orinoco/orinoco_plx.c | 8 -------- drivers/net/wireless/intersil/orinoco/orinoco_tmd.c | 8 -------- drivers/parport/parport_ip32.c | 12 ------------ drivers/platform/x86/dell/dell_rbu.c | 3 --- drivers/scsi/53c700.c | 1 - drivers/scsi/53c700.h | 1 - drivers/scsi/ch.c | 6 ------ drivers/scsi/ips.c | 20 -------------------- drivers/scsi/ips.h | 20 -------------------- drivers/scsi/lasi700.c | 1 - drivers/scsi/megaraid/mbox_defs.h | 2 -- drivers/scsi/megaraid/mega_common.h | 2 -- drivers/scsi/megaraid/megaraid_mbox.c | 2 -- drivers/scsi/megaraid/megaraid_mbox.h | 2 -- drivers/scsi/qla1280.c | 12 ------------ drivers/scsi/sni_53c710.c | 1 - drivers/video/fbdev/matrox/matroxfb_base.c | 9 --------- drivers/video/fbdev/vga16fb.c | 10 ---------- fs/configfs/configfs_internal.h | 4 +--- fs/configfs/dir.c | 4 +--- fs/configfs/file.c | 4 +--- fs/configfs/inode.c | 4 +--- fs/configfs/item.c | 4 +--- fs/configfs/mount.c | 4 +--- fs/configfs/symlink.c | 4 +--- fs/nfs/dir.c | 7 ------- fs/nfs/nfs4proc.c | 6 ------ fs/nfs/nfs4renewd.c | 6
------ fs/nfs/nfs4state.c | 6 ------ fs/nfs/nfs4xdr.c | 6 ------ fs/nfsd/nfs4proc.c | 6 ------ fs/nfsd/nfs4xdr.c | 6 ------ fs/nfsd/xdr4.h | 6 ------ fs/ocfs2/acl.c | 4 +--- fs/ocfs2/acl.h | 4 +--- fs/ocfs2/alloc.c | 4 +--- fs/ocfs2/alloc.h | 4 +--- fs/ocfs2/aops.c | 4 +--- fs/ocfs2/aops.h | 4 +--- fs/ocfs2/blockcheck.c | 4 +--- fs/ocfs2/blockcheck.h | 4 +--- fs/ocfs2/buffer_head_io.c | 4 +--- fs/ocfs2/buffer_head_io.h | 4 +--- fs/ocfs2/cluster/heartbeat.c | 4 +--- fs/ocfs2/cluster/heartbeat.h | 4 +--- fs/ocfs2/cluster/masklog.c | 4 +--- fs/ocfs2/cluster/masklog.h | 4 +--- fs/ocfs2/cluster/netdebug.c | 4 +--- fs/ocfs2/cluster/nodemanager.c | 4 +--- fs/ocfs2/cluster/nodemanager.h | 4 +--- fs/ocfs2/cluster/ocfs2_heartbeat.h | 4 +--- fs/ocfs2/cluster/ocfs2_nodemanager.h | 4 +--- fs/ocfs2/cluster/quorum.c | 4 +--- fs/ocfs2/cluster/quorum.h | 4 +--- fs/ocfs2/cluster/sys.c | 4 +--- fs/ocfs2/cluster/sys.h | 4 +--- fs/ocfs2/cluster/tcp.c | 4 +--- fs/ocfs2/cluster/tcp.h | 4 +--- fs/ocfs2/cluster/tcp_internal.h | 4 +--- fs/ocfs2/dcache.c | 4 +--- fs/ocfs2/dcache.h | 4 +--- fs/ocfs2/dir.c | 4 +--- fs/ocfs2/dir.h | 4 +--- fs/ocfs2/dlm/dlmapi.h | 4 +--- fs/ocfs2/dlm/dlmast.c | 4 +--- fs/ocfs2/dlm/dlmcommon.h | 4 +--- fs/ocfs2/dlm/dlmconvert.c | 4 +--- fs/ocfs2/dlm/dlmconvert.h | 4 +--- fs/ocfs2/dlm/dlmdebug.c | 4 +--- fs/ocfs2/dlm/dlmdebug.h | 4 +--- fs/ocfs2/dlm/dlmdomain.c | 4 +--- fs/ocfs2/dlm/dlmdomain.h | 4 +--- fs/ocfs2/dlm/dlmlock.c | 4 +--- fs/ocfs2/dlm/dlmmaster.c | 4 +--- fs/ocfs2/dlm/dlmrecovery.c | 4 +--- fs/ocfs2/dlm/dlmthread.c | 4 +--- fs/ocfs2/dlm/dlmunlock.c | 4 +--- fs/ocfs2/dlmfs/dlmfs.c | 4 +--- fs/ocfs2/dlmfs/userdlm.c | 4 +--- fs/ocfs2/dlmfs/userdlm.h | 4 +--- fs/ocfs2/dlmglue.c | 4 +--- fs/ocfs2/dlmglue.h | 4 +--- fs/ocfs2/export.c | 4 +--- fs/ocfs2/export.h | 4 +--- fs/ocfs2/extent_map.c | 4 +--- fs/ocfs2/extent_map.h | 4 +--- fs/ocfs2/file.c | 4 +--- fs/ocfs2/file.h | 4 +--- fs/ocfs2/filecheck.c | 4 +--- fs/ocfs2/filecheck.h | 4 +--- fs/ocfs2/heartbeat.c | 4 +--- fs/ocfs2/heartbeat.h | 4 +--- fs/ocfs2/inode.c | 4 +--- fs/ocfs2/inode.h | 4 +--- fs/ocfs2/journal.c | 4 +--- fs/ocfs2/journal.h | 4 +--- fs/ocfs2/localalloc.c | 4 +--- fs/ocfs2/localalloc.h | 4 +--- fs/ocfs2/locks.c | 4 +--- fs/ocfs2/locks.h | 4 +--- fs/ocfs2/mmap.c | 4 +--- fs/ocfs2/move_extents.c | 4 +--- fs/ocfs2/move_extents.h | 4 +--- fs/ocfs2/namei.c | 4 +--- fs/ocfs2/namei.h | 4 +--- fs/ocfs2/ocfs1_fs_compat.h | 4 +--- fs/ocfs2/ocfs2.h | 4 +--- fs/ocfs2/ocfs2_fs.h | 4 +--- fs/ocfs2/ocfs2_ioctl.h | 4 +--- fs/ocfs2/ocfs2_lockid.h | 4 +--- fs/ocfs2/ocfs2_lockingver.h | 4 +--- fs/ocfs2/refcounttree.c | 4 +--- fs/ocfs2/refcounttree.h | 4 +--- fs/ocfs2/reservations.c | 4 +--- fs/ocfs2/reservations.h | 4 +--- fs/ocfs2/resize.c | 4 +--- fs/ocfs2/resize.h | 4 +--- fs/ocfs2/slot_map.c | 4 +--- fs/ocfs2/slot_map.h | 4 +--- fs/ocfs2/stack_o2cb.c | 4 +--- fs/ocfs2/stack_user.c | 4 +--- fs/ocfs2/stackglue.c | 4 +--- fs/ocfs2/stackglue.h | 4 +--- fs/ocfs2/suballoc.c | 4 +--- fs/ocfs2/suballoc.h | 4 +--- fs/ocfs2/super.c | 4 +--- fs/ocfs2/super.h | 4 +--- fs/ocfs2/symlink.c | 4 +--- fs/ocfs2/symlink.h | 4 +--- fs/ocfs2/sysfile.c | 4 +--- fs/ocfs2/sysfile.h | 4 +--- fs/ocfs2/uptodate.c | 4 +--- fs/ocfs2/uptodate.h | 4 +--- fs/ocfs2/xattr.c | 4 +--- fs/ocfs2/xattr.h | 4 +--- fs/reiserfs/procfs.c | 10 ---------- include/linux/configfs.h | 4 +--- include/linux/genl_magic_func.h | 1 - include/linux/genl_magic_struct.h | 1 - include/uapi/linux/if_bonding.h | 11 ----------- include/uapi/linux/nfs4.h | 6 ------ 
include/xen/interface/elfnote.h | 10 ---------- include/xen/interface/hvm/hvm_vcpu.h | 10 ---------- include/xen/interface/io/xenbus.h | 10 ---------- samples/configfs/configfs_sample.c | 2 -- tools/usb/hcd-tests.sh | 2 -- 157 files changed, 110 insertions(+), 627 deletions(-) (limited to 'include/linux') diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c index 1068670cb741..7e44d0e9d0f8 100644 --- a/arch/m68k/atari/time.c +++ b/arch/m68k/atari/time.c @@ -317,10 +317,3 @@ int atari_tt_hwclk( int op, struct rtc_time *t ) return( 0 ); } - -/* - * Local variables: - * c-indent-level: 4 - * tab-width: 8 - * End: - */ diff --git a/arch/parisc/include/asm/pdc_chassis.h b/arch/parisc/include/asm/pdc_chassis.h index ae3e108d22ad..d6d82f53d3d0 100644 --- a/arch/parisc/include/asm/pdc_chassis.h +++ b/arch/parisc/include/asm/pdc_chassis.h @@ -365,4 +365,3 @@ void parisc_pdc_chassis_init(void); PDC_CHASSIS_EOM_SET ) #endif /* _PARISC_PDC_CHASSIS_H */ -/* vim: set ts=8 */ diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h index 103adac691ed..9a67c017000f 100644 --- a/arch/um/drivers/cow.h +++ b/arch/um/drivers/cow.h @@ -24,10 +24,3 @@ extern void cow_sizes(int version, __u64 size, int sectorsize, int align, int *data_offset_out); #endif - -/* - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index ff5755ee5694..eba04c0de7eb 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1737,10 +1737,3 @@ module_init(panel_init_module); module_exit(panel_cleanup_module); MODULE_AUTHOR("Willy Tarreau"); MODULE_LICENSE("GPL"); - -/* - * Local variables: - * c-indent-level: 4 - * tab-width: 8 - * End: - */ diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 1864467f1063..6754f578fed2 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -1,4 +1,3 @@ -/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */ /* qxl_drv.c -- QXL driver -*- linux-c -*- * * Copyright 2011 Red Hat, Inc. diff --git a/drivers/media/usb/pwc/pwc-uncompress.c b/drivers/media/usb/pwc/pwc-uncompress.c index abfc88391036..68bc3829c6b3 100644 --- a/drivers/media/usb/pwc/pwc-uncompress.c +++ b/drivers/media/usb/pwc/pwc-uncompress.c @@ -9,9 +9,6 @@ Please send bug reports and support requests to . The decompression routines have been implemented by reverse-engineering the Nemosoft binary pwcx module. Caveat emptor. 
- - - vim: set ts=8: */ #include diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 555299737b51..7965e5e3c985 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -2070,11 +2070,3 @@ static void __exit starfire_cleanup (void) module_init(starfire_init); module_exit(starfire_cleanup); - - -/* - * Local variables: - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 961796abab35..c1eab916438f 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -1156,11 +1156,3 @@ static void __exit atarilance_module_exit(void) module_init(atarilance_module_init); module_exit(atarilance_module_exit); #endif /* MODULE */ - - -/* - * Local variables: - * c-indent-level: 4 - * tab-width: 4 - * End: - */ diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index aa412506832d..4100ab07e6b7 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -3029,10 +3029,3 @@ static void __exit pcnet32_cleanup_module(void) module_init(pcnet32_init_module); module_exit(pcnet32_cleanup_module); - -/* - * Local variables: - * c-indent-level: 4 - * tab-width: 8 - * End: - */ diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c b/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c index 96a03d10a080..18bd0d9876c2 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c @@ -312,11 +312,3 @@ static void __exit orinoco_nortel_exit(void) module_init(orinoco_nortel_init); module_exit(orinoco_nortel_exit); - -/* - * Local variables: - * c-indent-level: 8 - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_pci.c b/drivers/net/wireless/intersil/orinoco/orinoco_pci.c index f3c86b07b1b9..7e3a6dd60c15 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_pci.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_pci.c @@ -255,11 +255,3 @@ static void __exit orinoco_pci_exit(void) module_init(orinoco_pci_init); module_exit(orinoco_pci_exit); - -/* - * Local variables: - * c-indent-level: 8 - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_plx.c b/drivers/net/wireless/intersil/orinoco/orinoco_plx.c index 16dada94c774..73e6ae124013 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_plx.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_plx.c @@ -360,11 +360,3 @@ static void __exit orinoco_plx_exit(void) module_init(orinoco_plx_init); module_exit(orinoco_plx_exit); - -/* - * Local variables: - * c-indent-level: 8 - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c b/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c index 9a9d335611ac..939d5a1dce97 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c @@ -235,11 +235,3 @@ static void __exit orinoco_tmd_exit(void) module_init(orinoco_tmd_init); module_exit(orinoco_tmd_exit); - -/* - * Local variables: - * c-indent-level: 8 - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c index 48b084e86dc6..0919ed99ba94 100644 --- a/drivers/parport/parport_ip32.c +++ 
b/drivers/parport/parport_ip32.c @@ -2224,15 +2224,3 @@ MODULE_PARM_DESC(features, ", bit 2: hardware SPP mode" ", bit 3: hardware EPP mode" ", bit 4: hardware ECP mode"); - -/*--- Inform (X)Emacs about preferred coding style ---------------------*/ -/* - * Local Variables: - * mode: c - * c-file-style: "linux" - * indent-tabs-mode: t - * tab-width: 8 - * fill-column: 78 - * ispell-local-dictionary: "american" - * End: - */ diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c index 03c3ff34bcf5..085ad0a0d22e 100644 --- a/drivers/platform/x86/dell/dell_rbu.c +++ b/drivers/platform/x86/dell/dell_rbu.c @@ -675,6 +675,3 @@ static __exit void dcdrbu_exit(void) module_exit(dcdrbu_exit); module_init(dcdrbu_init); - -/* vim:noet:ts=8:sw=8 -*/ diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index ab42feab233f..77ccb96e5ed4 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8 -*- */ /* NCR (or Symbios) 53c700 and 53c700-66 Driver * diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h index c9f8c497babb..2df347ca91af 100644 --- a/drivers/scsi/53c700.h +++ b/drivers/scsi/53c700.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* -*- mode: c; c-basic-offset: 8 -*- */ /* Driver for 53c700 and 53c700-66 chips from NCR and Symbios * diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index cb74ab1ae5a4..9b89c26ccfdb 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@ -1058,9 +1058,3 @@ static void __exit exit_ch_module(void) module_init(init_ch_module); module_exit(exit_ch_module); - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 1a3c534826ba..bc33d54a4011 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -7099,23 +7099,3 @@ ips_init_phase2(int index) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING); MODULE_VERSION(IPS_VER_STRING); - - -/* - * Overrides for Emacs so that we almost follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-indent-level: 2 - * c-brace-imaginary-offset: 0 - * c-brace-offset: -2 - * c-argdecl-indent: 2 - * c-label-offset: -2 - * c-continued-statement-offset: 2 - * c-continued-brace-offset: 0 - * indent-tabs-mode: nil - * tab-width: 8 - * End: - */ diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h index 6c0678fb9a67..65edf000e447 100644 --- a/drivers/scsi/ips.h +++ b/drivers/scsi/ips.h @@ -1211,23 +1211,3 @@ typedef struct { IPS_COMPAT_TAMPA, \ IPS_COMPAT_KEYWEST \ } - - -/* - * Overrides for Emacs so that we almost follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-indent-level: 2 - * c-brace-imaginary-offset: 0 - * c-brace-offset: -2 - * c-argdecl-indent: 2 - * c-label-offset: -2 - * c-continued-statement-offset: 2 - * c-continued-brace-offset: 0 - * indent-tabs-mode: nil - * tab-width: 8 - * End: - */ diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c index de71d240a56f..6d14a7a94d0b 100644 --- a/drivers/scsi/lasi700.c +++ b/drivers/scsi/lasi700.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8 -*- */ /* PARISC LASI driver for the 53c700 chip * diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h index 01a1bfb8ea2a..f0ef8f7f82c1 100644 --- a/drivers/scsi/megaraid/mbox_defs.h +++ b/drivers/scsi/megaraid/mbox_defs.h @@ -781,5 +781,3 @@ typedef struct { } __attribute__ ((packed)) mbox_sgl32; #endif // _MRAID_MBOX_DEFS_H_ - -/* vim: set ts=8 sw=8 tw=78: */ diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h index 3a7596e47a88..2ad0aa2f837d 100644 --- a/drivers/scsi/megaraid/mega_common.h +++ b/drivers/scsi/megaraid/mega_common.h @@ -282,5 +282,3 @@ struct mraid_pci_blk { }; #endif // _MEGA_COMMON_H_ - -// vim: set ts=8 sw=8 tw=78: diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index b1a2d3536add..145fde302d7d 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -4068,5 +4068,3 @@ megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, cha */ module_init(megaraid_init); module_exit(megaraid_exit); - -/* vim: set ts=8 sw=8 tw=78 ai si: */ diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h index 3e4347c6dab1..d2fe7f69cd5d 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.h +++ b/drivers/scsi/megaraid/megaraid_mbox.h @@ -230,5 +230,3 @@ typedef struct { #define WROUTDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x2C) #endif // _MEGARAID_H_ - -// vim: set ts=8 sw=8 tw=78: diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 8f35174a1f9a..928da90b79be 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -4403,15 +4403,3 @@ MODULE_FIRMWARE("qlogic/1040.bin"); MODULE_FIRMWARE("qlogic/1280.bin"); MODULE_FIRMWARE("qlogic/12160.bin"); MODULE_VERSION(QLA1280_VERSION); - -/* - * Overrides for Emacs so that we almost follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index 97c6f81b1d2a..678651b9b4dd 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8 -*- */ /* SNI RM driver * diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index a3853421b263..4325bf7f388c 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -2608,12 +2608,3 @@ EXPORT_SYMBOL(matroxfb_register_driver); EXPORT_SYMBOL(matroxfb_unregister_driver); EXPORT_SYMBOL(matroxfb_wait_for_sync); EXPORT_SYMBOL(matroxfb_enable_irq); - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * --------------------------------------------------------------------------- - * Local variables: - * c-basic-offset: 8 - * End: - */ - diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index 1e8a38a7967d..e2757ff1c23d 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -1451,13 +1451,3 @@ MODULE_DESCRIPTION("Legacy VGA framebuffer device driver"); MODULE_LICENSE("GPL"); module_init(vga16fb_init); module_exit(vga16fb_exit); - - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * --------------------------------------------------------------------------- - * Local variables: - * c-basic-offset: 8 - * End: - */ - diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index 9a3aed249692..c0395363eab9 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset:8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * configfs_internal.h - Internal stuff for configfs * * Based on sysfs: diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index b6098e02e20b..ac5e0c0e9181 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dir.c - Operations for configfs directories. * * Based on sysfs: diff --git a/fs/configfs/file.c b/fs/configfs/file.c index da8351d1e455..e26060dae70a 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * file.c - operations for regular (text) files. * * Based on sysfs: diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 42c348bb2903..eb5ec3e46283 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * inode.c - basic inode and dentry operations. 
* * Based on sysfs: diff --git a/fs/configfs/item.c b/fs/configfs/item.c index 704a4356f137..254170a82aa3 100644 --- a/fs/configfs/item.c +++ b/fs/configfs/item.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * item.c - library routines for handling generic config items * * Based on kobject: diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index 0c6e8cf61953..c2d820063ec4 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * mount.c - operations for initializing and mounting configfs. * * Based on sysfs: diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c index 77c854364e60..0623c3edcfb9 100644 --- a/fs/configfs/symlink.c +++ b/fs/configfs/symlink.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * symlink.c - operations for configfs symlinks. * * Based on sysfs: diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index fc4f490f2d78..3d8e3698d3df 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -3004,10 +3004,3 @@ out_notsup: goto out; } EXPORT_SYMBOL_GPL(nfs_permission); - -/* - * Local variables: - * version-control: t - * kept-new-versions: 5 - * End: - */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c65c4b41e2c1..545010d6cbf3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -10427,9 +10427,3 @@ const struct xattr_handler *nfs4_xattr_handlers[] = { #endif NULL }; - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index ff876dda7f06..db3811af0796 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -149,9 +149,3 @@ void nfs4_set_lease_period(struct nfs_client *clp, /* Cap maximum reconnect timeout at 1/2 lease period */ rpc_set_connect_timeout(clp->cl_rpcclient, lease, lease >> 1); } - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 3a51351bdc6a..2eec5bbb55c8 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2695,9 +2695,3 @@ static int nfs4_run_state_manager(void *ptr) module_put_and_exit(0); return 0; } - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index ac6b79ee9355..d4fd3be0e8ca 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -7629,9 +7629,3 @@ const struct rpc_version nfs_version4 = { .procs = nfs4_procedures, .counts = nfs_version4_counts, }; - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index daf43b980d4b..f4ce93d7f26e 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -3317,9 +3317,3 @@ const struct svc_version nfsd_version4 = { .vs_rpcb_optnl = true, .vs_need_cong_ctrl = true, }; - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index e0f06d3cbd44..7abeccb975b2 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -5448,9 +5448,3 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p) nfsd4_sequence_done(resp); return 1; } - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index fe540a3415c6..a7c425254fee 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -866,9 +866,3 @@ 
struct nfsd4_operation { #endif - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 5259badabb56..5c72a7e6d6c5 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * acl.c * * Copyright (C) 2004, 2008 Oracle. All rights reserved. diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h index 4e86450917b2..f59d8d0a61fa 100644 --- a/fs/ocfs2/acl.h +++ b/fs/ocfs2/acl.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * acl.h * * Copyright (C) 2004, 2008 Oracle. All rights reserved. diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 78710788c237..e032f2e2c2c5 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * alloc.c * * Extent allocs and frees diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 7f973dd76dbc..4af7abaa6e40 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * alloc.h * * Function prototypes diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index ad20403b383f..1294925ac94a 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * Copyright (C) 2002, 2004 Oracle. All rights reserved. */ diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 70ed4382750d..3a520117fa59 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * Copyright (C) 2002, 2004, 2005 Oracle. All rights reserved. */ diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c index dabfef9c2bc0..863a5316030b 100644 --- a/fs/ocfs2/blockcheck.c +++ b/fs/ocfs2/blockcheck.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * blockcheck.c * * Checksum and ECC codes for the OCFS2 userspace library. diff --git a/fs/ocfs2/blockcheck.h b/fs/ocfs2/blockcheck.h index 8f17d2c85f40..d0578e98ee8d 100644 --- a/fs/ocfs2/blockcheck.h +++ b/fs/ocfs2/blockcheck.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * blockcheck.h * * Checksum and ECC codes for the OCFS2 userspace library. 
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index f0b104e483d8..e7758778abef 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * io.c * * Buffer cache handling diff --git a/fs/ocfs2/buffer_head_io.h b/fs/ocfs2/buffer_head_io.h index 1c5e533fba04..2d51649fc090 100644 --- a/fs/ocfs2/buffer_head_io.h +++ b/fs/ocfs2/buffer_head_io.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * ocfs2_buffer_head.h * * Buffer cache handling functions defined diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 12a7590601dd..e829c2595543 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * Copyright (C) 2004, 2005 Oracle. All rights reserved. */ diff --git a/fs/ocfs2/cluster/heartbeat.h b/fs/ocfs2/cluster/heartbeat.h index beed31ea86cf..1d4100abf6f8 100644 --- a/fs/ocfs2/cluster/heartbeat.h +++ b/fs/ocfs2/cluster/heartbeat.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * heartbeat.h * * Function prototypes diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c index 1d696c96b8b2..810d32815593 100644 --- a/fs/ocfs2/cluster/masklog.c +++ b/fs/ocfs2/cluster/masklog.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * Copyright (C) 2004, 2005 Oracle. All rights reserved. */ diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h index 446e452ac7a6..b73fc42e46ff 100644 --- a/fs/ocfs2/cluster/masklog.h +++ b/fs/ocfs2/cluster/masklog.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * Copyright (C) 2005 Oracle. All rights reserved. */ diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 667a5c5e1f66..7524994e3199 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * netdebug.c * * debug functionality for o2net diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c index 7a7640c59f3c..bb82e6b1ff4e 100644 --- a/fs/ocfs2/cluster/nodemanager.c +++ b/fs/ocfs2/cluster/nodemanager.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * Copyright (C) 2004, 2005 Oracle. All rights reserved. 
*/ diff --git a/fs/ocfs2/cluster/nodemanager.h b/fs/ocfs2/cluster/nodemanager.h index 3e0006631cc4..3490e77a952d 100644 --- a/fs/ocfs2/cluster/nodemanager.h +++ b/fs/ocfs2/cluster/nodemanager.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * nodemanager.h * * Function prototypes diff --git a/fs/ocfs2/cluster/ocfs2_heartbeat.h b/fs/ocfs2/cluster/ocfs2_heartbeat.h index 760d850be11e..6088c9f974dd 100644 --- a/fs/ocfs2/cluster/ocfs2_heartbeat.h +++ b/fs/ocfs2/cluster/ocfs2_heartbeat.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * ocfs2_heartbeat.h * * On-disk structures for ocfs2_heartbeat diff --git a/fs/ocfs2/cluster/ocfs2_nodemanager.h b/fs/ocfs2/cluster/ocfs2_nodemanager.h index 21ad307419a8..c9a0b77443e7 100644 --- a/fs/ocfs2/cluster/ocfs2_nodemanager.h +++ b/fs/ocfs2/cluster/ocfs2_nodemanager.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * ocfs2_nodemanager.h * * Header describing the interface between userspace and the kernel diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index cea739be77c4..189c111bc371 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * - * vim: noexpandtab sw=8 ts=8 sts=0: +/* * * Copyright (C) 2005 Oracle. All rights reserved. */ diff --git a/fs/ocfs2/cluster/quorum.h b/fs/ocfs2/cluster/quorum.h index 6d45ce8b18a1..d64bf4482a4a 100644 --- a/fs/ocfs2/cluster/quorum.h +++ b/fs/ocfs2/cluster/quorum.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * Copyright (C) 2005 Oracle. All rights reserved. */ diff --git a/fs/ocfs2/cluster/sys.c b/fs/ocfs2/cluster/sys.c index d6067c3d84c1..022f716c74ff 100644 --- a/fs/ocfs2/cluster/sys.c +++ b/fs/ocfs2/cluster/sys.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * sys.c * * OCFS2 cluster sysfs interface diff --git a/fs/ocfs2/cluster/sys.h b/fs/ocfs2/cluster/sys.h index ce380517cf17..70aaba65317e 100644 --- a/fs/ocfs2/cluster/sys.h +++ b/fs/ocfs2/cluster/sys.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * sys.h * * Function prototypes for o2cb sysfs interface diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 3bd8119bed5e..f660c0dbdb63 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * - * vim: noexpandtab sw=8 ts=8 sts=0: +/* * * Copyright (C) 2004 Oracle. All rights reserved. 
* diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h index 736338f45c59..a75b551d31c7 100644 --- a/fs/ocfs2/cluster/tcp.h +++ b/fs/ocfs2/cluster/tcp.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * tcp.h * * Function prototypes diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index e6a2b9dfcd16..601c99bd2611 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * Copyright (C) 2005 Oracle. All rights reserved. */ diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 42a61eecdacd..04fc8344063a 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dcache.c * * dentry cache handling code diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h index 3686a52ba143..7f246c5692d8 100644 --- a/fs/ocfs2/dcache.h +++ b/fs/ocfs2/dcache.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dcache.h * * Function prototypes diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index bdfba9db558a..bd8d534f11cb 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dir.c * * Creates, reads, walks and deletes directory-nodes diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h index e3e7d5dd29e8..4b9f5a12c7d2 100644 --- a/fs/ocfs2/dir.h +++ b/fs/ocfs2/dir.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dir.h * * Function prototypes diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h index 6456c0fbcbb2..bae60ca2672a 100644 --- a/fs/ocfs2/dlm/dlmapi.h +++ b/fs/ocfs2/dlm/dlmapi.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmapi.h * * externally exported dlm interfaces diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index 70a10764f249..c681ba957932 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmast.c * * AST and BAST functionality for local and remote nodes diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index 58d57e25d384..fd2022712167 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmcommon.h * * Copyright (C) 2004 Oracle. All rights reserved. 
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index 6051edc33aef..450d46eefab3 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmconvert.c * * underlying calls for lock conversion diff --git a/fs/ocfs2/dlm/dlmconvert.h b/fs/ocfs2/dlm/dlmconvert.h index 12d9c28bc52f..1f371716513b 100644 --- a/fs/ocfs2/dlm/dlmconvert.h +++ b/fs/ocfs2/dlm/dlmconvert.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmconvert.h * * Copyright (C) 2004 Oracle. All rights reserved. diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 4b8b41d23e91..d442cf5dda8a 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmdebug.c * * debug functionality for the dlm diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h index f8fd8680a4b6..e08f7357e7ec 100644 --- a/fs/ocfs2/dlm/dlmdebug.h +++ b/fs/ocfs2/dlm/dlmdebug.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmdebug.h * * Copyright (C) 2008 Oracle. All rights reserved. diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 357cfc702ce3..9f90fc9551e1 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmdomain.c * * defines domain join / leave apis diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h index 7c21664d23d0..815abe30ad09 100644 --- a/fs/ocfs2/dlm/dlmdomain.h +++ b/fs/ocfs2/dlm/dlmdomain.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmdomain.h * * Copyright (C) 2004 Oracle. All rights reserved. 
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 83f0760e4fba..041fd1791ae7 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmlock.c * * underlying calls for lock creation diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index f105746063ed..4960a6de768d 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmmod.c * * standalone DLM module diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index afc51736686c..0e7aad1b11cc 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmrecovery.c * * recovery stuff diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 5ccc4ff0b82a..c350bd4df770 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmthread.c * * standalone DLM module diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index dcb17ca8ae74..61103b2d69fb 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmunlock.c * * underlying calls for unlocking locks diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index b2870f1a31df..fa0a14f199eb 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmfs.c * * Code which implements the kernel side of a minimal userspace diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c index 339f098d9592..29f183a15798 100644 --- a/fs/ocfs2/dlmfs/userdlm.c +++ b/fs/ocfs2/dlmfs/userdlm.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * userdlm.c * * Code which implements the kernel side of a minimal userspace diff --git a/fs/ocfs2/dlmfs/userdlm.h b/fs/ocfs2/dlmfs/userdlm.h index 0558ae768200..47ba18eac423 100644 --- a/fs/ocfs2/dlmfs/userdlm.h +++ b/fs/ocfs2/dlmfs/userdlm.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * userdlm.h * * Userspace dlm defines diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 0fbe8bf7190f..48fd369c29a4 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmglue.c * * Code which implements an OCFS2 specific interface to our DLM. 
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index b8fbed25df89..e5da5809ed95 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * dlmglue.h * * description here diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 69ed278dd84d..eaa8c80ace3c 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * export.c * * Functions to facilitate NFS exporting diff --git a/fs/ocfs2/export.h b/fs/ocfs2/export.h index d485da0c3439..636357400505 100644 --- a/fs/ocfs2/export.h +++ b/fs/ocfs2/export.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * export.h * * Function prototypes diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 7b93e9c766f6..70a768b623cf 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * extent_map.c * * Block/Cluster mapping functions diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h index e5464f6cee8a..bc4ed59fb925 100644 --- a/fs/ocfs2/extent_map.h +++ b/fs/ocfs2/extent_map.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * extent_map.h * * In-memory file extent mappings for OCFS2. diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index db8a6265b749..f17c3d33fb18 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * file.c * * File open, close, extend, truncate diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h index 8536cec5f122..71db8f3aa027 100644 --- a/fs/ocfs2/file.h +++ b/fs/ocfs2/file.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * file.h * * Function prototypes diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c index 50f11bfdc8c2..90b8d300c1ee 100644 --- a/fs/ocfs2/filecheck.c +++ b/fs/ocfs2/filecheck.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * filecheck.c * * Code which implements online file check. diff --git a/fs/ocfs2/filecheck.h b/fs/ocfs2/filecheck.h index 4d006777ac54..d3bcb8bcfeb0 100644 --- a/fs/ocfs2/filecheck.h +++ b/fs/ocfs2/filecheck.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * filecheck.h * * Online file check. 
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c index 60c5f995d30c..9099d8fc7599 100644 --- a/fs/ocfs2/heartbeat.c +++ b/fs/ocfs2/heartbeat.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * heartbeat.c * * Register ourselves with the heartbaet service, keep our node maps diff --git a/fs/ocfs2/heartbeat.h b/fs/ocfs2/heartbeat.h index 5fedb2d35dc0..f1f8b1802fe4 100644 --- a/fs/ocfs2/heartbeat.h +++ b/fs/ocfs2/heartbeat.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * heartbeat.h * * Function prototypes diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 7c9dfd50c1c1..bc8f32fab964 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * inode.c * * vfs' aops, fops, dops and iops diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index 51a4f7197987..82b28fdacc7e 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * inode.h * * Function prototypes diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index db52e843002a..4e589ce2fce6 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * journal.c * * Defines functions of journalling api diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index bfe611ed1b1d..d158acb8b38a 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * journal.h * * Defines journalling api and structures. 
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index fc8252a28cb1..5f6bacbeef6b 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * localalloc.c * * Node local data allocation diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h index e8a5cea48639..08f925b7ec6d 100644 --- a/fs/ocfs2/localalloc.h +++ b/fs/ocfs2/localalloc.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * localalloc.h * * Function prototypes diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c index 7edc4e5c7c2c..fab7c6a4a7d0 100644 --- a/fs/ocfs2/locks.c +++ b/fs/ocfs2/locks.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * locks.c * * Userspace file locking support diff --git a/fs/ocfs2/locks.h b/fs/ocfs2/locks.h index 389fe1fce3a5..b52de3947d5f 100644 --- a/fs/ocfs2/locks.h +++ b/fs/ocfs2/locks.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * locks.h * * Function prototypes for Userspace file locking support diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 25cabbfe87fc..1834f26522ed 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * mmap.c * * Code to deal with the mess that is clustered mmap. diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index 758d9661ef1e..192cad0662d8 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * move_extents.c * * Copyright (C) 2011 Oracle. All rights reserved. diff --git a/fs/ocfs2/move_extents.h b/fs/ocfs2/move_extents.h index 28cac43892c5..987f9e559f30 100644 --- a/fs/ocfs2/move_extents.h +++ b/fs/ocfs2/move_extents.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * move_extents.h * * Copyright (C) 2011 Oracle. All rights reserved. diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 05ced86580d1..2c46ff6ba4ea 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * namei.c * * Create and rename file, directory, symlinks diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h index cc091ed02b4a..9cc891eb874e 100644 --- a/fs/ocfs2/namei.h +++ b/fs/ocfs2/namei.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * namei.h * * Function prototypes diff --git a/fs/ocfs2/ocfs1_fs_compat.h b/fs/ocfs2/ocfs1_fs_compat.h index 01ae48c4834d..6dbcf3d467fb 100644 --- a/fs/ocfs2/ocfs1_fs_compat.h +++ b/fs/ocfs2/ocfs1_fs_compat.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * ocfs1_fs_compat.h * * OCFS1 volume header definitions. 
OCFS2 creates valid but unmountable diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 7993d527edae..bb62cc2e0211 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * ocfs2.h * * Defines macros and structures used in OCFS2 diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 19137c6d087b..638d875eccc7 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * ocfs2_fs.h * * On-disk structures for OCFS2. diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h index 273616bd4f19..9680797bc531 100644 --- a/fs/ocfs2/ocfs2_ioctl.h +++ b/fs/ocfs2/ocfs2_ioctl.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * ocfs2_ioctl.h * * Defines OCFS2 ioctls. diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h index b4be84956bc1..8ac357ce6a30 100644 --- a/fs/ocfs2/ocfs2_lockid.h +++ b/fs/ocfs2/ocfs2_lockid.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * ocfs2_lockid.h * * Defines OCFS2 lockid bits. diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h index 5c9c105b33ee..31a5e1619e7f 100644 --- a/fs/ocfs2/ocfs2_lockingver.h +++ b/fs/ocfs2/ocfs2_lockingver.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * ocfs2_lockingver.h * * Defines OCFS2 Locking version values. diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index c19a463fac55..7f6355cbb587 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * refcounttree.c * * Copyright (C) 2009 Oracle. All rights reserved. diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h index 0b9014495726..8197a94feec0 100644 --- a/fs/ocfs2/refcounttree.h +++ b/fs/ocfs2/refcounttree.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * refcounttree.h * * Copyright (C) 2009 Oracle. All rights reserved. diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c index bf3842e34fb9..769e466887b0 100644 --- a/fs/ocfs2/reservations.c +++ b/fs/ocfs2/reservations.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * reservations.c * * Allocation reservations implementation diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h index 6ac88122896d..677c50663595 100644 --- a/fs/ocfs2/reservations.h +++ b/fs/ocfs2/reservations.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * reservations.h * * Allocation reservations function prototypes and structures. 
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index 24eb52f9059c..d65d43c61857 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * resize.c * * volume resize. diff --git a/fs/ocfs2/resize.h b/fs/ocfs2/resize.h index 0af0c023042c..4990637219ef 100644 --- a/fs/ocfs2/resize.h +++ b/fs/ocfs2/resize.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * resize.h * * Function prototypes diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index 4da0e4b1e79b..0b0ae3ebb0cf 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * slot_map.c * * Copyright (C) 2002, 2004 Oracle. All rights reserved. diff --git a/fs/ocfs2/slot_map.h b/fs/ocfs2/slot_map.h index 93b53e73f0f7..a43644570b53 100644 --- a/fs/ocfs2/slot_map.h +++ b/fs/ocfs2/slot_map.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * slotmap.h * * description here diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index f70012038383..88f75f7f02d7 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * stack_o2cb.c * * Code which interfaces ocfs2 with the o2cb stack. diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index 7397064c3f35..85a47621e0c0 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * stack_user.c * * Code which interfaces ocfs2 with fs/dlm and a userspace stack. diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 8d33ebc6b6fc..d50e8b8dfea4 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * stackglue.c * * Code which implements an OCFS2 specific interface to underlying diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h index e9d26cbeb3b8..3636847fae19 100644 --- a/fs/ocfs2/stackglue.h +++ b/fs/ocfs2/stackglue.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * stackglue.h * * Glue to the underlying cluster stack. 
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 8c8cf7f4eb34..8521942f5af2 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * suballoc.c * * metadata alloc and free diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index 50b36250beb6..5805a03d100b 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * suballoc.h * * Defines sub allocator api diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 079f8826993e..c86bd4e60e20 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * super.c * * load/unload driver, mount/dismount volumes diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h index 76facaf63336..8312651135b9 100644 --- a/fs/ocfs2/super.h +++ b/fs/ocfs2/super.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * super.h * * Function prototypes diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 94cfacc9bad7..f755a4985821 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * linux/cluster/ssi/cfs/symlink.c * * This program is free software; you can redistribute it and/or diff --git a/fs/ocfs2/symlink.h b/fs/ocfs2/symlink.h index 167094d1e5aa..ffcf0210545c 100644 --- a/fs/ocfs2/symlink.h +++ b/fs/ocfs2/symlink.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * symlink.h * * Function prototypes diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c index bb701c4e449f..53a945da873b 100644 --- a/fs/ocfs2/sysfile.c +++ b/fs/ocfs2/sysfile.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * sysfile.c * * Initialize, read, write, etc. system files. 
diff --git a/fs/ocfs2/sysfile.h b/fs/ocfs2/sysfile.h index a83dd962fccb..2b38c75990fd 100644 --- a/fs/ocfs2/sysfile.h +++ b/fs/ocfs2/sysfile.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * sysfile.h * * Function prototypes diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index 580852ba05c4..09854925fa5c 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * uptodate.c * * Tracking the up-to-date-ness of a local buffer_head with respect to diff --git a/fs/ocfs2/uptodate.h b/fs/ocfs2/uptodate.h index 77a30cae4879..85d94134001b 100644 --- a/fs/ocfs2/uptodate.h +++ b/fs/ocfs2/uptodate.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * uptodate.h * * Cluster uptodate tracking diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 36ae47a4aef6..dd784eb0cd7c 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * xattr.c * * Copyright (C) 2004, 2008 Oracle. All rights reserved. diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index 9c80382da1f5..00308b57f64f 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * xattr.h * * Copyright (C) 2004, 2008 Oracle. All rights reserved. diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index 155b82870333..4a7cb16e9345 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -488,13 +488,3 @@ int reiserfs_proc_info_global_done(void) * (available at http://www.namesys.com/legalese.html) * */ - -/* - * Make Linus happy. 
- * Local variables: - * c-indentation-style: "K&R" - * mode-name: "LC" - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 2e8c69b43c64..97cfd13bae51 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * configfs.h - definitions for the device driver filesystem * * Based on sysfs: diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 6cb82301d8e9..939b1a8f571b 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h @@ -404,4 +404,3 @@ s_fields \ /* }}}1 */ #endif /* GENL_MAGIC_FUNC_H */ -/* vim: set foldmethod=marker foldlevel=1 nofoldenable : */ diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h index 35d21fddaf2d..f81d48987528 100644 --- a/include/linux/genl_magic_struct.h +++ b/include/linux/genl_magic_struct.h @@ -283,4 +283,3 @@ enum { \ /* }}}1 */ #endif /* GENL_MAGIC_STRUCT_H */ -/* vim: set foldmethod=marker nofoldenable : */ diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h index e8eb4ad03cf1..d174914a837d 100644 --- a/include/uapi/linux/if_bonding.h +++ b/include/uapi/linux/if_bonding.h @@ -153,14 +153,3 @@ enum { #define BOND_3AD_STAT_MAX (__BOND_3AD_STAT_MAX - 1) #endif /* _LINUX_IF_BONDING_H */ - -/* - * Local variables: - * version-control: t - * kept-new-versions: 5 - * c-indent-level: 8 - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ - diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h index ed5415e0f1c1..800bb0ffa6e6 100644 --- a/include/uapi/linux/nfs4.h +++ b/include/uapi/linux/nfs4.h @@ -178,9 +178,3 @@ #define NFS4_MAX_BACK_CHANNEL_OPS 2 #endif /* _UAPI_LINUX_NFS4_H */ - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h index 9e9f9bf7c66d..449bd383cb76 100644 --- a/include/xen/interface/elfnote.h +++ b/include/xen/interface/elfnote.h @@ -208,13 +208,3 @@ #define XEN_ELFNOTE_MAX XEN_ELFNOTE_PHYS32_ENTRY #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ - -/* - * Local variables: - * mode: C - * c-set-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/include/xen/interface/hvm/hvm_vcpu.h b/include/xen/interface/hvm/hvm_vcpu.h index 32ca83edd44d..bfc2138e0bf5 100644 --- a/include/xen/interface/hvm/hvm_vcpu.h +++ b/include/xen/interface/hvm/hvm_vcpu.h @@ -131,13 +131,3 @@ struct vcpu_hvm_context { typedef struct vcpu_hvm_context vcpu_hvm_context_t; #endif /* __XEN_PUBLIC_HVM_HVM_VCPU_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/include/xen/interface/io/xenbus.h b/include/xen/interface/io/xenbus.h index aaf2951b1cce..fb8716112251 100644 --- a/include/xen/interface/io/xenbus.h +++ b/include/xen/interface/io/xenbus.h @@ -39,13 +39,3 @@ enum xenbus_state }; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ - -/* - * Local variables: - * c-file-style: "linux" - * indent-tabs-mode: t - * c-indent-level: 8 - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff --git a/samples/configfs/configfs_sample.c b/samples/configfs/configfs_sample.c index f9008be7a8a1..37a657b25d58 100644 --- a/samples/configfs/configfs_sample.c +++ b/samples/configfs/configfs_sample.c @@ -1,7 +1,5 
@@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * vim: noexpandtab ts=8 sts=0 sw=8: - * * configfs_example_macros.c - This file is a demonstration module * containing a number of configfs subsystems. It uses the helper * macros defined by configfs.h diff --git a/tools/usb/hcd-tests.sh b/tools/usb/hcd-tests.sh index e8cad6a4f9c9..73f914d13f5c 100644 --- a/tools/usb/hcd-tests.sh +++ b/tools/usb/hcd-tests.sh @@ -272,5 +272,3 @@ do echo '' done done - -# vim: sw=4 -- cgit v1.2.3-71-gd317 From f0953a1bbaca71e1ebbcb9864eb1b273156157ed Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 May 2021 18:06:47 -0700 Subject: mm: fix typos in comments Fix ~94 single-word typos in locking code comments, plus a few very obvious grammar mistakes. Link: https://lkml.kernel.org/r/20210322212624.GA1963421@gmail.com Link: https://lore.kernel.org/r/20210322205203.GB1959563@gmail.com Signed-off-by: Ingo Molnar Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Randy Dunlap Cc: Bhaskar Chowdhury Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 +- include/linux/vmalloc.h | 4 ++-- mm/balloon_compaction.c | 4 ++-- mm/compaction.c | 4 ++-- mm/filemap.c | 2 +- mm/gup.c | 2 +- mm/highmem.c | 2 +- mm/huge_memory.c | 6 +++--- mm/hugetlb.c | 6 +++--- mm/internal.h | 2 +- mm/kasan/kasan.h | 8 ++++---- mm/kasan/quarantine.c | 4 ++-- mm/kasan/shadow.c | 4 ++-- mm/kfence/report.c | 2 +- mm/khugepaged.c | 2 +- mm/ksm.c | 4 ++-- mm/madvise.c | 4 ++-- mm/memcontrol.c | 18 +++++++++--------- mm/memory-failure.c | 2 +- mm/memory.c | 10 +++++----- mm/mempolicy.c | 4 ++-- mm/migrate.c | 8 ++++---- mm/mmap.c | 4 ++-- mm/mprotect.c | 2 +- mm/mremap.c | 2 +- mm/oom_kill.c | 2 +- mm/page-writeback.c | 4 ++-- mm/page_alloc.c | 14 +++++++------- mm/page_owner.c | 2 +- mm/percpu-internal.h | 2 +- mm/percpu.c | 2 +- mm/pgalloc-track.h | 6 +++--- mm/slab.c | 6 +++--- mm/slub.c | 2 +- mm/swap_slots.c | 2 +- mm/vmalloc.c | 6 +++--- mm/vmstat.c | 2 +- mm/zpool.c | 2 +- mm/zsmalloc.c | 2 +- 39 files changed, 83 insertions(+), 83 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 76e27ebb28a3..322ec61d0da7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -106,7 +106,7 @@ extern int mmap_rnd_compat_bits __read_mostly; * embedding these tags into addresses that point to these memory regions, and * checking that the memory and the pointer tags match on memory accesses) * redefine this macro to strip tags from pointers. - * It's defined as noop for arcitectures that don't support memory tagging. + * It's defined as noop for architectures that don't support memory tagging. */ #ifndef untagged_addr #define untagged_addr(addr) (addr) diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index b6ff16393bf6..4d668abb6391 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -33,7 +33,7 @@ struct notifier_block; /* in notifier.h */ * * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after * shadow memory has been mapped. It's used to handle allocation errors so that - * we don't try to poision shadow on free if it was never allocated. + * we don't try to poison shadow on free if it was never allocated. * * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to * determine which allocations need the module shadow freed. @@ -43,7 +43,7 @@ struct notifier_block; /* in notifier.h */ /* * Maximum alignment for ioremap() regions. - * Can be overriden by arch-specific value. 
+ * Can be overridden by arch-specific value. */ #ifndef IOREMAP_MAX_ORDER #define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */ diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 26de020aae7b..907fefde2572 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue); /** * balloon_page_list_dequeue() - removes pages from balloon's page list and * returns a list of the pages. - * @b_dev_info: balloon device decriptor where we will grab a page from. + * @b_dev_info: balloon device descriptor where we will grab a page from. * @pages: pointer to the list of pages that would be returned to the caller. * @n_req_pages: number of requested pages. * @@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue); /* * balloon_page_dequeue - removes a page from balloon's page list and returns * its address to allow the driver to release the page. - * @b_dev_info: balloon device decriptor where we will grab a page from. + * @b_dev_info: balloon device descriptor where we will grab a page from. * * Driver must call this function to properly dequeue a previously enqueued page * before definitively releasing it back to the guest system. diff --git a/mm/compaction.c b/mm/compaction.c index 3a6c6b821f80..84fde270ae74 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2012,8 +2012,8 @@ static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) unsigned int wmark_low; /* - * Cap the low watermak to avoid excessive compaction - * activity in case a user sets the proactivess tunable + * Cap the low watermark to avoid excessive compaction + * activity in case a user sets the proactiveness tunable * close to 100 (maximum). */ wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); diff --git a/mm/filemap.c b/mm/filemap.c index 7fadf211643c..66f7e9fdfbc4 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2755,7 +2755,7 @@ unsigned int seek_page_size(struct xa_state *xas, struct page *page) * entirely memory-based such as tmpfs, and filesystems which support * unwritten extents. * - * Return: The requested offset on successs, or -ENXIO if @whence specifies + * Return: The requested offset on success, or -ENXIO if @whence specifies * SEEK_DATA and there is no data after @start. There is an implicit hole * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start * and @end contain data. diff --git a/mm/gup.c b/mm/gup.c index aa09535cf4d4..0697134b6a12 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1575,7 +1575,7 @@ finish_or_fault: * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - - * allowing a hole to be left in the corefile to save diskspace. + * allowing a hole to be left in the corefile to save disk space. * * Called without mmap_lock (takes and releases the mmap_lock by itself). */ diff --git a/mm/highmem.c b/mm/highmem.c index e389337e00b4..4fb51d735aa6 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -519,7 +519,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) /* * Disable migration so resulting virtual address is stable - * accross preemption. + * across preemption. 
*/ migrate_disable(); preempt_disable(); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 98456017744d..63ed6b25deaa 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1792,8 +1792,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, /* * Returns * - 0 if PMD could not be locked - * - 1 if PMD was locked but protections unchange and TLB flush unnecessary - * - HPAGE_PMD_NR is protections changed and TLB flush necessary + * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary + * - HPAGE_PMD_NR if protections changed and TLB flush necessary */ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, unsigned long cp_flags) @@ -2469,7 +2469,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, xa_lock(&swap_cache->i_pages); } - /* lock lru list/PageCompound, ref freezed by page_ref_freeze */ + /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ lruvec = lock_page_lruvec(head); for (i = nr - 1; i >= 1; i--) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 629aa4c2259c..3db405dea3dc 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -466,7 +466,7 @@ static int allocate_file_region_entries(struct resv_map *resv, resv->region_cache_count; /* At this point, we should have enough entries in the cache - * for all the existings adds_in_progress. We should only be + * for all the existing adds_in_progress. We should only be * needing to allocate for regions_needed. */ VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); @@ -5536,8 +5536,8 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); /* - * vma need span at least one aligned PUD size and the start,end range - * must at least partialy within it. + * vma needs to span at least one aligned PUD size, and the range + * must be at least partially within in. */ if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || (*end <= v_start) || (*start >= v_end)) diff --git a/mm/internal.h b/mm/internal.h index feeaaf06705d..54bd0dc2c23c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -334,7 +334,7 @@ static inline bool is_exec_mapping(vm_flags_t flags) } /* - * Stack area - atomatically grows in one direction + * Stack area - automatically grows in one direction * * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous: * do_mmap() forbids all other combinations. 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 3820ca54743b..8f450bc28045 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -55,9 +55,9 @@ extern bool kasan_flag_async __ro_after_init; #define KASAN_TAG_MAX 0xFD /* maximum value for random tags */ #ifdef CONFIG_KASAN_HW_TAGS -#define KASAN_TAG_MIN 0xF0 /* mimimum value for random tags */ +#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */ #else -#define KASAN_TAG_MIN 0x00 /* mimimum value for random tags */ +#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */ #endif #ifdef CONFIG_KASAN_GENERIC @@ -403,7 +403,7 @@ static inline bool kasan_byte_accessible(const void *addr) #else /* CONFIG_KASAN_HW_TAGS */ /** - * kasan_poison - mark the memory range as unaccessible + * kasan_poison - mark the memory range as inaccessible * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE * @size - range size, must be aligned to KASAN_GRANULE_SIZE * @value - value that's written to metadata for the range @@ -434,7 +434,7 @@ bool kasan_byte_accessible(const void *addr); /** * kasan_poison_last_granule - mark the last granule of the memory range as - * unaccessible + * inaccessible * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE * @size - range size * diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 728fb24c5683..d8ccff4c1275 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -27,7 +27,7 @@ /* Data structure and operations for quarantine queues. */ /* - * Each queue is a signle-linked list, which also stores the total size of + * Each queue is a single-linked list, which also stores the total size of * objects inside of it. */ struct qlist_head { @@ -138,7 +138,7 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) local_irq_save(flags); /* - * As the object now gets freed from the quaratine, assume that its + * As the object now gets freed from the quarantine, assume that its * free track is no longer valid. */ *(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE; diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 727ad4629173..082ee5b6d9a1 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -316,7 +316,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size) * // rest of vmalloc process * STORE p, a LOAD shadow(x+99) * - * If there is no barrier between the end of unpoisioning the shadow + * If there is no barrier between the end of unpoisoning the shadow * and the store of the result to p, the stores could be committed * in a different order by CPU#0, and CPU#1 could erroneously observe * poison in the shadow. @@ -384,7 +384,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr, * How does this work? * ------------------- * - * We have a region that is page aligned, labelled as A. + * We have a region that is page aligned, labeled as A. * That might not map onto the shadow in a way that is page-aligned: * * start end diff --git a/mm/kfence/report.c b/mm/kfence/report.c index e3f71451ad9e..2a319c21c939 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -263,6 +263,6 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r if (panic_on_warn) panic("panic_on_warn set ...\n"); - /* We encountered a memory unsafety error, taint the kernel! */ + /* We encountered a memory safety error, taint the kernel! 
*/ add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK); } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index ea74da3232ab..6c0185fdd815 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -667,7 +667,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, * * The page table that maps the page has been already unlinked * from the page table tree and this process cannot get - * an additinal pin on the page. + * an additional pin on the page. * * New pins can come later if the page is shared across fork, * but not from this process. The other process cannot write to diff --git a/mm/ksm.c b/mm/ksm.c index b7cbcc7d4977..6bbe314c5260 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1065,7 +1065,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, /* * Ok this is tricky, when get_user_pages_fast() run it doesn't * take any lock, therefore the check that we are going to make - * with the pagecount against the mapcount is racey and + * with the pagecount against the mapcount is racy and * O_DIRECT can happen right after the check. * So we clear the pte and flush the tlb before the check * this assure us that no O_DIRECT can happen after the check @@ -1435,7 +1435,7 @@ static struct page *stable_node_dup(struct stable_node **_stable_node_dup, */ *_stable_node = found; /* - * Just for robustneess as stable_node is + * Just for robustness, as stable_node is * otherwise left as a stable pointer, the * compiler shall optimize it away at build * time. diff --git a/mm/madvise.c b/mm/madvise.c index 01fef79ac761..63e489e5bfdb 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -799,7 +799,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma, if (end > vma->vm_end) { /* * Don't fail if end > vma->vm_end. If the old - * vma was splitted while the mmap_lock was + * vma was split while the mmap_lock was * released the effect of the concurrent * operation may not cause madvise() to * have an undefined result. There may be an @@ -1039,7 +1039,7 @@ process_madvise_behavior_valid(int behavior) * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump. * MADV_COLD - the application is not expected to use this memory soon, * deactivate pages in this range so that they can be reclaimed - * easily if memory pressure hanppens. + * easily if memory pressure happens. * MADV_PAGEOUT - the application is not expected to use this memory soon, * page out the pages in this range immediately. * diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3004afb6d090..64ada9e650a5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -215,7 +215,7 @@ enum res_type { #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) #define MEMFILE_ATTR(val) ((val) & 0xffff) -/* Used for OOM nofiier */ +/* Used for OOM notifier */ #define OOM_CONTROL (0) /* @@ -786,7 +786,7 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) * __count_memcg_events - account VM events in a cgroup * @memcg: the memory cgroup * @idx: the event item - * @count: the number of events that occured + * @count: the number of events that occurred */ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count) @@ -904,7 +904,7 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) rcu_read_lock(); do { /* - * Page cache insertions can happen withou an + * Page cache insertions can happen without an * actual mm context, e.g. during disk probing * on boot, loopback IO, acct() writes etc. 
*/ @@ -1712,7 +1712,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) struct mem_cgroup *iter; /* - * Be careful about under_oom underflows becase a child memcg + * Be careful about under_oom underflows because a child memcg * could have been added after mem_cgroup_mark_under_oom. */ spin_lock(&memcg_oom_lock); @@ -1884,7 +1884,7 @@ bool mem_cgroup_oom_synchronize(bool handle) /* * There is no guarantee that an OOM-lock contender * sees the wakeups triggered by the OOM kill - * uncharges. Wake any sleepers explicitely. + * uncharges. Wake any sleepers explicitly. */ memcg_oom_recover(memcg); } @@ -4364,7 +4364,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, * Foreign dirty flushing * * There's an inherent mismatch between memcg and writeback. The former - * trackes ownership per-page while the latter per-inode. This was a + * tracks ownership per-page while the latter per-inode. This was a * deliberate design decision because honoring per-page ownership in the * writeback path is complicated, may lead to higher CPU and IO overheads * and deemed unnecessary given that write-sharing an inode across @@ -4379,9 +4379,9 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, * triggering background writeback. A will be slowed down without a way to * make writeback of the dirty pages happen. * - * Conditions like the above can lead to a cgroup getting repatedly and + * Conditions like the above can lead to a cgroup getting repeatedly and * severely throttled after making some progress after each - * dirty_expire_interval while the underyling IO device is almost + * dirty_expire_interval while the underlying IO device is almost * completely idle. * * Solving this problem completely requires matching the ownership tracking @@ -5774,7 +5774,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset) return 0; /* - * We are now commited to this value whatever it is. Changes in this + * We are now committed to this value whatever it is. Changes in this * tunable will only affect upcoming migrations, not the current one. * So we need to save it, and keep it going. */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index bd3945446d47..85ad98c00fd9 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -75,7 +75,7 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo if (dissolve_free_huge_page(page) || !take_page_off_buddy(page)) /* * We could fail to take off the target page from buddy - * for example due to racy page allocaiton, but that's + * for example due to racy page allocation, but that's * acceptable because soft-offlined page is not broken * and if someone really want to use it, they should * take it. diff --git a/mm/memory.c b/mm/memory.c index 8c491f813687..730daa00952b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3727,7 +3727,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) return ret; /* - * Archs like ppc64 need additonal space to store information + * Archs like ppc64 need additional space to store information * related to pte entry. Use the preallocated table for that. */ if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { @@ -4503,7 +4503,7 @@ retry_pud: } /** - * mm_account_fault - Do page fault accountings + * mm_account_fault - Do page fault accounting * * @regs: the pt_regs struct pointer. 
When set to NULL, will skip accounting * of perf event counters, but we'll still do the per-task accounting to @@ -4512,9 +4512,9 @@ retry_pud: * @flags: the fault flags. * @ret: the fault retcode. * - * This will take care of most of the page fault accountings. Meanwhile, it + * This will take care of most of the page fault accounting. Meanwhile, it * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter - * updates. However note that the handling of PERF_COUNT_SW_PAGE_FAULTS should + * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should * still be in per-arch page fault handlers at the entry of page fault. */ static inline void mm_account_fault(struct pt_regs *regs, @@ -4848,7 +4848,7 @@ out: /** * generic_access_phys - generic implementation for iomem mmap access * @vma: the vma to access - * @addr: userspace addres, not relative offset within @vma + * @addr: userspace address, not relative offset within @vma * @buf: buffer to read/write * @len: length of transfer * @write: set to FOLL_WRITE when writing, otherwise reading diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3ebe2cfc64af..5690513c5668 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1867,7 +1867,7 @@ static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. * * policy->v.nodes is intersect with node_states[N_MEMORY]. - * so if the following test faile, it implies + * so if the following test fails, it implies * policy->v.nodes has movable memory only. */ if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) @@ -2098,7 +2098,7 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask) * * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default * policy. Otherwise, check for intersection between mask and the policy - * nodemask for 'bind' or 'interleave' policy. For 'perferred' or 'local' + * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local' * policy, always return true since it may allocate elsewhere on fallback. * * Takes task_lock(tsk) to prevent freeing of its mempolicy. diff --git a/mm/migrate.c b/mm/migrate.c index 6b37d00890ca..b234c3f3acb7 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2779,11 +2779,11 @@ restore: * * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus - * allowing the caller to allocate device memory for those unback virtual - * address. For this the caller simply has to allocate device memory and + * allowing the caller to allocate device memory for those unbacked virtual + * addresses. For this the caller simply has to allocate device memory and * properly set the destination entry like for regular migration. Note that - * this can still fails and thus inside the device driver must check if the - * migration was successful for those entries after calling migrate_vma_pages() + * this can still fail, and thus inside the device driver you must check if the + * migration was successful for those entries after calling migrate_vma_pages(), * just like for regular migration. 
* * After that, the callers must call migrate_vma_pages() to go over each entry diff --git a/mm/mmap.c b/mm/mmap.c index c1b848fa7da6..0584e540246e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -612,7 +612,7 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long nr_pages = 0; struct vm_area_struct *vma; - /* Find first overlaping mapping */ + /* Find first overlapping mapping */ vma = find_vma_intersection(mm, addr, end); if (!vma) return 0; @@ -2875,7 +2875,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas - * will remain splitted, but userland will get a + * will remain split, but userland will get a * highly unexpected error anyway. This is no * different than the case where the first of the two * __split_vma fails, but we don't undo the first diff --git a/mm/mprotect.c b/mm/mprotect.c index 94188df1ee55..e7a443157988 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -699,7 +699,7 @@ SYSCALL_DEFINE1(pkey_free, int, pkey) mmap_write_unlock(current->mm); /* - * We could provie warnings or errors if any VMA still + * We could provide warnings or errors if any VMA still * has the pkey set here. */ return ret; diff --git a/mm/mremap.c b/mm/mremap.c index d22629ff8f3c..47c255b60150 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -730,7 +730,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, * So, to avoid such scenario we can pre-compute if the whole * operation has high chances to success map-wise. * Worst-scenario case is when both vma's (new_addr and old_addr) get - * split in 3 before unmaping it. + * split in 3 before unmapping it. * That means 2 more maps (1 for each) to the ones we already hold. * Check whether current map count plus 2 still leads us to 4 maps below * the threshold, otherwise return -ENOMEM here to be more safe. diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 3df2ac6b8686..eefd3f5fde46 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -74,7 +74,7 @@ static inline bool is_memcg_oom(struct oom_control *oc) #ifdef CONFIG_NUMA /** - * oom_cpuset_eligible() - check task eligiblity for kill + * oom_cpuset_eligible() - check task eligibility for kill * @start: task struct of which task to consider * @oc: pointer to struct oom_control * diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5e761fb62800..0062d5c57d41 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1806,7 +1806,7 @@ pause: break; /* - * In the case of an unresponding NFS server and the NFS dirty + * In the case of an unresponsive NFS server and the NFS dirty * pages exceeds dirty_thresh, give the other good wb's a pipe * to go through, so that tasks on them still remain responsive. * @@ -2216,7 +2216,7 @@ int write_cache_pages(struct address_space *mapping, * Page truncated or invalidated. We can freely skip it * then, even for data integrity operations: the page * has disappeared concurrently, so there could be no - * real expectation of this data interity operation + * real expectation of this data integrity operation * even if there is now a new, dirty page at the same * pagecache address. */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index bcdc0c6f21f1..0582c85da08c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -893,7 +893,7 @@ compaction_capture(struct capture_control *capc, struct page *page, return false; /* - * Do not let lower order allocations polluate a movable pageblock. 
+ * Do not let lower order allocations pollute a movable pageblock. * This might let an unmovable request use a reclaimable pageblock * and vice-versa but no more than normal fallback logic which can * have trouble finding a high-order free page. @@ -2776,7 +2776,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, /* * In page freeing path, migratetype change is racy so * we can counter several free pages in a pageblock - * in this loop althoug we changed the pageblock type + * in this loop although we changed the pageblock type * from highatomic to ac->migratetype. So we should * adjust the count once. */ @@ -3080,7 +3080,7 @@ static void drain_local_pages_wq(struct work_struct *work) * drain_all_pages doesn't use proper cpu hotplug protection so * we can race with cpu offline when the WQ can move this from * a cpu pinned worker to an unbound one. We can operate on a different - * cpu which is allright but we also have to make sure to not move to + * cpu which is alright but we also have to make sure to not move to * a different one. */ preempt_disable(); @@ -5929,7 +5929,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) static int __parse_numa_zonelist_order(char *s) { /* - * We used to support different zonlists modes but they turned + * We used to support different zonelists modes but they turned * out to be just not useful. Let's keep the warning in place * if somebody still use the cmd line parameter so that we do * not fail it silently @@ -7670,7 +7670,7 @@ static void check_for_memory(pg_data_t *pgdat, int nid) } /* - * Some architecturs, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For + * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For * such cases we allow max_zone_pfn sorted in the descending order */ bool __weak arch_has_descending_max_zone_pfns(void) @@ -8728,7 +8728,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, * alloc_contig_range() -- tries to allocate given range of pages * @start: start PFN to allocate * @end: one-past-the-last PFN to allocate - * @migratetype: migratetype of the underlaying pageblocks (either + * @migratetype: migratetype of the underlying pageblocks (either * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks * in range must have the same migratetype and it must * be either of the two. @@ -8988,7 +8988,7 @@ EXPORT_SYMBOL(free_contig_range); /* * The zone indicated has a new number of managed_pages; batch sizes and percpu - * page high values need to be recalulated. + * page high values need to be recalculated. */ void __meminit zone_pcp_update(struct zone *zone) { diff --git a/mm/page_owner.c b/mm/page_owner.c index 9661d5320a07..adfabb560eb9 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -233,7 +233,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) /* * We don't clear the bit on the oldpage as it's going to be freed * after migration. Until then, the info can be useful in case of - * a bug, and the overal stats will be off a bit only temporarily. + * a bug, and the overall stats will be off a bit only temporarily. * Also, migrate_misplaced_transhuge_page() can still fail the * migration and then we want the oldpage to retain the info. 
But * in that case we also don't need to explicitly clear the info from diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h index 095d7eaa0db4..ae26b118e246 100644 --- a/mm/percpu-internal.h +++ b/mm/percpu-internal.h @@ -170,7 +170,7 @@ struct percpu_stats { u64 nr_max_alloc; /* max # of live allocations */ u32 nr_chunks; /* current # of live chunks */ u32 nr_max_chunks; /* max # of live chunks */ - size_t min_alloc_size; /* min allocaiton size */ + size_t min_alloc_size; /* min allocation size */ size_t max_alloc_size; /* max allocation size */ }; diff --git a/mm/percpu.c b/mm/percpu.c index 23308113a5ff..f99e9306b939 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1862,7 +1862,7 @@ fail: pr_info("limit reached, disable warning\n"); } if (is_atomic) { - /* see the flag handling in pcpu_blance_workfn() */ + /* see the flag handling in pcpu_balance_workfn() */ pcpu_atomic_alloc_failed = true; pcpu_schedule_balance_work(); } else { diff --git a/mm/pgalloc-track.h b/mm/pgalloc-track.h index 1dcc865029a2..e9e879de8649 100644 --- a/mm/pgalloc-track.h +++ b/mm/pgalloc-track.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_PGALLLC_TRACK_H -#define _LINUX_PGALLLC_TRACK_H +#ifndef _LINUX_PGALLOC_TRACK_H +#define _LINUX_PGALLOC_TRACK_H #if defined(CONFIG_MMU) static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd, @@ -48,4 +48,4 @@ static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud, (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\ NULL: pte_offset_kernel(pmd, address)) -#endif /* _LINUX_PGALLLC_TRACK_H */ +#endif /* _LINUX_PGALLOC_TRACK_H */ diff --git a/mm/slab.c b/mm/slab.c index d56607a80fa6..d0f725637663 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -259,7 +259,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) #define BATCHREFILL_LIMIT 16 /* - * Optimization question: fewer reaps means less probability for unnessary + * Optimization question: fewer reaps means less probability for unnecessary * cpucache drain/refill cycles. * * OTOH the cpuarrays can contain lots of objects, @@ -2381,8 +2381,8 @@ union freelist_init_state { }; /* - * Initialize the state based on the randomization methode available. - * return true if the pre-computed list is available, false otherwize. + * Initialize the state based on the randomization method available. + * return true if the pre-computed list is available, false otherwise. */ static bool freelist_state_initialize(union freelist_init_state *state, struct kmem_cache *cachep, diff --git a/mm/slub.c b/mm/slub.c index 68123b21e65f..feda53ae62ba 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3391,7 +3391,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk); */ /* - * Mininum / Maximum order of slab pages. This influences locking overhead + * Minimum / Maximum order of slab pages. This influences locking overhead * and slab fragmentation. A higher order reduces the number of partial slabs * and increases the number of allocations possible without having to * take the list_lock. diff --git a/mm/swap_slots.c b/mm/swap_slots.c index be9de6d5b516..6248d1030a9b 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -16,7 +16,7 @@ * to local caches without needing to acquire swap_info * lock. We do not reuse the returned slots directly but * move them back to the global pool in a batch. This - * allows the slots to coaellesce and reduce fragmentation. + * allows the slots to coalesce and reduce fragmentation. 
* * The swap entry allocated is marked with SWAP_HAS_CACHE * flag in map_count that prevents it from being allocated diff --git a/mm/vmalloc.c b/mm/vmalloc.c index a7f318c9e426..a13ac524f6ff 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1583,7 +1583,7 @@ static unsigned long lazy_max_pages(void) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); /* - * Serialize vmap purging. There is no actual criticial section protected + * Serialize vmap purging. There is no actual critical section protected * by this look, but we want to avoid concurrent calls for performance * reasons and to make the pcpu_get_vm_areas more deterministic. */ @@ -2628,7 +2628,7 @@ static void __vfree(const void *addr) * May sleep if called *not* from interrupt context. * Must not be called in NMI context (strictly speaking, it could be * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling - * conventions for vfree() arch-depenedent would be a really bad idea). + * conventions for vfree() arch-dependent would be a really bad idea). */ void vfree(const void *addr) { @@ -3141,7 +3141,7 @@ static int aligned_vread(char *buf, char *addr, unsigned long count) /* * To do safe access to this _mapped_ area, we need * lock. But adding lock here means that we need to add - * overhead of vmalloc()/vfree() calles for this _debug_ + * overhead of vmalloc()/vfree() calls for this _debug_ * interface, rarely used. Instead of that, we'll use * kmap() and get small overhead in this access function. */ diff --git a/mm/vmstat.c b/mm/vmstat.c index 5ba118521ded..cccee36b289c 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -934,7 +934,7 @@ void cpu_vm_stats_fold(int cpu) /* * this is only called if !populated_zone(zone), which implies no other users of - * pset->vm_stat_diff[] exsist. + * pset->vm_stat_diff[] exist. */ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset) { diff --git a/mm/zpool.c b/mm/zpool.c index 5ed71207ced7..6d9ed48141e5 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -336,7 +336,7 @@ int zpool_shrink(struct zpool *zpool, unsigned int pages, * This may hold locks, disable interrupts, and/or preemption, * and the zpool_unmap_handle() must be called to undo those * actions. The code that uses the mapped handle should complete - * its operatons on the mapped handle memory quickly and unmap + * its operations on the mapped handle memory quickly and unmap * as soon as possible. As the implementation may use per-cpu * data, multiple handles should not be mapped concurrently on * any cpu. diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 5004c176b045..19b563bc6c48 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1227,7 +1227,7 @@ EXPORT_SYMBOL_GPL(zs_get_total_pages); * zs_map_object - get address of allocated object from handle. * @pool: pool from which the object was allocated * @handle: handle returned from zs_malloc - * @mm: maping mode to use + * @mm: mapping mode to use * * Before using an object allocated from zs_malloc, it must be mapped using * this function. When done with the object, it must be unmapped using -- cgit v1.2.3-71-gd317 From 0ab1438bad43d95877f848b7df551bd431680270 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 6 May 2021 02:45:15 +0900 Subject: linux/kconfig.h: replace IF_ENABLED() with PTR_IF() in is included from all the kernel-space source files, including C, assembly, linker scripts. It is intended to contain a minimal set of macros to evaluate CONFIG options. IF_ENABLED() is an intruder here because (x ? 
y : z) is C code, which should not be included from assembly files or linker scripts. Also, is no longer self-contained because NULL is defined in . Move IF_ENABLED() out to as PTR_IF(). PTF_IF() takes the general boolean expression instead of a CONFIG option so that it fits better in . Signed-off-by: Masahiro Yamada Reviewed-by: Kees Cook --- drivers/pinctrl/pinctrl-ingenic.c | 3 +++ include/linux/kconfig.h | 6 ------ include/linux/kernel.h | 2 ++ 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 651a36b9dcc0..983ba9865f77 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -3854,6 +3855,8 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev) return 0; } +#define IF_ENABLED(cfg, ptr) PTR_IF(IS_ENABLED(cfg), (ptr)) + static const struct of_device_id ingenic_pinctrl_of_match[] = { { .compatible = "ingenic,jz4730-pinctrl", diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index 24a59cb06963..cc8fa109cfa3 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -70,10 +70,4 @@ */ #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) -/* - * IF_ENABLED(CONFIG_FOO, ptr) evaluates to (ptr) if CONFIG_FOO is set to 'y' - * or 'm', NULL otherwise. - */ -#define IF_ENABLED(option, ptr) (IS_ENABLED(option) ? (ptr) : NULL) - #endif /* __LINUX_KCONFIG_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 5b7ed6dc99ac..2f9d15410c93 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -48,6 +48,8 @@ */ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) +#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL) + #define u64_to_user_ptr(x) ( \ { \ typecheck(u64, (x)); \ -- cgit v1.2.3-71-gd317 From 35c820e71565d1fa835b82499359218b219828ac Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 8 May 2021 21:49:48 -0600 Subject: Revert "bio: limit bio max size" This reverts commit cd2c7545ae1beac3b6aae033c7f31193b3255946. Alex reports that the commit causes corruption with LUKS on ext4. Revert it for now so that this can be investigated properly. Link: https://lore.kernel.org/linux-block/1620493841.bxdq8r5haw.none@localhost/ Reported-by: Alex Xu (Hello71) Signed-off-by: Jens Axboe --- block/bio.c | 13 ++----------- block/blk-settings.c | 5 ----- include/linux/bio.h | 4 +--- include/linux/blkdev.h | 2 -- 4 files changed, 3 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index 221dc56ba22f..44205dfb6b60 100644 --- a/block/bio.c +++ b/block/bio.c @@ -255,13 +255,6 @@ void bio_init(struct bio *bio, struct bio_vec *table, } EXPORT_SYMBOL(bio_init); -unsigned int bio_max_size(struct bio *bio) -{ - struct block_device *bdev = bio->bi_bdev; - - return bdev ? 
bdev->bd_disk->queue->limits.bio_max_bytes : UINT_MAX; -} - /** * bio_reset - reinitialize a bio * @bio: bio to reset @@ -873,7 +866,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page, struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; if (page_is_mergeable(bv, page, len, off, same_page)) { - if (bio->bi_iter.bi_size > bio_max_size(bio) - len) { + if (bio->bi_iter.bi_size > UINT_MAX - len) { *same_page = false; return false; } @@ -1002,7 +995,6 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) { unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; - unsigned int bytes_left = bio_max_size(bio) - bio->bi_iter.bi_size; struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; struct page **pages = (struct page **)bv; bool same_page = false; @@ -1018,8 +1010,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2); pages += entries_left * (PAGE_PTRS_PER_BVEC - 1); - size = iov_iter_get_pages(iter, pages, bytes_left, nr_pages, - &offset); + size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); if (unlikely(size <= 0)) return size ? size : -EFAULT; diff --git a/block/blk-settings.c b/block/blk-settings.c index c646503e55d2..9c009090c4b5 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -31,7 +31,6 @@ EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); */ void blk_set_default_limits(struct queue_limits *lim) { - lim->bio_max_bytes = UINT_MAX; lim->max_segments = BLK_MAX_SEGMENTS; lim->max_discard_segments = 1; lim->max_integrity_segments = 0; @@ -140,10 +139,6 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto limits->logical_block_size >> SECTOR_SHIFT); limits->max_sectors = max_sectors; - if (check_shl_overflow(max_sectors, SECTOR_SHIFT, - &limits->bio_max_bytes)) - limits->bio_max_bytes = UINT_MAX; - q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9); } EXPORT_SYMBOL(blk_queue_max_hw_sectors); diff --git a/include/linux/bio.h b/include/linux/bio.h index f1a99f0a240c..a0b4cfdf62a4 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -106,8 +106,6 @@ static inline void *bio_data(struct bio *bio) return NULL; } -extern unsigned int bio_max_size(struct bio *bio); - /** * bio_full - check if the bio is full * @bio: bio to check @@ -121,7 +119,7 @@ static inline bool bio_full(struct bio *bio, unsigned len) if (bio->bi_vcnt >= bio->bi_max_vecs) return true; - if (bio->bi_iter.bi_size > bio_max_size(bio) - len) + if (bio->bi_iter.bi_size > UINT_MAX - len) return true; return false; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 40c7c4d87aa1..b91ba6207365 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -327,8 +327,6 @@ enum blk_bounce { }; struct queue_limits { - unsigned int bio_max_bytes; - enum blk_bounce bounce; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; -- cgit v1.2.3-71-gd317 From 28ec344bb8911bb0d4910456b22ba0dd4f662521 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Wed, 5 May 2021 17:44:22 -0700 Subject: usb: typec: tcpm: Don't block probing of consumers of "connector" nodes fw_devlink expects DT device nodes with "compatible" property to have struct devices created for them. Since the connector node might not be populated as a device, mark it as such so that fw_devlink knows not to wait on this fwnode being populated as a struct device. 
Without this patch, USB functionality can be broken on some boards. Fixes: f7514a663016 ("of: property: fw_devlink: Add support for remote-endpoint") Reported-by: John Stultz Tested-by: John Stultz Signed-off-by: Saravana Kannan Link: https://lore.kernel.org/r/20210506004423.345199-1-saravanak@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 3 ++- drivers/usb/typec/tcpm/tcpm.c | 9 +++++++++ include/linux/fwnode.h | 1 + 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/base/core.c b/drivers/base/core.c index 4a8bf8cda52b..628e33939aca 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -150,7 +150,7 @@ void fwnode_links_purge(struct fwnode_handle *fwnode) fwnode_links_purge_consumers(fwnode); } -static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode) +void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode) { struct fwnode_handle *child; @@ -164,6 +164,7 @@ static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode) fwnode_for_each_available_child_node(fwnode, child) fw_devlink_purge_absent_suppliers(child); } +EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers); #ifdef CONFIG_SRCU static DEFINE_MUTEX(device_links_lock); diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index c4fdc00a3bc8..bffa342d4e38 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -5754,6 +5754,15 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, if (!fwnode) return -EINVAL; + /* + * This fwnode has a "compatible" property, but is never populated as a + * struct device. Instead we simply parse it to read the properties. + * This it breaks fw_devlink=on. To maintain backward compatibility + * with existing DT files, we work around this by deleting any + * fwnode_links to/from this fwnode. + */ + fw_devlink_purge_absent_suppliers(fwnode); + /* USB data support is optional */ ret = fwnode_property_read_string(fwnode, "data-role", &cap_str); if (ret == 0) { diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index ed4e67a7ff1c..59828516ebaf 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -187,5 +187,6 @@ extern u32 fw_devlink_get_flags(void); extern bool fw_devlink_is_strict(void); int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup); void fwnode_links_purge(struct fwnode_handle *fwnode); +void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode); #endif -- cgit v1.2.3-71-gd317 From c745253e2a691a40c66790defe85c104a887e14a Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 5 May 2021 14:09:15 +0300 Subject: PM: runtime: Fix unpaired parent child_count for force_resume As pm_runtime_need_not_resume() relies also on usage_count, it can return a different value in pm_runtime_force_suspend() compared to when called in pm_runtime_force_resume(). Different return values can happen if anything calls PM runtime functions in between, and causes the parent child_count to increase on every resume. 
So far I've seen the issue only for omapdrm that does complicated things with PM runtime calls during system suspend for legacy reasons: omap_atomic_commit_tail() for omapdrm.0 dispc_runtime_get() wakes up 58000000.dss as it's the dispc parent dispc_runtime_resume() rpm_resume() increases parent child_count dispc_runtime_put() won't idle, PM runtime suspend blocked pm_runtime_force_suspend() for 58000000.dss, !pm_runtime_need_not_resume() __update_runtime_status() system suspended pm_runtime_force_resume() for 58000000.dss, pm_runtime_need_not_resume() pm_runtime_enable() only called because of pm_runtime_need_not_resume() omap_atomic_commit_tail() for omapdrm.0 dispc_runtime_get() wakes up 58000000.dss as it's the dispc parent dispc_runtime_resume() rpm_resume() increases parent child_count dispc_runtime_put() won't idle, PM runtime suspend blocked ... rpm_suspend for 58000000.dss but parent child_count is now unbalanced Let's fix the issue by adding a flag for needs_force_resume and use it in pm_runtime_force_resume() instead of pm_runtime_need_not_resume(). Additionally omapdrm system suspend could be simplified later on to avoid lots of unnecessary PM runtime calls and the complexity it adds. The driver can just use internal functions that are shared between the PM runtime and system suspend related functions. Fixes: 4918e1f87c5f ("PM / runtime: Rework pm_runtime_force_suspend/resume()") Signed-off-by: Tony Lindgren Reviewed-by: Ulf Hansson Tested-by: Tomi Valkeinen Cc: 4.16+ # 4.16+ Signed-off-by: Rafael J. Wysocki --- drivers/base/power/runtime.c | 10 +++++++--- include/linux/pm.h | 1 + 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 1fc1a992f90c..b570848d23e0 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev) dev->power.request_pending = false; dev->power.request = RPM_REQ_NONE; dev->power.deferred_resume = false; + dev->power.needs_force_resume = 0; INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; @@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev) * its parent, but set its status to RPM_SUSPENDED anyway in case this * function will be called again for it in the meantime. 
*/ - if (pm_runtime_need_not_resume(dev)) + if (pm_runtime_need_not_resume(dev)) { pm_runtime_set_suspended(dev); - else + } else { __update_runtime_status(dev, RPM_SUSPENDED); + dev->power.needs_force_resume = 1; + } return 0; @@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev) int (*callback)(struct device *); int ret = 0; - if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev)) + if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume) goto out; /* @@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev) pm_runtime_mark_last_busy(dev); out: + dev->power.needs_force_resume = 0; pm_runtime_enable(dev); return ret; } diff --git a/include/linux/pm.h b/include/linux/pm.h index c9657408fee1..1d8209c09686 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -601,6 +601,7 @@ struct dev_pm_info { unsigned int idle_notification:1; unsigned int request_pending:1; unsigned int deferred_resume:1; + unsigned int needs_force_resume:1; unsigned int runtime_auto:1; bool ignore_children:1; unsigned int no_callbacks:1; -- cgit v1.2.3-71-gd317 From 2515dd6ce8e545b0b2eece84920048ef9ed846c4 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Mon, 19 Apr 2021 16:17:41 -0700 Subject: stack: Replace "o" output with "r" input constraint "o" isn't a common asm() constraint to use; it triggers an assertion in assert-enabled builds of LLVM that it's not recognized when targeting aarch64 (though it appears to fall back to "m"). It's fixed in LLVM 13 now, but there isn't really a good reason to use "o" in particular here. To avoid causing build issues for those using assert-enabled builds of earlier LLVM versions, the constraint needs changing. Instead, if the point is to retain the __builtin_alloca(), make ptr appear to "escape" via being an input to an empty inline asm block. This is preferable anyways, since otherwise this looks like a dead store. While the use of "r" was considered in https://lore.kernel.org/lkml/202104011447.2E7F543@keescook/ it was only tested as an output (which looks like a dead store, and wasn't sufficient). Use "r" as an input constraint instead, which behaves correctly across compilers and architectures. Fixes: 39218ff4c625 ("stack: Optionally randomize kernel stack offset each syscall") Signed-off-by: Nick Desaulniers Signed-off-by: Kees Cook Signed-off-by: Thomas Gleixner Tested-by: Kees Cook Tested-by: Nathan Chancellor Reviewed-by: Nathan Chancellor Link: https://reviews.llvm.org/D100412 Link: https://bugs.llvm.org/show_bug.cgi?id=49956 Link: https://lore.kernel.org/r/20210419231741.4084415-1-keescook@chromium.org --- include/linux/randomize_kstack.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h index fd80fab663a9..bebc911161b6 100644 --- a/include/linux/randomize_kstack.h +++ b/include/linux/randomize_kstack.h @@ -38,7 +38,7 @@ void *__builtin_alloca(size_t size); u32 offset = raw_cpu_read(kstack_offset); \ u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset)); \ /* Keep allocation even after "ptr" loses scope. 
*/ \ - asm volatile("" : "=o"(*ptr) :: "memory"); \ + asm volatile("" :: "r"(ptr) : "memory"); \ } \ } while (0) -- cgit v1.2.3-71-gd317 From 35f3f8504c3b60a1ae5576e178b27fc0ddd6157d Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 10 May 2021 16:12:42 +0300 Subject: spi: Switch to signed types for *_native_cs SPI controller fields While fixing undefined behaviour the commit f60d7270c8a3 ("spi: Avoid undefined behaviour when counting unused native CSs") missed the case when all CSs are GPIOs and thus unused_native_cs will be evaluated to -1 in unsigned representation. This will falsely trigger a condition in the spi_get_gpio_descs(). Switch to signed types for *_native_cs SPI controller fields to fix above. Fixes: f60d7270c8a3 ("spi: Avoid undefined behaviour when counting unused native CSs") Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20210510131242.49455-1-andriy.shevchenko@linux.intel.com Signed-off-by: Mark Brown --- include/linux/spi/spi.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 360a3bc767ca..74239d65c7fd 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -644,8 +644,8 @@ struct spi_controller { int *cs_gpios; struct gpio_desc **cs_gpiods; bool use_gpio_descriptors; - u8 unused_native_cs; - u8 max_native_cs; + s8 unused_native_cs; + s8 max_native_cs; /* statistics */ struct spi_statistics statistics; -- cgit v1.2.3-71-gd317 From efed9a3337e341bd0989161b97453b52567bc59d Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 10 May 2021 17:05:35 -0700 Subject: kyber: fix out of bounds access when preempted __blk_mq_sched_bio_merge() gets the ctx and hctx for the current CPU and passes the hctx to ->bio_merge(). kyber_bio_merge() then gets the ctx for the current CPU again and uses that to get the corresponding Kyber context in the passed hctx. However, the thread may be preempted between the two calls to blk_mq_get_ctx(), and the ctx returned the second time may no longer correspond to the passed hctx. This "works" accidentally most of the time, but it can cause us to read garbage if the second ctx came from an hctx with more ctx's than the first one (i.e., if ctx->index_hw[hctx->type] > hctx->nr_ctx). This manifested as this UBSAN array index out of bounds error reported by Jakub: UBSAN: array-index-out-of-bounds in ../kernel/locking/qspinlock.c:130:9 index 13106 is out of range for type 'long unsigned int [128]' Call Trace: dump_stack+0xa4/0xe5 ubsan_epilogue+0x5/0x40 __ubsan_handle_out_of_bounds.cold.13+0x2a/0x34 queued_spin_lock_slowpath+0x476/0x480 do_raw_spin_lock+0x1c2/0x1d0 kyber_bio_merge+0x112/0x180 blk_mq_submit_bio+0x1f5/0x1100 submit_bio_noacct+0x7b0/0x870 submit_bio+0xc2/0x3a0 btrfs_map_bio+0x4f0/0x9d0 btrfs_submit_data_bio+0x24e/0x310 submit_one_bio+0x7f/0xb0 submit_extent_page+0xc4/0x440 __extent_writepage_io+0x2b8/0x5e0 __extent_writepage+0x28d/0x6e0 extent_write_cache_pages+0x4d7/0x7a0 extent_writepages+0xa2/0x110 do_writepages+0x8f/0x180 __writeback_single_inode+0x99/0x7f0 writeback_sb_inodes+0x34e/0x790 __writeback_inodes_wb+0x9e/0x120 wb_writeback+0x4d2/0x660 wb_workfn+0x64d/0xa10 process_one_work+0x53a/0xa80 worker_thread+0x69/0x5b0 kthread+0x20b/0x240 ret_from_fork+0x1f/0x30 Only Kyber uses the hctx, so fix it by passing the request_queue to ->bio_merge() instead. BFQ and mq-deadline just use that, and Kyber can map the queues itself to avoid the mismatch. 
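As a purely illustrative user-space analogy (not kernel code, and not part of this patch), the class of mismatch being fixed -- state derived from the current CPU read twice, with a possible preemption or migration in between -- can be demonstrated with sched_getcpu():

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  int main(void)
  {
      /* analogous to the ctx the caller used to pick the hctx */
      int first = sched_getcpu();
      /* ...the thread may be preempted and migrated here... */
      /* analogous to the ctx looked up again inside ->bio_merge() */
      int second = sched_getcpu();

      if (first != second)
          printf("mismatch: cpu %d vs cpu %d\n", first, second);
      return first != second;
  }

The fix removes the second, independent lookup in the kernel by deriving ctx and hctx together from one point.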
Fixes: a6088845c2bf ("block: kyber: make kyber more friendly with merging") Reported-by: Jakub Kicinski Signed-off-by: Omar Sandoval Link: https://lore.kernel.org/r/c7598605401a48d5cfeadebb678abd10af22b83f.1620691329.git.osandov@fb.com Signed-off-by: Jens Axboe --- block/bfq-iosched.c | 3 +-- block/blk-mq-sched.c | 8 +++++--- block/kyber-iosched.c | 5 +++-- block/mq-deadline.c | 3 +-- include/linux/elevator.h | 2 +- 5 files changed, 11 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 0270cd7ca165..59b2499d3f8b 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -2263,10 +2263,9 @@ static void bfq_remove_request(struct request_queue *q, } -static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, +static bool bfq_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { - struct request_queue *q = hctx->queue; struct bfq_data *bfqd = q->elevator->elevator_data; struct request *free = NULL; /* diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 42a365b1b9c0..996a4b2f73aa 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -358,14 +358,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { struct elevator_queue *e = q->elevator; - struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); - struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); + struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; bool ret = false; enum hctx_type type; if (e && e->type->ops.bio_merge) - return e->type->ops.bio_merge(hctx, bio, nr_segs); + return e->type->ops.bio_merge(q, bio, nr_segs); + ctx = blk_mq_get_ctx(q); + hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); type = hctx->type; if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) || list_empty_careful(&ctx->rq_lists[type])) diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index 8969e122f081..81e3279ecd57 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -561,11 +561,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) } } -static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, +static bool kyber_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { + struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); + struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); struct kyber_hctx_data *khd = hctx->sched_data; - struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue); struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]]; unsigned int sched_domain = kyber_sched_domain(bio->bi_opf); struct list_head *rq_list = &kcq->rq_list[sched_domain]; diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 04aded71ead2..8eea2cbf2bf4 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq, return ELEVATOR_NO_MERGE; } -static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, +static bool dd_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { - struct request_queue *q = hctx->queue; struct deadline_data *dd = q->elevator->elevator_data; struct request *free = NULL; bool ret; diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 1fe8e105b83b..dcb2f9022c1d 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -34,7 +34,7 @@ struct elevator_mq_ops { void (*depth_updated)(struct blk_mq_hw_ctx *); bool (*allow_merge)(struct 
request_queue *, struct request *, struct bio *); - bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int); + bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); int (*request_merge)(struct request_queue *q, struct request **, struct bio *); void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); void (*requests_merged)(struct request_queue *, struct request *, struct request *); -- cgit v1.2.3-71-gd317 From 190515f610946db025cdedebde93958b725fb583 Mon Sep 17 00:00:00 2001 From: Lin Feng Date: Wed, 12 May 2021 18:01:24 +0800 Subject: blkdev.h: remove unused codes blk_account_rq Last users of blk_account_rq gone with patch commit a1ce35fa49852db ("block: remove dead elevator code") and now it gets no caller, it can be safely removed. Signed-off-by: Lin Feng Link: https://lore.kernel.org/r/20210512100124.173769-1-linf@wangsu.com Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b91ba6207365..26c3e368656f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -677,11 +677,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); extern void blk_set_pm_only(struct request_queue *q); extern void blk_clear_pm_only(struct request_queue *q); -static inline bool blk_account_rq(struct request *rq) -{ - return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); -} - #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) -- cgit v1.2.3-71-gd317 From 681865a03d3ec6ac3dda147044ed2a1a0f49f7bf Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Mon, 19 Apr 2021 19:27:25 +0800 Subject: libnvdimm: Remove duplicate struct declaration struct device is declared at 133rd line. The second declaration is unnecessary, remove it. Signed-off-by: Wan Jiabing Link: https://lore.kernel.org/r/20210419112725.42145-1-wanjiabing@vivo.com Signed-off-by: Dan Williams --- include/linux/libnvdimm.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 01f251b6e36c..89b69e645ac7 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -141,7 +141,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev, struct nvdimm_bus; struct module; -struct device; struct nd_blk_region; struct nd_blk_region_desc { int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); -- cgit v1.2.3-71-gd317 From 860dafa902595fb5f1d23bbcce1215188c3341e6 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Thu, 13 May 2021 11:51:50 +0200 Subject: vt: Fix character height handling with VT_RESIZEX Restore the original intent of the VT_RESIZEX ioctl's `v_clin' parameter which is the number of pixel rows per character (cell) rather than the height of the font used. For framebuffer devices the two values are always the same, because the former is inferred from the latter one. For VGA used as a true text mode device these two parameters are independent from each other: the number of pixel rows per character is set in the CRT controller, while font height is in fact hardwired to 32 pixel rows and fonts of heights below that value are handled by padding their data with blanks when loaded to hardware for use by the character generator. 
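For reference, a minimal user-space sketch of how `v_clin' reaches the kernel through VT_RESIZEX; struct vt_consize and the ioctl come from <linux/vt.h>, and the numbers below are illustrative only:

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/vt.h>

  int main(void)
  {
      struct vt_consize cs = {
          .v_rows = 30,   /* text rows */
          .v_cols = 80,   /* text columns */
          .v_vlin = 480,  /* pixel rows on the screen */
          .v_clin = 16,   /* pixel rows per character cell (CRTC setting) */
      };
      int fd = open("/dev/tty0", O_RDWR);

      if (fd < 0 || ioctl(fd, VT_RESIZEX, &cs) < 0) {
          perror("VT_RESIZEX");
          return 1;
      }
      return 0;
  }

With the change below, that value updates the character cell (`vc_cell_height') rather than the font height, so it only affects the VGA text console.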
One can change the setting in the CRT controller and it will update the screen contents accordingly regardless of the font loaded. The `v_clin' parameter is used by the `vgacon' driver to set the height of the character cell and then the cursor position within. Make the parameter explicit then, by defining a new `vc_cell_height' struct member of `vc_data', set it instead of `vc_font.height' from `v_clin' in the VT_RESIZEX ioctl, and then use it throughout the `vgacon' driver except where actual font data is accessed which as noted above is independent from the CRTC setting. This way the framebuffer console driver is free to ignore the `v_clin' parameter as irrelevant, as it always should have, avoiding any issues attempts to give the parameter a meaning there could have caused, such as one that has led to commit 988d0763361b ("vt_ioctl: make VT_RESIZEX behave like VT_RESIZE"): "syzbot is reporting UAF/OOB read at bit_putcs()/soft_cursor() [1][2], for vt_resizex() from ioctl(VT_RESIZEX) allows setting font height larger than actual font height calculated by con_font_set() from ioctl(PIO_FONT). Since fbcon_set_font() from con_font_set() allocates minimal amount of memory based on actual font height calculated by con_font_set(), use of vt_resizex() can cause UAF/OOB read for font data." The problem first appeared around Linux 2.5.66 which predates our repo history, but the origin could be identified with the old MIPS/Linux repo also at: as commit 9736a3546de7 ("Merge with Linux 2.5.66."), where VT_RESIZEX code in `vt_ioctl' was updated as follows: if (clin) - video_font_height = clin; + vc->vc_font.height = clin; making the parameter apply to framebuffer devices as well, perhaps due to the use of "font" in the name of the original `video_font_height' variable. Use "cell" in the new struct member then to avoid ambiguity. References: [1] https://syzkaller.appspot.com/bug?id=32577e96d88447ded2d3b76d71254fb855245837 [2] https://syzkaller.appspot.com/bug?id=6b8355d27b2b94fb5cedf4655e3a59162d9e48e3 Signed-off-by: Maciej W. 
Rozycki Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable@vger.kernel.org # v2.6.12+ Signed-off-by: Linus Torvalds --- drivers/tty/vt/vt_ioctl.c | 6 +++--- drivers/video/console/vgacon.c | 44 +++++++++++++++++++++--------------------- include/linux/console_struct.h | 1 + 3 files changed, 26 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 95d10197566b..0e0cd9e9e589 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -706,17 +706,17 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs) if (vcp) { int ret; int save_scan_lines = vcp->vc_scan_lines; - int save_font_height = vcp->vc_font.height; + int save_cell_height = vcp->vc_cell_height; if (v.v_vlin) vcp->vc_scan_lines = v.v_vlin; if (v.v_clin) - vcp->vc_font.height = v.v_clin; + vcp->vc_cell_height = v.v_clin; vcp->vc_resize_user = 1; ret = vc_resize(vcp, v.v_cols, v.v_rows); if (ret) { vcp->vc_scan_lines = save_scan_lines; - vcp->vc_font.height = save_font_height; + vcp->vc_cell_height = save_cell_height; console_unlock(); return ret; } diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 511e7d06b148..631eb918f8e1 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -383,7 +383,7 @@ static void vgacon_init(struct vc_data *c, int init) vc_resize(c, vga_video_num_columns, vga_video_num_lines); c->vc_scan_lines = vga_scan_lines; - c->vc_font.height = vga_video_font_height; + c->vc_font.height = c->vc_cell_height = vga_video_font_height; c->vc_complement_mask = 0x7700; if (vga_512_chars) c->vc_hi_font_mask = 0x0800; @@ -518,32 +518,32 @@ static void vgacon_cursor(struct vc_data *c, int mode) switch (CUR_SIZE(c->vc_cursor_type)) { case CUR_UNDERLINE: vgacon_set_cursor_size(c->state.x, - c->vc_font.height - - (c->vc_font.height < + c->vc_cell_height - + (c->vc_cell_height < 10 ? 2 : 3), - c->vc_font.height - - (c->vc_font.height < + c->vc_cell_height - + (c->vc_cell_height < 10 ? 1 : 2)); break; case CUR_TWO_THIRDS: vgacon_set_cursor_size(c->state.x, - c->vc_font.height / 3, - c->vc_font.height - - (c->vc_font.height < + c->vc_cell_height / 3, + c->vc_cell_height - + (c->vc_cell_height < 10 ? 1 : 2)); break; case CUR_LOWER_THIRD: vgacon_set_cursor_size(c->state.x, - (c->vc_font.height * 2) / 3, - c->vc_font.height - - (c->vc_font.height < + (c->vc_cell_height * 2) / 3, + c->vc_cell_height - + (c->vc_cell_height < 10 ? 1 : 2)); break; case CUR_LOWER_HALF: vgacon_set_cursor_size(c->state.x, - c->vc_font.height / 2, - c->vc_font.height - - (c->vc_font.height < + c->vc_cell_height / 2, + c->vc_cell_height - + (c->vc_cell_height < 10 ? 
1 : 2)); break; case CUR_NONE: @@ -554,7 +554,7 @@ static void vgacon_cursor(struct vc_data *c, int mode) break; default: vgacon_set_cursor_size(c->state.x, 1, - c->vc_font.height); + c->vc_cell_height); break; } break; @@ -565,13 +565,13 @@ static int vgacon_doresize(struct vc_data *c, unsigned int width, unsigned int height) { unsigned long flags; - unsigned int scanlines = height * c->vc_font.height; + unsigned int scanlines = height * c->vc_cell_height; u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan; raw_spin_lock_irqsave(&vga_lock, flags); vgacon_xres = width * VGA_FONTWIDTH; - vgacon_yres = height * c->vc_font.height; + vgacon_yres = height * c->vc_cell_height; if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg); max_scan = inb_p(vga_video_port_val); @@ -626,9 +626,9 @@ static int vgacon_doresize(struct vc_data *c, static int vgacon_switch(struct vc_data *c) { int x = c->vc_cols * VGA_FONTWIDTH; - int y = c->vc_rows * c->vc_font.height; + int y = c->vc_rows * c->vc_cell_height; int rows = screen_info.orig_video_lines * vga_default_font_height/ - c->vc_font.height; + c->vc_cell_height; /* * We need to save screen size here as it's the only way * we can spot the screen has been resized and we need to @@ -1041,7 +1041,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight) cursor_size_lastto = 0; c->vc_sw->con_cursor(c, CM_DRAW); } - c->vc_font.height = fontheight; + c->vc_font.height = c->vc_cell_height = fontheight; vc_resize(c, 0, rows); /* Adjust console size */ } } @@ -1096,12 +1096,12 @@ static int vgacon_resize(struct vc_data *c, unsigned int width, */ screen_info.orig_video_cols = width; screen_info.orig_video_lines = height; - vga_default_font_height = c->vc_font.height; + vga_default_font_height = c->vc_cell_height; return 0; } if (width % 2 || width > screen_info.orig_video_cols || height > (screen_info.orig_video_lines * vga_default_font_height)/ - c->vc_font.height) + c->vc_cell_height) return -EINVAL; if (con_is_visible(c) && !vga_is_gfx) /* who knows */ diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 153734816b49..d5b9c8d40c18 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -101,6 +101,7 @@ struct vc_data { unsigned int vc_rows; unsigned int vc_size_row; /* Bytes per row */ unsigned int vc_scan_lines; /* # of scan lines */ + unsigned int vc_cell_height; /* CRTC character cell height */ unsigned long vc_origin; /* [!] Start of real screen */ unsigned long vc_scr_end; /* [!] End of real screen */ unsigned long vc_visible_origin; /* [!] Top of visible window */ -- cgit v1.2.3-71-gd317 From 640d1eaff2c09e382a23bd831094ebbfaa16fef5 Mon Sep 17 00:00:00 2001 From: Jim Cromie Date: Tue, 4 May 2021 16:22:34 -0600 Subject: dyndbg: avoid calling dyndbg_emit_prefix when it has no work Wrap function in a static-inline one, which checks flags to avoid calling the function unnecessarily. And hoist its output-buffer initialization to the grand-caller, which is already allocating the buffer on the stack, and can trivially initialize it too. 
Signed-off-by: Jim Cromie Link: https://lore.kernel.org/r/20210504222235.1033685-2-jim.cromie@gmail.com Signed-off-by: Greg Kroah-Hartman --- include/linux/dynamic_debug.h | 5 +++++ lib/dynamic_debug.c | 19 ++++++++++++------- 2 files changed, 17 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index a57ee75342cf..dce631e678dd 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -32,6 +32,11 @@ struct _ddebug { #define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2) #define _DPRINTK_FLAGS_INCL_LINENO (1<<3) #define _DPRINTK_FLAGS_INCL_TID (1<<4) + +#define _DPRINTK_FLAGS_INCL_ANY \ + (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\ + _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID) + #if defined DEBUG #define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT #else diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 921d0a654243..398920403321 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -586,13 +586,11 @@ static int remaining(int wrote) return 0; } -static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf) +static char *__dynamic_emit_prefix(const struct _ddebug *desc, char *buf) { int pos_after_tid; int pos = 0; - *buf = '\0'; - if (desc->flags & _DPRINTK_FLAGS_INCL_TID) { if (in_interrupt()) pos += snprintf(buf + pos, remaining(pos), " "); @@ -618,11 +616,18 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf) return buf; } +static inline char *dynamic_emit_prefix(struct _ddebug *desc, char *buf) +{ + if (unlikely(desc->flags & _DPRINTK_FLAGS_INCL_ANY)) + return __dynamic_emit_prefix(desc, buf); + return buf; +} + void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...) { va_list args; struct va_format vaf; - char buf[PREFIX_SIZE]; + char buf[PREFIX_SIZE] = ""; BUG_ON(!descriptor); BUG_ON(!fmt); @@ -655,7 +660,7 @@ void __dynamic_dev_dbg(struct _ddebug *descriptor, if (!dev) { printk(KERN_DEBUG "(NULL device *): %pV", &vaf); } else { - char buf[PREFIX_SIZE]; + char buf[PREFIX_SIZE] = ""; dev_printk_emit(LOGLEVEL_DEBUG, dev, "%s%s %s: %pV", dynamic_emit_prefix(descriptor, buf), @@ -684,7 +689,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, vaf.va = &args; if (dev && dev->dev.parent) { - char buf[PREFIX_SIZE]; + char buf[PREFIX_SIZE] = ""; dev_printk_emit(LOGLEVEL_DEBUG, dev->dev.parent, "%s%s %s %s%s: %pV", @@ -720,7 +725,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, vaf.va = &args; if (ibdev && ibdev->dev.parent) { - char buf[PREFIX_SIZE]; + char buf[PREFIX_SIZE] = ""; dev_printk_emit(LOGLEVEL_DEBUG, ibdev->dev.parent, "%s%s %s %s: %pV", -- cgit v1.2.3-71-gd317 From 22247efd822e6d263f3c8bd327f3f769aea9b1d9 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Fri, 14 May 2021 17:27:04 -0700 Subject: mm/hugetlb: fix F_SEAL_FUTURE_WRITE Patch series "mm/hugetlb: Fix issues on file sealing and fork", v2. Hugh reported issue with F_SEAL_FUTURE_WRITE not applied correctly to hugetlbfs, which I can easily verify using the memfd_test program, which seems that the program is hardly run with hugetlbfs pages (as by default shmem). Meanwhile I found another probably even more severe issue on that hugetlb fork won't wr-protect child cow pages, so child can potentially write to parent private pages. Patch 2 addresses that. After this series applied, "memfd_test hugetlbfs" should start to pass. This patch (of 2): F_SEAL_FUTURE_WRITE is missing for hugetlb starting from the first day. 
There is a test program for that and it fails constantly. $ ./memfd_test hugetlbfs memfd-hugetlb: CREATE memfd-hugetlb: BASIC memfd-hugetlb: SEAL-WRITE memfd-hugetlb: SEAL-FUTURE-WRITE mmap() didn't fail as expected Aborted (core dumped) I think it's probably because no one is really running the hugetlbfs test. Fix it by checking FUTURE_WRITE also in hugetlbfs_file_mmap() as what we do in shmem_mmap(). Generalize a helper for that. Link: https://lkml.kernel.org/r/20210503234356.9097-1-peterx@redhat.com Link: https://lkml.kernel.org/r/20210503234356.9097-2-peterx@redhat.com Fixes: ab3948f58ff84 ("mm/memfd: add an F_SEAL_FUTURE_WRITE seal to memfd") Signed-off-by: Peter Xu Reported-by: Hugh Dickins Reviewed-by: Mike Kravetz Cc: Joel Fernandes (Google) Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 5 +++++ include/linux/mm.h | 32 ++++++++++++++++++++++++++++++++ mm/shmem.c | 22 ++++------------------ 3 files changed, 41 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a2a42335e8fd..9d9e0097c1d3 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -131,6 +131,7 @@ static void huge_pagevec_release(struct pagevec *pvec) static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file_inode(file); + struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); loff_t len, vma_len; int ret; struct hstate *h = hstate_file(file); @@ -146,6 +147,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; vma->vm_ops = &hugetlb_vm_ops; + ret = seal_check_future_write(info->seals, vma); + if (ret) + return ret; + /* * page based offset in vm_pgoff could be sufficiently large to * overflow a loff_t when converted to byte offset. This can diff --git a/include/linux/mm.h b/include/linux/mm.h index 322ec61d0da7..c274f75efcf9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3216,5 +3216,37 @@ void mem_dump_obj(void *object); static inline void mem_dump_obj(void *object) {} #endif +/** + * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it + * @seals: the seals to check + * @vma: the vma to operate on + * + * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on + * the vma flags. Return 0 if check pass, or <0 for errors. + */ +static inline int seal_check_future_write(int seals, struct vm_area_struct *vma) +{ + if (seals & F_SEAL_FUTURE_WRITE) { + /* + * New PROT_WRITE and MAP_SHARED mmaps are not allowed when + * "future write" seal active. + */ + if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) + return -EPERM; + + /* + * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as + * MAP_SHARED and read-only, take care to not allow mprotect to + * revert protections on such mappings. Do this only for shared + * mappings. For private mappings, don't need to mask + * VM_MAYWRITE as we still want them to be COW-writable. 
+ */ + if (vma->vm_flags & VM_SHARED) + vma->vm_flags &= ~(VM_MAYWRITE); + } + + return 0; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/mm/shmem.c b/mm/shmem.c index a08cedefbfaa..eb131b9fb190 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2258,25 +2258,11 @@ out_nomem: static int shmem_mmap(struct file *file, struct vm_area_struct *vma) { struct shmem_inode_info *info = SHMEM_I(file_inode(file)); + int ret; - if (info->seals & F_SEAL_FUTURE_WRITE) { - /* - * New PROT_WRITE and MAP_SHARED mmaps are not allowed when - * "future write" seal active. - */ - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) - return -EPERM; - - /* - * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as - * MAP_SHARED and read-only, take care to not allow mprotect to - * revert protections on such mappings. Do this only for shared - * mappings. For private mappings, don't need to mask - * VM_MAYWRITE as we still want them to be COW-writable. - */ - if (vma->vm_flags & VM_SHARED) - vma->vm_flags &= ~(VM_MAYWRITE); - } + ret = seal_check_future_write(info->seals, vma); + if (ret) + return ret; /* arm64 - allow memory tagging on RAM-based files */ vma->vm_flags |= VM_MTE_ALLOWED; -- cgit v1.2.3-71-gd317 From 9ddb3c14afba8bc5950ed297f02d4ae05ff35cd1 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 May 2021 17:27:24 -0700 Subject: mm: fix struct page layout on 32-bit systems 32-bit architectures which expect 8-byte alignment for 8-byte integers and need 64-bit DMA addresses (arm, mips, ppc) had their struct page inadvertently expanded in 2019. When the dma_addr_t was added, it forced the alignment of the union to 8 bytes, which inserted a 4 byte gap between 'flags' and the union. Fix this by storing the dma_addr_t in one or two adjacent unsigned longs. This restores the alignment to that of an unsigned long. We always store the low bits in the first word to prevent the PageTail bit from being inadvertently set on a big endian platform. If that happened, get_user_pages_fast() racing against a page which was freed and reallocated to the page_pool could dereference a bogus compound_head(), which would be hard to trace back to this cause. Link: https://lkml.kernel.org/r/20210510153211.1504886-1-willy@infradead.org Fixes: c25fff7171be ("mm: add dma_addr_t to struct page") Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Ilias Apalodimas Acked-by: Jesper Dangaard Brouer Acked-by: Vlastimil Babka Tested-by: Matteo Croce Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 4 ++-- include/net/page_pool.h | 12 +++++++++++- net/core/page_pool.c | 12 +++++++----- 3 files changed, 20 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6613b26a8894..5aacc1c10a45 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -97,10 +97,10 @@ struct page { }; struct { /* page_pool used by netstack */ /** - * @dma_addr: might require a 64-bit value even on + * @dma_addr: might require a 64-bit value on * 32-bit architectures. 
*/ - dma_addr_t dma_addr; + unsigned long dma_addr[2]; }; struct { /* slab, slob and slub */ union { diff --git a/include/net/page_pool.h b/include/net/page_pool.h index 6d517a37c18b..b4b6de909c93 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, static inline dma_addr_t page_pool_get_dma_addr(struct page *page) { - return page->dma_addr; + dma_addr_t ret = page->dma_addr[0]; + if (sizeof(dma_addr_t) > sizeof(unsigned long)) + ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16; + return ret; +} + +static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr) +{ + page->dma_addr[0] = addr; + if (sizeof(dma_addr_t) > sizeof(unsigned long)) + page->dma_addr[1] = upper_32_bits(addr); } static inline bool is_page_pool_compiled_in(void) diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 9ec1aa9640ad..3c4c4c7a0402 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool, struct page *page, unsigned int dma_sync_size) { + dma_addr_t dma_addr = page_pool_get_dma_addr(page); + dma_sync_size = min(dma_sync_size, pool->p.max_len); - dma_sync_single_range_for_device(pool->p.dev, page->dma_addr, + dma_sync_single_range_for_device(pool->p.dev, dma_addr, pool->p.offset, dma_sync_size, pool->p.dma_dir); } @@ -195,7 +197,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page) if (dma_mapping_error(pool->p.dev, dma)) return false; - page->dma_addr = dma; + page_pool_set_dma_addr(page, dma); if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) page_pool_dma_sync_for_device(pool, page, pool->p.max_len); @@ -331,13 +333,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page) */ goto skip_dma_unmap; - dma = page->dma_addr; + dma = page_pool_get_dma_addr(page); - /* When page is unmapped, it cannot be returned our pool */ + /* When page is unmapped, it cannot be returned to our pool */ dma_unmap_page_attrs(pool->p.dev, dma, PAGE_SIZE << pool->p.order, pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC); - page->dma_addr = 0; + page_pool_set_dma_addr(page, 0); skip_dma_unmap: /* This may be the last page returned, releasing the pool, so * it is not safe to reference pool afterwards. -- cgit v1.2.3-71-gd317 From 076171a67789ad0107de44c2964f2e46a7d0d7b8 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 May 2021 17:27:30 -0700 Subject: mm/filemap: fix readahead return types A readahead request will not allocate more memory than can be represented by a size_t, even on systems that have HIGHMEM available. Change the length functions from returning an loff_t to a size_t. Link: https://lkml.kernel.org/r/20210510201201.1558972-1-willy@infradead.org Fixes: 32c0a6bcaa1f57 ("btrfs: add and use readahead_batch_length") Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Darrick J. 
Wong Reported-by: Linus Torvalds Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/iomap/buffered-io.c | 4 ++-- include/linux/pagemap.h | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index f2cd2034a87b..9023717c5188 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -394,7 +394,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) { struct inode *inode = rac->mapping->host; loff_t pos = readahead_pos(rac); - loff_t length = readahead_length(rac); + size_t length = readahead_length(rac); struct iomap_readpage_ctx ctx = { .rac = rac, }; @@ -402,7 +402,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) trace_iomap_readahead(inode, readahead_count(rac)); while (length > 0) { - loff_t ret = iomap_apply(inode, pos, length, 0, ops, + ssize_t ret = iomap_apply(inode, pos, length, 0, ops, &ctx, iomap_readahead_actor); if (ret <= 0) { WARN_ON_ONCE(ret == 0); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a4bd41128bf3..e89df447fae3 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -997,9 +997,9 @@ static inline loff_t readahead_pos(struct readahead_control *rac) * readahead_length - The number of bytes in this readahead request. * @rac: The readahead request. */ -static inline loff_t readahead_length(struct readahead_control *rac) +static inline size_t readahead_length(struct readahead_control *rac) { - return (loff_t)rac->_nr_pages * PAGE_SIZE; + return rac->_nr_pages * PAGE_SIZE; } /** @@ -1024,7 +1024,7 @@ static inline unsigned int readahead_count(struct readahead_control *rac) * readahead_batch_length - The number of bytes in the current batch. * @rac: The readahead request. */ -static inline loff_t readahead_batch_length(struct readahead_control *rac) +static inline size_t readahead_batch_length(struct readahead_control *rac) { return rac->_batch_count * PAGE_SIZE; } -- cgit v1.2.3-71-gd317 From add0b32ef9146a8559a60aed54c37692a5f9d34f Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 30 Apr 2021 17:06:01 -0500 Subject: siginfo: Move si_trapno inside the union inside _si_fault It turns out that linux uses si_trapno very sparingly, and as such it can be considered extra information for a very narrow selection of signals, rather than information that is present with every fault reported in siginfo. As such move si_trapno inside the union inside of _si_fault. This results in no change in placement, and makes it eaiser to extend _si_fault in the future as this reduces the number of special cases. In particular with si_trapno included in the union it is no longer a concern that the union must be pointer aligned on most architectures because the union follows immediately after si_addr which is a pointer. This change results in a difference in siginfo field placement on sparc and alpha for the fields si_addr_lsb, si_lower, si_upper, si_pkey, and si_perf. These architectures do not implement the signals that would use si_addr_lsb, si_lower, si_upper, si_pkey, and si_perf. Further these architecture have not yet implemented the userspace that would use si_perf. The point of this change is in fact to correct these placement issues before sparc or alpha grow userspace that cares. This change was discussed[1] and the agreement is that this change is currently safe. 
[1]: https://lkml.kernel.org/r/CAK8P3a0+uKYwL1NhY6Hvtieghba2hKYGD6hcKx5n8=4Gtt+pHA@mail.gmail.com Acked-by: Marco Elver v1: https://lkml.kernel.org/r/m1tunns7yf.fsf_-_@fess.ebiederm.org v2: https://lkml.kernel.org/r/20210505141101.11519-5-ebiederm@xmission.com Link: https://lkml.kernel.org/r/20210517195748.8880-1-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- arch/x86/kernel/signal_compat.c | 3 +++ include/linux/compat.h | 5 ++--- include/uapi/asm-generic/siginfo.h | 7 ++----- kernel/signal.c | 1 + 4 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index 0e5d0a7e203b..a9fcabd8a5e5 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -127,6 +127,9 @@ static inline void signal_compat_build_tests(void) BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10); BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C); + BUILD_BUG_ON(offsetof(siginfo_t, si_trapno) != 0x18); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_trapno) != 0x10); + BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18); BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10); diff --git a/include/linux/compat.h b/include/linux/compat.h index f0d2dd35d408..6af7bef15e94 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -214,12 +214,11 @@ typedef struct compat_siginfo { /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ struct { compat_uptr_t _addr; /* faulting insn/memory ref. */ -#ifdef __ARCH_SI_TRAPNO - int _trapno; /* TRAP # which caused the signal */ -#endif #define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \ sizeof(short) : __alignof__(compat_uptr_t)) union { + /* used on alpha and sparc */ + int _trapno; /* TRAP # which caused the signal */ /* * used when si_code=BUS_MCEERR_AR or * used when si_code=BUS_MCEERR_AO diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index 03d6f6d2c1fe..e663bf117b46 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -63,9 +63,6 @@ union __sifields { /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ struct { void __user *_addr; /* faulting insn/memory ref. */ -#ifdef __ARCH_SI_TRAPNO - int _trapno; /* TRAP # which caused the signal */ -#endif #ifdef __ia64__ int _imm; /* immediate value for "break" */ unsigned int _flags; /* see ia64 si_flags */ @@ -75,6 +72,8 @@ union __sifields { #define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? 
\ sizeof(short) : __alignof__(void *)) union { + /* used on alpha and sparc */ + int _trapno; /* TRAP # which caused the signal */ /* * used when si_code=BUS_MCEERR_AR or * used when si_code=BUS_MCEERR_AO @@ -150,9 +149,7 @@ typedef struct siginfo { #define si_int _sifields._rt._sigval.sival_int #define si_ptr _sifields._rt._sigval.sival_ptr #define si_addr _sifields._sigfault._addr -#ifdef __ARCH_SI_TRAPNO #define si_trapno _sifields._sigfault._trapno -#endif #define si_addr_lsb _sifields._sigfault._addr_lsb #define si_lower _sifields._sigfault._addr_bnd._lower #define si_upper _sifields._sigfault._addr_bnd._upper diff --git a/kernel/signal.c b/kernel/signal.c index c3017aa8024a..65888aec65a0 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -4607,6 +4607,7 @@ static inline void siginfo_buildtime_checks(void) /* sigfault */ CHECK_OFFSET(si_addr); + CHECK_OFFSET(si_trapno); CHECK_OFFSET(si_addr_lsb); CHECK_OFFSET(si_lower); CHECK_OFFSET(si_upper); -- cgit v1.2.3-71-gd317 From 9abcabe3111811aeae0f3a14e159b14248631875 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 30 Apr 2021 17:29:36 -0500 Subject: signal: Implement SIL_FAULT_TRAPNO Now that si_trapno is part of the union in _si_fault and available on all architectures, add SIL_FAULT_TRAPNO and update siginfo_layout to return SIL_FAULT_TRAPNO when the code assumes si_trapno is valid. There is room for future changes to reduce when si_trapno is valid but this is all that is needed to make si_trapno and the other members of the the union in _sigfault mutually exclusive. Update the code that uses siginfo_layout to deal with SIL_FAULT_TRAPNO and have the same code ignore si_trapno in in all other cases. v1: https://lkml.kernel.org/r/m1o8dvs7s7.fsf_-_@fess.ebiederm.org v2: https://lkml.kernel.org/r/20210505141101.11519-6-ebiederm@xmission.com Link: https://lkml.kernel.org/r/20210517195748.8880-2-ebiederm@xmission.com Reviewed-by: Marco Elver Signed-off-by: "Eric W. 
Biederman" --- fs/signalfd.c | 8 +++----- include/linux/signal.h | 1 + kernel/signal.c | 34 ++++++++++++---------------------- 3 files changed, 16 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/fs/signalfd.c b/fs/signalfd.c index 040a1142915f..e87e59581653 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -123,15 +123,13 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, */ case SIL_FAULT: new.ssi_addr = (long) kinfo->si_addr; -#ifdef __ARCH_SI_TRAPNO + break; + case SIL_FAULT_TRAPNO: + new.ssi_addr = (long) kinfo->si_addr; new.ssi_trapno = kinfo->si_trapno; -#endif break; case SIL_FAULT_MCEERR: new.ssi_addr = (long) kinfo->si_addr; -#ifdef __ARCH_SI_TRAPNO - new.ssi_trapno = kinfo->si_trapno; -#endif new.ssi_addr_lsb = (short) kinfo->si_addr_lsb; break; case SIL_PERF_EVENT: diff --git a/include/linux/signal.h b/include/linux/signal.h index 1e98548d7cf6..5160fd45e5ca 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -40,6 +40,7 @@ enum siginfo_layout { SIL_TIMER, SIL_POLL, SIL_FAULT, + SIL_FAULT_TRAPNO, SIL_FAULT_MCEERR, SIL_FAULT_BNDERR, SIL_FAULT_PKUERR, diff --git a/kernel/signal.c b/kernel/signal.c index 65888aec65a0..597594ee72de 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1194,6 +1194,7 @@ static inline bool has_si_pid_and_uid(struct kernel_siginfo *info) case SIL_TIMER: case SIL_POLL: case SIL_FAULT: + case SIL_FAULT_TRAPNO: case SIL_FAULT_MCEERR: case SIL_FAULT_BNDERR: case SIL_FAULT_PKUERR: @@ -2527,6 +2528,7 @@ static void hide_si_addr_tag_bits(struct ksignal *ksig) { switch (siginfo_layout(ksig->sig, ksig->info.si_code)) { case SIL_FAULT: + case SIL_FAULT_TRAPNO: case SIL_FAULT_MCEERR: case SIL_FAULT_BNDERR: case SIL_FAULT_PKUERR: @@ -3214,6 +3216,10 @@ enum siginfo_layout siginfo_layout(unsigned sig, int si_code) #endif else if ((sig == SIGTRAP) && (si_code == TRAP_PERF)) layout = SIL_PERF_EVENT; +#ifdef __ARCH_SI_TRAPNO + else if (layout == SIL_FAULT) + layout = SIL_FAULT_TRAPNO; +#endif } else if (si_code <= NSIGPOLL) layout = SIL_POLL; @@ -3317,30 +3323,22 @@ void copy_siginfo_to_external32(struct compat_siginfo *to, break; case SIL_FAULT: to->si_addr = ptr_to_compat(from->si_addr); -#ifdef __ARCH_SI_TRAPNO + break; + case SIL_FAULT_TRAPNO: + to->si_addr = ptr_to_compat(from->si_addr); to->si_trapno = from->si_trapno; -#endif break; case SIL_FAULT_MCEERR: to->si_addr = ptr_to_compat(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - to->si_trapno = from->si_trapno; -#endif to->si_addr_lsb = from->si_addr_lsb; break; case SIL_FAULT_BNDERR: to->si_addr = ptr_to_compat(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - to->si_trapno = from->si_trapno; -#endif to->si_lower = ptr_to_compat(from->si_lower); to->si_upper = ptr_to_compat(from->si_upper); break; case SIL_FAULT_PKUERR: to->si_addr = ptr_to_compat(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - to->si_trapno = from->si_trapno; -#endif to->si_pkey = from->si_pkey; break; case SIL_PERF_EVENT: @@ -3401,30 +3399,22 @@ static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, break; case SIL_FAULT: to->si_addr = compat_ptr(from->si_addr); -#ifdef __ARCH_SI_TRAPNO + break; + case SIL_FAULT_TRAPNO: + to->si_addr = compat_ptr(from->si_addr); to->si_trapno = from->si_trapno; -#endif break; case SIL_FAULT_MCEERR: to->si_addr = compat_ptr(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - to->si_trapno = from->si_trapno; -#endif to->si_addr_lsb = from->si_addr_lsb; break; case SIL_FAULT_BNDERR: to->si_addr = compat_ptr(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - to->si_trapno = 
from->si_trapno; -#endif to->si_lower = compat_ptr(from->si_lower); to->si_upper = compat_ptr(from->si_upper); break; case SIL_FAULT_PKUERR: to->si_addr = compat_ptr(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - to->si_trapno = from->si_trapno; -#endif to->si_pkey = from->si_pkey; break; case SIL_PERF_EVENT: -- cgit v1.2.3-71-gd317 From af5eeab7e8e8c2f0fad10e4ab8cc8092012a2d5b Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 2 May 2021 14:27:24 -0500 Subject: signal: Factor force_sig_perf out of perf_sigtrap Separate filling in siginfo for TRAP_PERF from deciding that siginal needs to be sent. There are enough little details that need to be correct when properly filling in siginfo_t that it is easy to make mistakes if filling in the siginfo_t is in the same function with other logic. So factor out force_sig_perf to reduce the cognative load of on reviewers, maintainers and implementors. v1: https://lkml.kernel.org/r/m17dkjqqxz.fsf_-_@fess.ebiederm.org v2: https://lkml.kernel.org/r/20210505141101.11519-10-ebiederm@xmission.com Link: https://lkml.kernel.org/r/20210517195748.8880-3-ebiederm@xmission.com Reviewed-by: Marco Elver Acked-by: Peter Zijlstra (Intel) Signed-off-by: "Eric W. Biederman" --- include/linux/sched/signal.h | 1 + kernel/events/core.c | 11 ++--------- kernel/signal.c | 13 +++++++++++++ 3 files changed, 16 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 3f6a0fcaa10c..7f4278fa21fe 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -326,6 +326,7 @@ int send_sig_mceerr(int code, void __user *, short, struct task_struct *); int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); int force_sig_pkuerr(void __user *addr, u32 pkey); +int force_sig_perf(void __user *addr, u32 type, u64 sig_data); int force_sig_ptrace_errno_trap(int errno, void __user *addr); diff --git a/kernel/events/core.c b/kernel/events/core.c index 928b166d888e..48ea8863183b 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6394,8 +6394,6 @@ void perf_event_wakeup(struct perf_event *event) static void perf_sigtrap(struct perf_event *event) { - struct kernel_siginfo info; - /* * We'd expect this to only occur if the irq_work is delayed and either * ctx->task or current has changed in the meantime. This can be the @@ -6410,13 +6408,8 @@ static void perf_sigtrap(struct perf_event *event) if (current->flags & PF_EXITING) return; - clear_siginfo(&info); - info.si_signo = SIGTRAP; - info.si_code = TRAP_PERF; - info.si_errno = event->attr.type; - info.si_perf = event->attr.sig_data; - info.si_addr = (void __user *)event->pending_addr; - force_sig_info(&info); + force_sig_perf((void __user *)event->pending_addr, + event->attr.type, event->attr.sig_data); } static void perf_pending_event_disable(struct perf_event *event) diff --git a/kernel/signal.c b/kernel/signal.c index 597594ee72de..3a18d13c39b2 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1763,6 +1763,19 @@ int force_sig_pkuerr(void __user *addr, u32 pkey) } #endif +int force_sig_perf(void __user *addr, u32 type, u64 sig_data) +{ + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = SIGTRAP; + info.si_errno = type; + info.si_code = TRAP_PERF; + info.si_addr = addr; + info.si_perf = sig_data; + return force_sig_info(&info); +} + /* For the crazy architectures that include trap information in * the errno field, instead of an actual errno value. 
*/ -- cgit v1.2.3-71-gd317 From 0683b53197b55343a166f1507086823030809a19 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 2 May 2021 17:28:31 -0500 Subject: signal: Deliver all of the siginfo perf data in _perf Don't abuse si_errno and deliver all of the perf data in _perf member of siginfo_t. Note: The data field in the perf data structures in a u64 to allow a pointer to be encoded without needed to implement a 32bit and 64bit version of the same structure. There already exists a 32bit and 64bit versions siginfo_t, and the 32bit version can not include a 64bit member as it only has 32bit alignment. So unsigned long is used in siginfo_t instead of a u64 as unsigned long can encode a pointer on all architectures linux supports. v1: https://lkml.kernel.org/r/m11rarqqx2.fsf_-_@fess.ebiederm.org v2: https://lkml.kernel.org/r/20210503203814.25487-10-ebiederm@xmission.com v3: https://lkml.kernel.org/r/20210505141101.11519-11-ebiederm@xmission.com Link: https://lkml.kernel.org/r/20210517195748.8880-4-ebiederm@xmission.com Reviewed-by: Marco Elver Signed-off-by: "Eric W. Biederman" --- arch/m68k/kernel/signal.c | 3 ++- arch/x86/kernel/signal_compat.c | 6 ++++-- fs/signalfd.c | 3 ++- include/linux/compat.h | 5 ++++- include/uapi/asm-generic/siginfo.h | 8 ++++++-- include/uapi/linux/perf_event.h | 2 +- include/uapi/linux/signalfd.h | 4 ++-- kernel/signal.c | 21 +++++++++++++-------- .../testing/selftests/perf_events/sigtrap_threads.c | 14 +++++++------- 9 files changed, 41 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index a4b7ee1df211..8f215e79e70e 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -623,7 +623,8 @@ static inline void siginfo_build_tests(void) BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12); /* _sigfault._perf */ - BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x10); + BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10); + BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14); /* _sigpoll */ BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c); diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index a9fcabd8a5e5..06743ec054d2 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -141,8 +141,10 @@ static inline void signal_compat_build_tests(void) BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20); BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14); - BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x18); - BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf) != 0x10); + BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x18); + BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x20); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_data) != 0x10); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_type) != 0x14); CHECK_CSI_OFFSET(_sigpoll); CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int)); diff --git a/fs/signalfd.c b/fs/signalfd.c index e87e59581653..373df2f12415 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -134,7 +134,8 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, break; case SIL_PERF_EVENT: new.ssi_addr = (long) kinfo->si_addr; - new.ssi_perf = kinfo->si_perf; + new.ssi_perf_type = kinfo->si_perf_type; + new.ssi_perf_data = kinfo->si_perf_data; break; case SIL_CHLD: new.ssi_pid = kinfo->si_pid; diff --git a/include/linux/compat.h b/include/linux/compat.h index 6af7bef15e94..a27fffaae121 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -236,7 +236,10 @@ 
typedef struct compat_siginfo { u32 _pkey; } _addr_pkey; /* used when si_code=TRAP_PERF */ - compat_ulong_t _perf; + struct { + compat_ulong_t _data; + u32 _type; + } _perf; }; } _sigfault; diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index e663bf117b46..5a3c221f4c9d 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -91,7 +91,10 @@ union __sifields { __u32 _pkey; } _addr_pkey; /* used when si_code=TRAP_PERF */ - unsigned long _perf; + struct { + unsigned long _data; + __u32 _type; + } _perf; }; } _sigfault; @@ -154,7 +157,8 @@ typedef struct siginfo { #define si_lower _sifields._sigfault._addr_bnd._lower #define si_upper _sifields._sigfault._addr_bnd._upper #define si_pkey _sifields._sigfault._addr_pkey._pkey -#define si_perf _sifields._sigfault._perf +#define si_perf_data _sifields._sigfault._perf._data +#define si_perf_type _sifields._sigfault._perf._type #define si_band _sifields._sigpoll._band #define si_fd _sifields._sigpoll._fd #define si_call_addr _sifields._sigsys._call_addr diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e54e639248c8..7b14753b3d38 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -464,7 +464,7 @@ struct perf_event_attr { /* * User provided data if sigtrap=1, passed back to user via - * siginfo_t::si_perf, e.g. to permit user to identify the event. + * siginfo_t::si_perf_data, e.g. to permit user to identify the event. */ __u64 sig_data; }; diff --git a/include/uapi/linux/signalfd.h b/include/uapi/linux/signalfd.h index 7e333042c7e3..e78dddf433fc 100644 --- a/include/uapi/linux/signalfd.h +++ b/include/uapi/linux/signalfd.h @@ -39,8 +39,8 @@ struct signalfd_siginfo { __s32 ssi_syscall; __u64 ssi_call_addr; __u32 ssi_arch; - __u32 __pad3; - __u64 ssi_perf; + __u32 ssi_perf_type; + __u64 ssi_perf_data; /* * Pad strcture to 128 bytes. 
Remember to update the diff --git a/kernel/signal.c b/kernel/signal.c index 3a18d13c39b2..dca53515ae3f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1768,11 +1768,13 @@ int force_sig_perf(void __user *addr, u32 type, u64 sig_data) struct kernel_siginfo info; clear_siginfo(&info); - info.si_signo = SIGTRAP; - info.si_errno = type; - info.si_code = TRAP_PERF; - info.si_addr = addr; - info.si_perf = sig_data; + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = TRAP_PERF; + info.si_addr = addr; + info.si_perf_data = sig_data; + info.si_perf_type = type; + return force_sig_info(&info); } @@ -3356,7 +3358,8 @@ void copy_siginfo_to_external32(struct compat_siginfo *to, break; case SIL_PERF_EVENT: to->si_addr = ptr_to_compat(from->si_addr); - to->si_perf = from->si_perf; + to->si_perf_data = from->si_perf_data; + to->si_perf_type = from->si_perf_type; break; case SIL_CHLD: to->si_pid = from->si_pid; @@ -3432,7 +3435,8 @@ static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, break; case SIL_PERF_EVENT: to->si_addr = compat_ptr(from->si_addr); - to->si_perf = from->si_perf; + to->si_perf_data = from->si_perf_data; + to->si_perf_type = from->si_perf_type; break; case SIL_CHLD: to->si_pid = from->si_pid; @@ -4615,7 +4619,8 @@ static inline void siginfo_buildtime_checks(void) CHECK_OFFSET(si_lower); CHECK_OFFSET(si_upper); CHECK_OFFSET(si_pkey); - CHECK_OFFSET(si_perf); + CHECK_OFFSET(si_perf_data); + CHECK_OFFSET(si_perf_type); /* sigpoll */ CHECK_OFFSET(si_band); diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c index 78ddf5e11625..8e83cf91513a 100644 --- a/tools/testing/selftests/perf_events/sigtrap_threads.c +++ b/tools/testing/selftests/perf_events/sigtrap_threads.c @@ -43,7 +43,7 @@ static struct { siginfo_t first_siginfo; /* First observed siginfo_t. */ } ctx; -/* Unique value to check si_perf is correctly set from perf_event_attr::sig_data. */ +/* Unique value to check si_perf_data is correctly set from perf_event_attr::sig_data. */ #define TEST_SIG_DATA(addr) (~(unsigned long)(addr)) static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr) @@ -164,8 +164,8 @@ TEST_F(sigtrap_threads, enable_event) EXPECT_EQ(ctx.signal_count, NUM_THREADS); EXPECT_EQ(ctx.tids_want_signal, 0); EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on); - EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT); - EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on)); + EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT); + EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on)); /* Check enabled for parent. */ ctx.iterate_on = 0; @@ -183,8 +183,8 @@ TEST_F(sigtrap_threads, modify_and_enable_event) EXPECT_EQ(ctx.signal_count, NUM_THREADS); EXPECT_EQ(ctx.tids_want_signal, 0); EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on); - EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT); - EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on)); + EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT); + EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on)); /* Check enabled for parent. 
*/ ctx.iterate_on = 0; @@ -203,8 +203,8 @@ TEST_F(sigtrap_threads, signal_stress) EXPECT_EQ(ctx.signal_count, NUM_THREADS * ctx.iterate_on); EXPECT_EQ(ctx.tids_want_signal, 0); EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on); - EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT); - EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on)); + EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT); + EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on)); } TEST_HARNESS_MAIN -- cgit v1.2.3-71-gd317 From 3410fbcd47dc6479af4309febf760ccaa5efb472 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 12 May 2021 13:52:27 +0300 Subject: {net, RDMA}/mlx5: Fix override of log_max_qp by other device mlx5_core_dev holds pointer to static profile, hence when the log_max_qp of the profile is override by some device, then it effect all other mlx5 devices that share the same profile. Fix it by having a profile instance for every mlx5 device. Fixes: 883371c453b9 ("net/mlx5: Check FW limitations on log_max_qp before setting it") Signed-off-by: Maor Gottlieb Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/mr.c | 4 +-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 11 +++---- include/linux/mlx5/driver.h | 44 +++++++++++++------------- 3 files changed, 29 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 4388afeff251..9662cd39c7ff 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -743,10 +743,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / MLX5_IB_UMR_OCTOWORD; ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; - if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && + if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) && !dev->is_rep && mlx5_core_is_pf(dev->mdev) && mlx5_ib_can_load_pas_with_umr(dev, 0)) - ent->limit = dev->mdev->profile->mr_cache[i].limit; + ent->limit = dev->mdev->profile.mr_cache[i].limit; else ent->limit = 0; spin_lock_irq(&ent->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c114365eb126..a1d67bd7fb43 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -503,7 +503,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx) static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) { - struct mlx5_profile *prof = dev->profile; + struct mlx5_profile *prof = &dev->profile; void *set_hca_cap; int err; @@ -524,11 +524,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) to_fw_pkey_sz(dev, 128)); /* Check log_max_qp from HCA caps to set in current profile */ - if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) { + if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n", - profile[prof_sel].log_max_qp, + prof->log_max_qp, MLX5_CAP_GEN_MAX(dev, log_max_qp)); - profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp); + prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp); } if (prof->mask & MLX5_PROF_MASK_QP_SIZE) MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, @@ -1381,8 +1381,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) struct mlx5_priv *priv = &dev->priv; int err; - 
dev->profile = &profile[profile_idx]; - + memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile)); INIT_LIST_HEAD(&priv->ctx_list); spin_lock_init(&priv->ctx_lock); mutex_init(&dev->intf_state_mutex); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f8e8d7e90616..020a8f7fdbdd 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -703,6 +703,27 @@ struct mlx5_hv_vhca; #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) +enum { + MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, + MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, +}; + +enum { + MR_CACHE_LAST_STD_ENTRY = 20, + MLX5_IMR_MTT_CACHE_ENTRY, + MLX5_IMR_KSM_CACHE_ENTRY, + MAX_MR_CACHE_ENTRIES +}; + +struct mlx5_profile { + u64 mask; + u8 log_max_qp; + struct { + int size; + int limit; + } mr_cache[MAX_MR_CACHE_ENTRIES]; +}; + struct mlx5_core_dev { struct device *device; enum mlx5_coredev_type coredev_type; @@ -731,7 +752,7 @@ struct mlx5_core_dev { struct mutex intf_state_mutex; unsigned long intf_state; struct mlx5_priv priv; - struct mlx5_profile *profile; + struct mlx5_profile profile; u32 issi; struct mlx5e_resources mlx5e_res; struct mlx5_dm *dm; @@ -1083,18 +1104,6 @@ static inline u8 mlx5_mkey_variant(u32 mkey) return mkey & 0xff; } -enum { - MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, - MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, -}; - -enum { - MR_CACHE_LAST_STD_ENTRY = 20, - MLX5_IMR_MTT_CACHE_ENTRY, - MLX5_IMR_KSM_CACHE_ENTRY, - MAX_MR_CACHE_ENTRIES -}; - /* Async-atomic event notifier used by mlx5 core to forward FW * evetns recived from event queue to mlx5 consumers. * Optimise event queue dipatching. @@ -1148,15 +1157,6 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, struct ib_device *device, struct rdma_netdev_alloc_params *params); -struct mlx5_profile { - u64 mask; - u8 log_max_qp; - struct { - int size; - int limit; - } mr_cache[MAX_MR_CACHE_ENTRIES]; -}; - enum { MLX5_PCI_DEV_IS_VF = 1 << 0, }; -- cgit v1.2.3-71-gd317 From 7c9f131f366ab414691907fa0407124ea2b2f3bc Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 22 Apr 2021 15:48:10 +0300 Subject: {net,vdpa}/mlx5: Configure interface MAC into mpfs L2 table net/mlx5: Expose MPFS configuration API MPFS is the multi physical function switch that bridges traffic between the physical port and any physical functions associated with it. The driver is required to add or remove MAC entries to properly forward incoming traffic to the correct physical function. We export the API to control MPFS so that other drivers, such as mlx5_vdpa are able to add MAC addresses of their network interfaces. The MAC address of the vdpa interface must be configured into the MPFS L2 address. Failing to do so could cause, in some NIC configurations, failure to forward packets to the vdpa network device instance. Fix this by adding calls to update the MPFS table. 
CC: CC: CC: Fixes: 1a86b377aa21 ("vdpa/mlx5: Add VDPA driver for supported mlx5 devices") Signed-off-by: Eli Cohen Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h | 5 +---- drivers/vdpa/mlx5/net/mlx5_vnet.c | 19 ++++++++++++++++++- include/linux/mlx5/mpfs.h | 18 ++++++++++++++++++ 6 files changed, 42 insertions(+), 5 deletions(-) create mode 100644 include/linux/mlx5/mpfs.h (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 0d571a0c76d9..0b75fab41ae8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "en.h" #include "en_rep.h" #include "lib/mpfs.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 570f2280823c..b88705a3a1a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "esw/acl/lgcy.h" #include "esw/legacy.h" #include "mlx5_core.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index fd8449ff9e17..839a01da110f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include "mlx5_core.h" #include "lib/mpfs.h" @@ -175,6 +176,7 @@ out: mutex_unlock(&mpfs->lock); return err; } +EXPORT_SYMBOL(mlx5_mpfs_add_mac); int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { @@ -206,3 +208,4 @@ unlock: mutex_unlock(&mpfs->lock); return err; } +EXPORT_SYMBOL(mlx5_mpfs_del_mac); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h index 4a7b2c3203a7..4a293542a7aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h @@ -84,12 +84,9 @@ struct l2addr_node { #ifdef CONFIG_MLX5_MPFS int mlx5_mpfs_init(struct mlx5_core_dev *dev); void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev); -int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac); -int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac); #else /* #ifndef CONFIG_MLX5_MPFS */ static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; } static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {} -static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } -static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } #endif + #endif diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 189e4385df40..dda5dc6f7737 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "mlx5_vdpa.h" MODULE_AUTHOR("Eli Cohen "); @@ -1859,11 +1860,16 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb static void mlx5_vdpa_free(struct vdpa_device *vdev) { struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_core_dev *pfmdev; struct mlx5_vdpa_net *ndev; ndev = to_mlx5_vdpa_ndev(mvdev); 
free_resources(ndev); + if (!is_zero_ether_addr(ndev->config.mac)) { + pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); + mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); + } mlx5_vdpa_free_resources(&ndev->mvdev); mutex_destroy(&ndev->reslock); } @@ -1990,6 +1996,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name) { struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev); struct virtio_net_config *config; + struct mlx5_core_dev *pfmdev; struct mlx5_vdpa_dev *mvdev; struct mlx5_vdpa_net *ndev; struct mlx5_core_dev *mdev; @@ -2023,10 +2030,17 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name) if (err) goto err_mtu; + if (!is_zero_ether_addr(config->mac)) { + pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev)); + err = mlx5_mpfs_add_mac(pfmdev, config->mac); + if (err) + goto err_mtu; + } + mvdev->vdev.dma_dev = mdev->device; err = mlx5_vdpa_alloc_resources(&ndev->mvdev); if (err) - goto err_mtu; + goto err_mpfs; err = alloc_resources(ndev); if (err) @@ -2044,6 +2058,9 @@ err_reg: free_resources(ndev); err_res: mlx5_vdpa_free_resources(&ndev->mvdev); +err_mpfs: + if (!is_zero_ether_addr(config->mac)) + mlx5_mpfs_del_mac(pfmdev, config->mac); err_mtu: mutex_destroy(&ndev->reslock); put_device(&mvdev->vdev.dev); diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h new file mode 100644 index 000000000000..bf700c8d5516 --- /dev/null +++ b/include/linux/mlx5/mpfs.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB + * Copyright (c) 2021 Mellanox Technologies Ltd. + */ + +#ifndef _MLX5_MPFS_ +#define _MLX5_MPFS_ + +struct mlx5_core_dev; + +#ifdef CONFIG_MLX5_MPFS +int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac); +int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac); +#else /* #ifndef CONFIG_MLX5_MPFS */ +static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } +static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } +#endif + +#endif -- cgit v1.2.3-71-gd317 From ba6e1d8422bd476ad79da409639a773c02f0cbad Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 14 May 2021 22:04:36 +0200 Subject: platform/surface: aggregator: avoid clang -Wconstant-conversion warning Clang complains about the assignment of SSAM_ANY_IID to ssam_device_uid->instance: drivers/platform/surface/surface_aggregator_registry.c:478:25: error: implicit conversion from 'int' to '__u8' (aka 'unsigned char') changes value from 65535 to 255 [-Werror,-Wconstant-conversion] { SSAM_VDEV(HUB, 0x02, SSAM_ANY_IID, 0x00) }, ~ ^~~~~~~~~~~~ include/linux/surface_aggregator/device.h:71:23: note: expanded from macro 'SSAM_ANY_IID' #define SSAM_ANY_IID 0xffff ^~~~~~ include/linux/surface_aggregator/device.h:126:63: note: expanded from macro 'SSAM_VDEV' SSAM_DEVICE(SSAM_DOMAIN_VIRTUAL, SSAM_VIRTUAL_TC_##cat, tid, iid, fun) ^~~ include/linux/surface_aggregator/device.h:102:41: note: expanded from macro 'SSAM_DEVICE' .instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0, \ ^~~ The assignment doesn't actually happen, but clang checks the type limits before checking whether this assignment is reached. Replace the ?: operator with a __builtin_choose_expr() invocation that avoids the warning for the untaken part. 
Fixes: eb0e90a82098 ("platform/surface: aggregator: Add dedicated bus and device type") Cc: platform-driver-x86@vger.kernel.org Signed-off-by: Arnd Bergmann Reviewed-by: Nathan Chancellor Reviewed-by: Maximilian Luz Link: https://lore.kernel.org/r/20210514200453.1542978-1-arnd@kernel.org Signed-off-by: Hans de Goede --- include/linux/surface_aggregator/device.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h index 4441ad667c3f..6ff9c58b3e17 100644 --- a/include/linux/surface_aggregator/device.h +++ b/include/linux/surface_aggregator/device.h @@ -98,9 +98,9 @@ struct ssam_device_uid { | (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0), \ .domain = d, \ .category = cat, \ - .target = ((tid) != SSAM_ANY_TID) ? (tid) : 0, \ - .instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0, \ - .function = ((fun) != SSAM_ANY_FUN) ? (fun) : 0 \ + .target = __builtin_choose_expr((tid) != SSAM_ANY_TID, (tid), 0), \ + .instance = __builtin_choose_expr((iid) != SSAM_ANY_IID, (iid), 0), \ + .function = __builtin_choose_expr((fun) != SSAM_ANY_FUN, (fun), 0) /** * SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with -- cgit v1.2.3-71-gd317 From 6c60ff048ca1e0739f39aa25996543c6e662a46c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 14 May 2021 15:18:41 +0200 Subject: block: prevent block device lookups at the beginning of del_gendisk As an artifact of how gendisk lookup used to work in earlier kernels, GENHD_FL_UP is only cleared very late in del_gendisk, and a global lock is used to prevent opens from succeeding while del_gendisk is tearing down the gendisk. Switch to clearing the flag early and under bd_mutex so that callers can use bd_mutex to stabilize the flag, which removes the need for the global mutex. Signed-off-by: Christoph Hellwig Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/20210514131842.1600568-2-hch@lst.de Signed-off-by: Jens Axboe --- block/genhd.c | 11 +---------- fs/block_dev.c | 15 +++++---------- include/linux/genhd.h | 2 -- 3 files changed, 6 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/block/genhd.c b/block/genhd.c index 39ca97b0edc6..9f8cb7beaad1 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -29,8 +29,6 @@ static struct kobject *block_depr; -DECLARE_RWSEM(bdev_lookup_sem); - /* for extended dynamic devt allocation, currently only one major is used */ #define NR_EXT_DEVT (1 << MINORBITS) static DEFINE_IDA(ext_devt_ida); @@ -609,13 +607,8 @@ void del_gendisk(struct gendisk *disk) blk_integrity_del(disk); disk_del_events(disk); - /* - * Block lookups of the disk until all bdevs are unhashed and the - * disk is marked as dead (GENHD_FL_UP cleared). 
- */ - down_write(&bdev_lookup_sem); - mutex_lock(&disk->part0->bd_mutex); + disk->flags &= ~GENHD_FL_UP; blk_drop_partitions(disk); mutex_unlock(&disk->part0->bd_mutex); @@ -629,8 +622,6 @@ void del_gendisk(struct gendisk *disk) remove_inode_hash(disk->part0->bd_inode); set_capacity(disk, 0); - disk->flags &= ~GENHD_FL_UP; - up_write(&bdev_lookup_sem); if (!(disk->flags & GENHD_FL_HIDDEN)) { sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); diff --git a/fs/block_dev.c b/fs/block_dev.c index eb265d72fce8..580bae995b87 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1298,6 +1298,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode) struct gendisk *disk = bdev->bd_disk; int ret = 0; + if (!(disk->flags & GENHD_FL_UP)) + return -ENXIO; + if (!bdev->bd_openers) { if (!bdev_is_partition(bdev)) { ret = 0; @@ -1332,8 +1335,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode) whole->bd_part_count++; mutex_unlock(&whole->bd_mutex); - if (!(disk->flags & GENHD_FL_UP) || - !bdev_nr_sectors(bdev)) { + if (!bdev_nr_sectors(bdev)) { __blkdev_put(whole, mode, 1); bdput(whole); return -ENXIO; @@ -1364,16 +1366,12 @@ struct block_device *blkdev_get_no_open(dev_t dev) struct block_device *bdev; struct gendisk *disk; - down_read(&bdev_lookup_sem); bdev = bdget(dev); if (!bdev) { - up_read(&bdev_lookup_sem); blk_request_module(dev); - down_read(&bdev_lookup_sem); - bdev = bdget(dev); if (!bdev) - goto unlock; + return NULL; } disk = bdev->bd_disk; @@ -1383,14 +1381,11 @@ struct block_device *blkdev_get_no_open(dev_t dev) goto put_disk; if (!try_module_get(bdev->bd_disk->fops->owner)) goto put_disk; - up_read(&bdev_lookup_sem); return bdev; put_disk: put_disk(disk); bdput: bdput(bdev); -unlock: - up_read(&bdev_lookup_sem); return NULL; } diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 7e9660ea967d..6fc26f7bdf71 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -306,8 +306,6 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev, } #endif /* CONFIG_SYSFS */ -extern struct rw_semaphore bdev_lookup_sem; - dev_t blk_lookup_devt(const char *name, int partno); void blk_request_module(dev_t devt); #ifdef CONFIG_BLOCK -- cgit v1.2.3-71-gd317 From 80dd33cf72d1ab4f0af303f1fa242c6d6c8d328f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 14 May 2021 14:10:15 +0200 Subject: drivers: base: Fix device link removal When device_link_free() drops references to the supplier and consumer devices of the device link going away and the reference being dropped turns out to be the last one for any of those device objects, its ->release callback will be invoked and it may sleep which goes against the SRCU callback execution requirements. To address this issue, make the device link removal code carry out the device_link_free() actions preceded by SRCU synchronization from a separate work item (the "long" workqueue is used for that, because it does not matter when the device link memory is released and it may take time to get to that point) instead of using SRCU callbacks. While at it, make the code work analogously when SRCU is not enabled to reduce the differences between the SRCU and non-SRCU cases. Fixes: 843e600b8a2b ("driver core: Fix sleeping in invalid context during device link deletion") Cc: stable Reported-by: chenxiang (M) Tested-by: chenxiang (M) Reviewed-by: Saravana Kannan Signed-off-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/5722787.lOV4Wx5bFT@kreacher Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 37 +++++++++++++++++++++++-------------- include/linux/device.h | 6 ++---- 2 files changed, 25 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/core.c b/drivers/base/core.c index 628e33939aca..61c19641e1d0 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -194,6 +194,11 @@ int device_links_read_lock_held(void) { return srcu_read_lock_held(&device_links_srcu); } + +static void device_link_synchronize_removal(void) +{ + synchronize_srcu(&device_links_srcu); +} #else /* !CONFIG_SRCU */ static DECLARE_RWSEM(device_links_lock); @@ -224,6 +229,10 @@ int device_links_read_lock_held(void) return lockdep_is_held(&device_links_lock); } #endif + +static inline void device_link_synchronize_removal(void) +{ +} #endif /* !CONFIG_SRCU */ static bool device_is_ancestor(struct device *dev, struct device *target) @@ -445,8 +454,13 @@ static struct attribute *devlink_attrs[] = { }; ATTRIBUTE_GROUPS(devlink); -static void device_link_free(struct device_link *link) +static void device_link_release_fn(struct work_struct *work) { + struct device_link *link = container_of(work, struct device_link, rm_work); + + /* Ensure that all references to the link object have been dropped. */ + device_link_synchronize_removal(); + while (refcount_dec_not_one(&link->rpm_active)) pm_runtime_put(link->supplier); @@ -455,24 +469,19 @@ static void device_link_free(struct device_link *link) kfree(link); } -#ifdef CONFIG_SRCU -static void __device_link_free_srcu(struct rcu_head *rhead) -{ - device_link_free(container_of(rhead, struct device_link, rcu_head)); -} - static void devlink_dev_release(struct device *dev) { struct device_link *link = to_devlink(dev); - call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu); -} -#else -static void devlink_dev_release(struct device *dev) -{ - device_link_free(to_devlink(dev)); + INIT_WORK(&link->rm_work, device_link_release_fn); + /* + * It may take a while to complete this work because of the SRCU + * synchronization in device_link_release_fn() and if the consumer or + * supplier devices get deleted when it runs, so put it into the "long" + * workqueue. + */ + queue_work(system_long_wq, &link->rm_work); } -#endif static struct class devlink_class = { .name = "devlink", diff --git a/include/linux/device.h b/include/linux/device.h index 38a2071cf776..f1a00040fa53 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -570,7 +570,7 @@ struct device { * @flags: Link flags. * @rpm_active: Whether or not the consumer device is runtime-PM-active. * @kref: Count repeated addition of the same link. - * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. + * @rm_work: Work structure used for removing the link. * @supplier_preactivated: Supplier has been made active before consumer probe. */ struct device_link { @@ -583,9 +583,7 @@ struct device_link { u32 flags; refcount_t rpm_active; struct kref kref; -#ifdef CONFIG_SRCU - struct rcu_head rcu_head; -#endif + struct work_struct rm_work; bool supplier_preactivated; /* Owned by consumer probe. 
*/ }; -- cgit v1.2.3-71-gd317 From f747e6667ebb2ffb8133486c9cd19800d72b0d98 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 22 May 2021 17:42:02 -0700 Subject: linux/bits.h: fix compilation error with GENMASK GENMASK() has an input check which uses __builtin_choose_expr() to enable a compile time sanity check of its inputs if they are known at compile time. However, it turns out that __builtin_constant_p() does not always return a compile time constant [0]. It was thought this problem was fixed with gcc 4.9 [1], but apparently this is not the case [2]. Switch to use __is_constexpr() instead which always returns a compile time constant, regardless of its inputs. Link: https://lore.kernel.org/lkml/42b4342b-aefc-a16a-0d43-9f9c0d63ba7a@rasmusvillemoes.dk [0] Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=19449 [1] Link: https://lore.kernel.org/lkml/1ac7bbc2-45d9-26ed-0b33-bf382b8d858b@I-love.SAKURA.ne.jp [2] Link: https://lkml.kernel.org/r/20210511203716.117010-1-rikard.falkeborn@gmail.com Signed-off-by: Rikard Falkeborn Reported-by: Tetsuo Handa Acked-by: Arnd Bergmann Reviewed-by: Andy Shevchenko Cc: Ard Biesheuvel Cc: Yury Norov Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/bits.h | 2 +- include/linux/const.h | 8 ++++++++ include/linux/minmax.h | 10 ++-------- tools/include/linux/bits.h | 2 +- tools/include/linux/const.h | 8 ++++++++ 5 files changed, 20 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bits.h b/include/linux/bits.h index 7f475d59a097..87d112650dfb 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -22,7 +22,7 @@ #include #define GENMASK_INPUT_CHECK(h, l) \ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ - __builtin_constant_p((l) > (h)), (l) > (h), 0))) + __is_constexpr((l) > (h)), (l) > (h), 0))) #else /* * BUILD_BUG_ON_ZERO is not available in h files included from asm files, diff --git a/include/linux/const.h b/include/linux/const.h index 81b8aae5a855..435ddd72d2c4 100644 --- a/include/linux/const.h +++ b/include/linux/const.h @@ -3,4 +3,12 @@ #include +/* + * This returns a constant expression while determining if an argument is + * a constant expression, most importantly without evaluating the argument. + * Glory to Martin Uecker + */ +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) + #endif /* _LINUX_CONST_H */ diff --git a/include/linux/minmax.h b/include/linux/minmax.h index c0f57b0c64d9..5433c08fcc68 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -2,6 +2,8 @@ #ifndef _LINUX_MINMAX_H #define _LINUX_MINMAX_H +#include + /* * min()/max()/clamp() macros must accomplish three things: * @@ -17,14 +19,6 @@ #define __typecheck(x, y) \ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) -/* - * This returns a constant expression while determining if an argument is - * a constant expression, most importantly without evaluating the argument. - * Glory to Martin Uecker - */ -#define __is_constexpr(x) \ - (sizeof(int) == sizeof(*(8 ? 
((void *)((long)(x) * 0l)) : (int *)8))) - #define __no_side_effects(x, y) \ (__is_constexpr(x) && __is_constexpr(y)) diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h index 7f475d59a097..87d112650dfb 100644 --- a/tools/include/linux/bits.h +++ b/tools/include/linux/bits.h @@ -22,7 +22,7 @@ #include #define GENMASK_INPUT_CHECK(h, l) \ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ - __builtin_constant_p((l) > (h)), (l) > (h), 0))) + __is_constexpr((l) > (h)), (l) > (h), 0))) #else /* * BUILD_BUG_ON_ZERO is not available in h files included from asm files, diff --git a/tools/include/linux/const.h b/tools/include/linux/const.h index 81b8aae5a855..435ddd72d2c4 100644 --- a/tools/include/linux/const.h +++ b/tools/include/linux/const.h @@ -3,4 +3,12 @@ #include +/* + * This returns a constant expression while determining if an argument is + * a constant expression, most importantly without evaluating the argument. + * Glory to Martin Uecker + */ +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) + #endif /* _LINUX_CONST_H */ -- cgit v1.2.3-71-gd317 From 08b2b6fdf6b26032f025084ce2893924a0cdb4a2 Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Mon, 24 May 2021 16:29:43 +0800 Subject: cgroup: fix spelling mistakes Fix some spelling mistakes in comments: hierarhcy ==> hierarchy automtically ==> automatically overriden ==> overridden In absense of .. or ==> In absence of .. and assocaited ==> associated taget ==> target initate ==> initiate succeded ==> succeeded curremt ==> current udpated ==> updated Signed-off-by: Zhen Lei Signed-off-by: Tejun Heo --- include/linux/cgroup-defs.h | 6 +++--- include/linux/cgroup.h | 2 +- kernel/cgroup/cgroup-v1.c | 2 +- kernel/cgroup/cgroup.c | 8 ++++---- kernel/cgroup/cpuset.c | 2 +- kernel/cgroup/rdma.c | 2 +- kernel/cgroup/rstat.c | 2 +- 7 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 559ee05f86b2..fb8f6d2cd104 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -232,7 +232,7 @@ struct css_set { struct list_head task_iters; /* - * On the default hierarhcy, ->subsys[ssid] may point to a css + * On the default hierarchy, ->subsys[ssid] may point to a css * attached to an ancestor instead of the cgroup this css_set is * associated with. The following node is anchored at * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to @@ -668,7 +668,7 @@ struct cgroup_subsys { */ bool threaded:1; - /* the following two fields are initialized automtically during boot */ + /* the following two fields are initialized automatically during boot */ int id; const char *name; @@ -757,7 +757,7 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer. * On boot, sock_cgroup_data records the cgroup that the sock was created * in so that cgroup2 matches can be made; however, once either net_prio or - * net_cls starts being used, the area is overriden to carry prioidx and/or + * net_cls starts being used, the area is overridden to carry prioidx and/or * classid. The two modes are distinguished by whether the lowest bit is * set. Clear bit indicates cgroup pointer while set bit prioidx and * classid. 
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 4f2f79de083e..6bc9c76680b2 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -32,7 +32,7 @@ struct kernel_clone_args; #ifdef CONFIG_CGROUPS /* - * All weight knobs on the default hierarhcy should use the following min, + * All weight knobs on the default hierarchy should use the following min, * default and max values. The default value is the logarithmic center of * MIN and MAX and allows 100x to be expressed in both directions. */ diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 391aa570369b..8190b6bfc978 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -1001,7 +1001,7 @@ static int check_cgroupfs_options(struct fs_context *fc) ctx->subsys_mask &= enabled; /* - * In absense of 'none', 'name=' or subsystem name options, + * In absence of 'none', 'name=' and subsystem name options, * let's default to 'all'. */ if (!ctx->subsys_mask && !ctx->none && !ctx->name) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index e7a9a2998245..21ecc6ee6a6d 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -468,7 +468,7 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, * @cgrp: the cgroup of interest * @ss: the subsystem of interest * - * Find and get @cgrp's css assocaited with @ss. If the css doesn't exist + * Find and get @cgrp's css associated with @ss. If the css doesn't exist * or is offline, %NULL is returned. */ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp, @@ -1633,7 +1633,7 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) /** * css_clear_dir - remove subsys files in a cgroup directory - * @css: taget css + * @css: target css */ static void css_clear_dir(struct cgroup_subsys_state *css) { @@ -5350,7 +5350,7 @@ out_unlock: /* * This is called when the refcnt of a css is confirmed to be killed. * css_tryget_online() is now guaranteed to fail. Tell the subsystem to - * initate destruction and put the css ref from kill_css(). + * initiate destruction and put the css ref from kill_css(). */ static void css_killed_work_fn(struct work_struct *work) { @@ -6052,7 +6052,7 @@ out_revert: * @kargs: the arguments passed to create the child process * * This calls the cancel_fork() callbacks if a fork failed *after* - * cgroup_can_fork() succeded and cleans up references we took to + * cgroup_can_fork() succeeded and cleans up references we took to * prepare a new css_set for the child process in cgroup_can_fork(). */ void cgroup_cancel_fork(struct task_struct *child, diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index a945504c0ae7..adb5190c4429 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -3376,7 +3376,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) } /** - * cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed + * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed * @nodemask: the nodemask to be checked * * Are any of the nodes in the nodemask allowed in current->mems_allowed? diff --git a/kernel/cgroup/rdma.c b/kernel/cgroup/rdma.c index ae042c347c64..3135406608c7 100644 --- a/kernel/cgroup/rdma.c +++ b/kernel/cgroup/rdma.c @@ -244,7 +244,7 @@ EXPORT_SYMBOL(rdmacg_uncharge); * This function follows charging resource in hierarchical way. * It will fail if the charge would cause the new value to exceed the * hierarchical limit. 
- * Returns 0 if the charge succeded, otherwise -EAGAIN, -ENOMEM or -EINVAL. + * Returns 0 if the charge succeeded, otherwise -EAGAIN, -ENOMEM or -EINVAL. * Returns pointer to rdmacg for this resource when charging is successful. * * Charger needs to account resources on two criteria. diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index 3a3fd2993a65..cee265cb535c 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -75,7 +75,7 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) * @root: root of the tree to traversal * @cpu: target cpu * - * Walks the udpated rstat_cpu tree on @cpu from @root. %NULL @pos starts + * Walks the updated rstat_cpu tree on @cpu from @root. %NULL @pos starts * the traversal and %NULL return indicates the end. During traversal, * each returned cgroup is unlinked from the tree. Must be called with the * matching cgroup_rstat_cpu_lock held. -- cgit v1.2.3-71-gd317 From 1cb61759d40716643281b8e0f8c7afebc8699249 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 21 May 2021 09:26:10 +0200 Subject: init: verify that function is initcall_t at compile-time In the spirit of making it hard to misuse an interface, add a compile-time assertion in the CONFIG_HAVE_ARCH_PREL32_RELOCATIONS case to verify the initcall function matches initcall_t, because the inline asm bypasses any type-checking the compiler would otherwise do. This will help developers catch incorrect API use in all configurations. A recent example of this is: https://lkml.kernel.org/r/20210514140015.2944744-1-arnd@kernel.org Signed-off-by: Marco Elver Reviewed-by: Miguel Ojeda Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor Reviewed-by: Sami Tolvanen Tested-by: Paul E. McKenney Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20210521072610.2880286-1-elver@google.com --- include/linux/init.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/init.h b/include/linux/init.h index 045ad1650ed1..d82b4b2e1d25 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -242,7 +242,8 @@ extern bool initcall_debug; asm(".section \"" __sec "\", \"a\" \n" \ __stringify(__name) ": \n" \ ".long " __stringify(__stub) " - . \n" \ - ".previous \n"); + ".previous \n"); \ + static_assert(__same_type(initcall_t, &fn)); #else #define ____define_initcall(fn, __unused, __name, __sec) \ static initcall_t __name __used \ -- cgit v1.2.3-71-gd317 From a8b98c808eab3ec8f1b5a64be967b0f4af4cae43 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Mon, 24 May 2021 16:53:21 +0300 Subject: fanotify: fix permission model of unprivileged group Reporting event->pid should depend on the privileges of the user that initialized the group, not the privileges of the user reading the events. Use an internal group flag FANOTIFY_UNPRIV to record the fact that the group was initialized by an unprivileged user. To be on the safe side, the permissions to setup filesystem and mount marks now require that both the user that initialized the group and the user setting up the mark have CAP_SYS_ADMIN.
Link: https://lore.kernel.org/linux-fsdevel/CAOQ4uxiA77_P5vtv7e83g0+9d7B5W9ZTE4GfQEYbWmfT1rA=VA@mail.gmail.com/ Fixes: 7cea2a3c505e ("fanotify: support limited functionality for unprivileged users") Cc: # v5.12+ Link: https://lore.kernel.org/r/20210524135321.2190062-1-amir73il@gmail.com Reviewed-by: Matthew Bobrowski Acked-by: Christian Brauner Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify_user.c | 30 ++++++++++++++++++++++++------ fs/notify/fdinfo.c | 2 +- include/linux/fanotify.h | 4 ++++ 3 files changed, 29 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 71fefb30e015..be5b6d2c01e7 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -424,11 +424,18 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, * events generated by the listener process itself, without disclosing * the pids of other processes. */ - if (!capable(CAP_SYS_ADMIN) && + if (FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) && task_tgid(current) != event->pid) metadata.pid = 0; - if (path && path->mnt && path->dentry) { + /* + * For now, fid mode is required for an unprivileged listener and + * fid mode does not report fd in events. Keep this check anyway + * for safety in case fid mode requirement is relaxed in the future + * to allow unprivileged listener to get events with no fd and no fid. + */ + if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) && + path && path->mnt && path->dentry) { fd = create_fd(group, path, &f); if (fd < 0) return fd; @@ -1040,6 +1047,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) int f_flags, fd; unsigned int fid_mode = flags & FANOTIFY_FID_BITS; unsigned int class = flags & FANOTIFY_CLASS_BITS; + unsigned int internal_flags = 0; pr_debug("%s: flags=%x event_f_flags=%x\n", __func__, flags, event_f_flags); @@ -1053,6 +1061,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) */ if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode) return -EPERM; + + /* + * Setting the internal flag FANOTIFY_UNPRIV on the group + * prevents setting mount/filesystem marks on this group and + * prevents reporting pid and open fd in events. + */ + internal_flags |= FANOTIFY_UNPRIV; } #ifdef CONFIG_AUDITSYSCALL @@ -1105,7 +1120,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) goto out_destroy_group; } - group->fanotify_data.flags = flags; + group->fanotify_data.flags = flags | internal_flags; group->memcg = get_mem_cgroup_from_mm(current->mm); group->fanotify_data.merge_hash = fanotify_alloc_merge_hash(); @@ -1305,11 +1320,13 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, group = f.file->private_data; /* - * An unprivileged user is not allowed to watch a mount point nor - * a filesystem. + * An unprivileged user is not allowed to setup mount nor filesystem + * marks. This also includes setting up such marks by a group that + * was initialized by an unprivileged user. 
*/ ret = -EPERM; - if (!capable(CAP_SYS_ADMIN) && + if ((!capable(CAP_SYS_ADMIN) || + FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) && mark_type != FAN_MARK_INODE) goto fput_and_out; @@ -1460,6 +1477,7 @@ static int __init fanotify_user_setup(void) max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS, FANOTIFY_DEFAULT_MAX_USER_MARKS); + BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9); diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c index a712b2aaa9ac..57f0d5d9f934 100644 --- a/fs/notify/fdinfo.c +++ b/fs/notify/fdinfo.c @@ -144,7 +144,7 @@ void fanotify_show_fdinfo(struct seq_file *m, struct file *f) struct fsnotify_group *group = f->private_data; seq_printf(m, "fanotify flags:%x event-flags:%x\n", - group->fanotify_data.flags, + group->fanotify_data.flags & FANOTIFY_INIT_FLAGS, group->fanotify_data.f_flags); show_fdinfo(m, f, fanotify_fdinfo); diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index bad41bcb25df..a16dbeced152 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -51,6 +51,10 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */ #define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \ FANOTIFY_USER_INIT_FLAGS) +/* Internal group flags */ +#define FANOTIFY_UNPRIV 0x80000000 +#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV) + #define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \ FAN_MARK_FILESYSTEM) -- cgit v1.2.3-71-gd317 From e86be3a04bc4aeaf12f93af35f08f8d4385bcd98 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 25 May 2021 18:43:38 -0400 Subject: SUNRPC: More fixes for backlog congestion Ensure that we fix the XPRT_CONGESTED starvation issue for RDMA as well as socket based transports. Ensure we always initialise the request after waking up from the backlog list. 
Fixes: e877a88d1f06 ("SUNRPC in case of backlog, hand free slots directly to waiting task") Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprt.h | 2 ++ net/sunrpc/xprt.c | 58 ++++++++++++++++++++--------------------- net/sunrpc/xprtrdma/transport.c | 12 ++++----- net/sunrpc/xprtrdma/verbs.c | 18 ++++++++++--- net/sunrpc/xprtrdma/xprt_rdma.h | 1 + 5 files changed, 52 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index d81fe8b364d0..61b622e334ee 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -368,6 +368,8 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size, unsigned int num_prealloc, unsigned int max_req); void xprt_free(struct rpc_xprt *); +void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task); +bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req); static inline int xprt_enable_swap(struct rpc_xprt *xprt) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 5b3981fd3783..3509a7f139b9 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1607,11 +1607,18 @@ xprt_transmit(struct rpc_task *task) spin_unlock(&xprt->queue_lock); } -static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) +static void xprt_complete_request_init(struct rpc_task *task) +{ + if (task->tk_rqstp) + xprt_request_init(task); +} + +void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) { set_bit(XPRT_CONGESTED, &xprt->state); - rpc_sleep_on(&xprt->backlog, task, NULL); + rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init); } +EXPORT_SYMBOL_GPL(xprt_add_backlog); static bool __xprt_set_rq(struct rpc_task *task, void *data) { @@ -1619,14 +1626,13 @@ static bool __xprt_set_rq(struct rpc_task *task, void *data) if (task->tk_rqstp == NULL) { memset(req, 0, sizeof(*req)); /* mark unused */ - task->tk_status = -EAGAIN; task->tk_rqstp = req; return true; } return false; } -static bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req) +bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req) { if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) { clear_bit(XPRT_CONGESTED, &xprt->state); @@ -1634,6 +1640,7 @@ static bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req) } return true; } +EXPORT_SYMBOL_GPL(xprt_wake_up_backlog); static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task) { @@ -1643,7 +1650,7 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task goto out; spin_lock(&xprt->reserve_lock); if (test_bit(XPRT_CONGESTED, &xprt->state)) { - rpc_sleep_on(&xprt->backlog, task, NULL); + xprt_add_backlog(xprt, task); ret = true; } spin_unlock(&xprt->reserve_lock); @@ -1812,10 +1819,6 @@ xprt_request_init(struct rpc_task *task) struct rpc_xprt *xprt = task->tk_xprt; struct rpc_rqst *req = task->tk_rqstp; - if (req->rq_task) - /* Already initialized */ - return; - req->rq_task = task; req->rq_xprt = xprt; req->rq_buffer = NULL; @@ -1876,10 +1879,8 @@ void xprt_retry_reserve(struct rpc_task *task) struct rpc_xprt *xprt = task->tk_xprt; task->tk_status = 0; - if (task->tk_rqstp != NULL) { - xprt_request_init(task); + if (task->tk_rqstp != NULL) return; - } task->tk_status = -EAGAIN; xprt_do_reserve(xprt, task); @@ -1904,24 +1905,21 @@ void xprt_release(struct rpc_task *task) } xprt = req->rq_xprt; - if (xprt) { - xprt_request_dequeue_xprt(task); - spin_lock(&xprt->transport_lock); - 
xprt->ops->release_xprt(xprt, task); - if (xprt->ops->release_request) - xprt->ops->release_request(task); - xprt_schedule_autodisconnect(xprt); - spin_unlock(&xprt->transport_lock); - if (req->rq_buffer) - xprt->ops->buf_free(task); - xdr_free_bvec(&req->rq_rcv_buf); - xdr_free_bvec(&req->rq_snd_buf); - if (req->rq_cred != NULL) - put_rpccred(req->rq_cred); - if (req->rq_release_snd_buf) - req->rq_release_snd_buf(req); - } else - xprt = task->tk_xprt; + xprt_request_dequeue_xprt(task); + spin_lock(&xprt->transport_lock); + xprt->ops->release_xprt(xprt, task); + if (xprt->ops->release_request) + xprt->ops->release_request(task); + xprt_schedule_autodisconnect(xprt); + spin_unlock(&xprt->transport_lock); + if (req->rq_buffer) + xprt->ops->buf_free(task); + xdr_free_bvec(&req->rq_rcv_buf); + xdr_free_bvec(&req->rq_snd_buf); + if (req->rq_cred != NULL) + put_rpccred(req->rq_cred); + if (req->rq_release_snd_buf) + req->rq_release_snd_buf(req); task->tk_rqstp = NULL; if (likely(!bc_prealloc(req))) diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 09953597d055..19a49d26b1e4 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -520,9 +520,8 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) return; out_sleep: - set_bit(XPRT_CONGESTED, &xprt->state); - rpc_sleep_on(&xprt->backlog, task, NULL); task->tk_status = -EAGAIN; + xprt_add_backlog(xprt, task); } /** @@ -537,10 +536,11 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst) struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt, rx_xprt); - memset(rqst, 0, sizeof(*rqst)); - rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst)); - if (unlikely(!rpc_wake_up_next(&xprt->backlog))) - clear_bit(XPRT_CONGESTED, &xprt->state); + rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst)); + if (!xprt_wake_up_backlog(xprt, rqst)) { + memset(rqst, 0, sizeof(*rqst)); + rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst)); + } } static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt, diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 1e965a380896..649c23518ec0 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -1200,6 +1200,20 @@ rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt) return mr; } +/** + * rpcrdma_reply_put - Put reply buffers back into pool + * @buffers: buffer pool + * @req: object to return + * + */ +void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req) +{ + if (req->rl_reply) { + rpcrdma_rep_put(buffers, req->rl_reply); + req->rl_reply = NULL; + } +} + /** * rpcrdma_buffer_get - Get a request buffer * @buffers: Buffer pool from which to obtain a buffer @@ -1228,9 +1242,7 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers) */ void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req) { - if (req->rl_reply) - rpcrdma_rep_put(buffers, req->rl_reply); - req->rl_reply = NULL; + rpcrdma_reply_put(buffers, req); spin_lock(&buffers->rb_lock); list_add(&req->rl_list, &buffers->rb_send_bufs); diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 436ad7312614..5d231d94e944 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -479,6 +479,7 @@ struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *); void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req); void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep); +void 
rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req); bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags); -- cgit v1.2.3-71-gd317 From 62f3415db237b8d2aa9a804ff84ce2efa87df179 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 26 May 2021 11:46:17 -0700 Subject: net: phy: Document phydev::dev_flags bits allocation Document the phydev::dev_flags bit allocation to allow bits 15:0 to define PHY driver specific behavior, bits 23:16 to be reserved for now, and bits 31:24 to hold generic PHY driver flags. Signed-off-by: Florian Fainelli Reviewed-by: Russell King (Oracle) Link: https://lore.kernel.org/r/20210526184617.3105012-1-f.fainelli@gmail.com Signed-off-by: Jakub Kicinski --- include/linux/phy.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 60d2b26026a2..852743f07e3e 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -496,6 +496,11 @@ struct macsec_ops; * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY * @state: State of the PHY for management purposes * @dev_flags: Device-specific flags used by the PHY driver. + * Bits [15:0] are free to use by the PHY driver to communicate + * driver specific behavior. + * Bits [23:16] are currently reserved for future use. + * Bits [31:24] are reserved for defining generic + * PHY driver behavior. * @irq: IRQ number of the PHY's interrupt (-1 if none) * @phy_timer: The timer for handling the state machine * @phylink: Pointer to phylink instance for this PHY -- cgit v1.2.3-71-gd317 From 6bd5b743686243dae7351d5dcceeb7f171201bb4 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 18 May 2021 05:00:31 -0700 Subject: KVM: PPC: exit halt polling on need_resched() This is inspired by commit 262de4102c7bb8 (kvm: exit halt polling on need_resched() as well). Since PPC implements arch-specific halt polling logic, we have to add the need_resched() check there as well. This patch adds a helper function that can be shared between book3s and generic halt-polling loops. Reviewed-by: David Matlack Reviewed-by: Venkatesh Srinivas Cc: Ben Segall Cc: Venkatesh Srinivas Cc: Jim Mattson Cc: David Matlack Cc: Paul Mackerras Cc: Suraj Jitindar Singh Signed-off-by: Wanpeng Li Message-Id: <1621339235-11131-1-git-send-email-wanpengli@tencent.com> [Make the function inline.
- Paolo] Signed-off-by: Paolo Bonzini --- arch/powerpc/kvm/book3s_hv.c | 2 +- include/linux/kvm_host.h | 6 ++++++ virt/kvm/kvm_main.c | 3 +-- 3 files changed, 8 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 28a80d240b76..7360350e66ff 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3936,7 +3936,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) break; } cur = ktime_get(); - } while (single_task_running() && ktime_before(cur, stop)); + } while (kvm_vcpu_can_poll(cur, stop)); spin_lock(&vc->lock); vc->vcore_state = VCORE_INACTIVE; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2f34487e21f2..5d4b96b36ec0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -265,6 +266,11 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map) return !!map->hva; } +static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop) +{ + return single_task_running() && !need_resched() && ktime_before(cur, stop); +} + /* * Sometimes a large or cross-page mmio needs to be broken up into separate * exits for userspace servicing. diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 6b4feb92dc79..5f40725144f5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2973,8 +2973,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) goto out; } poll_end = cur = ktime_get(); - } while (single_task_running() && !need_resched() && - ktime_before(cur, stop)); + } while (kvm_vcpu_can_poll(cur, stop)); } prepare_to_rcuwait(&vcpu->wait); -- cgit v1.2.3-71-gd317 From 084071d5e9226add45a6031928bf10e6afc855fd Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 25 May 2021 10:41:17 -0300 Subject: KVM: rename KVM_REQ_PENDING_TIMER to KVM_REQ_UNBLOCK KVM_REQ_UNBLOCK will be used to exit a vcpu from its inner vcpu halt emulation loop. Rename KVM_REQ_PENDING_TIMER to KVM_REQ_UNBLOCK, switch PowerPC to arch specific request bit. Signed-off-by: Marcelo Tosatti Message-Id: <20210525134321.303768132@redhat.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/vcpu-requests.rst | 8 +++++--- arch/powerpc/include/asm/kvm_host.h | 1 + arch/x86/kvm/lapic.c | 2 +- arch/x86/kvm/x86.c | 2 +- include/linux/kvm_host.h | 2 +- virt/kvm/kvm_main.c | 2 ++ 6 files changed, 11 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst index 5feb3706a7ae..af1b37441e0a 100644 --- a/Documentation/virt/kvm/vcpu-requests.rst +++ b/Documentation/virt/kvm/vcpu-requests.rst @@ -118,10 +118,12 @@ KVM_REQ_MMU_RELOAD necessary to inform each VCPU to completely refresh the tables. This request is used for that. -KVM_REQ_PENDING_TIMER +KVM_REQ_UNBLOCK - This request may be made from a timer handler run on the host on behalf - of a VCPU. It informs the VCPU thread to inject a timer interrupt. + This request informs the vCPU to exit kvm_vcpu_block. It is used for + example from timer handlers that run on the host on behalf of a vCPU, + or in order to update the interrupt routing and ensure that assigned + devices will wake up the vCPU. 
KVM_REQ_UNHALT diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 1e83359f286b..7f2e90db2050 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -51,6 +51,7 @@ /* PPC-specific vcpu->requests bit members */ #define KVM_REQ_WATCHDOG KVM_ARCH_REQ(0) #define KVM_REQ_EPR_EXIT KVM_ARCH_REQ(1) +#define KVM_REQ_PENDING_TIMER KVM_ARCH_REQ(2) #include diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5d91f2367c31..8120e8614b92 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1669,7 +1669,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn) } atomic_inc(&apic->lapic_timer.pending); - kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); + kvm_make_request(KVM_REQ_UNBLOCK, vcpu); if (from_timer_fn) kvm_vcpu_kick(vcpu); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 98538b1cb453..fe464b66898f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9501,7 +9501,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu) if (r <= 0) break; - kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu); + kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5d4b96b36ec0..76102efbf079 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -147,7 +147,7 @@ static inline bool is_error_page(struct page *page) */ #define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define KVM_REQ_PENDING_TIMER 2 +#define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 #define KVM_REQUEST_ARCH_BASE 8 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5f40725144f5..37a2d500a148 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2929,6 +2929,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) goto out; if (signal_pending(current)) goto out; + if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) + goto out; ret = 0; out: -- cgit v1.2.3-71-gd317
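As an aside on the two KVM patches above: the KVM_REQ_UNBLOCK handling added to kvm_vcpu_check_block() relies on the usual vCPU request-bit pattern, in which a waker (for example the timer handler calling kvm_make_request() in apic_timer_expired()) atomically sets a bit and the blocked vCPU atomically tests and clears it to decide whether to stop waiting. The following is a minimal, hypothetical userspace model of that pattern written with C11 atomics; it is a sketch for illustration only, and the names, signatures and bit position are not taken from the kernel headers.

/*
 * Minimal userspace model (not kernel code) of the request-bit pattern
 * behind KVM_REQ_UNBLOCK: a waker sets a bit with an atomic OR and the
 * blocked vCPU loop tests-and-clears it to decide whether to stop waiting.
 * All names and the bit position here are illustrative assumptions.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_UNBLOCK 2u			/* illustrative bit position */

struct vcpu_model {
	atomic_ulong requests;
};

/* Rough analogue of kvm_make_request(): e.g. called from a timer handler. */
static void make_request(struct vcpu_model *v, unsigned int req)
{
	atomic_fetch_or(&v->requests, 1UL << req);
}

/* Rough analogue of kvm_check_request(): atomically test and clear the bit. */
static bool check_request(struct vcpu_model *v, unsigned int req)
{
	unsigned long old = atomic_fetch_and(&v->requests, ~(1UL << req));

	return old & (1UL << req);
}

int main(void)
{
	struct vcpu_model v = { .requests = 0 };

	printf("pending before: %d\n", check_request(&v, REQ_UNBLOCK));	/* 0 */
	make_request(&v, REQ_UNBLOCK);						/* waker side */
	printf("pending after:  %d\n", check_request(&v, REQ_UNBLOCK));	/* 1 */
	return 0;
}

In the real code the hunks above play these roles: apic_timer_expired() issues kvm_make_request(KVM_REQ_UNBLOCK, vcpu), and kvm_vcpu_check_block() performs the kvm_check_request() test so the vCPU leaves its halt-polling/block loop.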