From 773f91b2cf3f52df0d7508fdbf60f37567cdaee4 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 1 Apr 2022 17:08:21 -0400 Subject: SUNRPC: Fix NFSD's request deferral on RDMA transports Trond Myklebust reports an NFSD crash in svc_rdma_sendto(). Further investigation shows that the crash occurred while NFSD was handling a deferred request. This patch addresses two inter-related issues that prevent request deferral from working correctly for RPC/RDMA requests: 1. Prevent the crash by ensuring that the original svc_rqst::rq_xprt_ctxt value is available when the request is revisited. Otherwise svc_rdma_sendto() does not have a Receive context available with which to construct its reply. 2. Possibly since before commit 71641d99ce03 ("svcrdma: Properly compute .len and .buflen for received RPC Calls"), svc_rdma_recvfrom() did not include the transport header in the returned xdr_buf. There should have been no need for svc_defer() and friends to save and restore that header, as of that commit. This issue is addressed in a backport-friendly way by simply having svc_rdma_recvfrom() set rq_xprt_hlen to zero unconditionally, just as svc_tcp_recvfrom() does. This enables svc_deferred_recv() to correctly reconstruct an RPC message received via RPC/RDMA. Reported-by: Trond Myklebust Link: https://lore.kernel.org/linux-nfs/82662b7190f26fb304eb0ab1bb04279072439d4e.camel@hammerspace.com/ Signed-off-by: Chuck Lever Cc: --- include/linux/sunrpc/svc.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index a5dda4987e8b..217711fc9cac 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -395,6 +395,7 @@ struct svc_deferred_req { size_t addrlen; struct sockaddr_storage daddr; /* where reply must come from */ size_t daddrlen; + void *xprt_ctxt; struct cache_deferred_req handle; size_t xprt_hlen; int argslen; -- cgit v1.2.3-71-gd317 From 0c2cae09a765b1c1d842eb9328982976ec735926 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 17 Mar 2022 11:33:11 +0200 Subject: gpiolib: acpi: Convert type for pin to be unsigned A pin that comes from ACPI tables is of unsigned type. This also applies to the internal APIs which use unsigned int to store the pin. Convert type for pin to be unsigned in the places where it's not yet true. While at it, add a stub for acpi_get_and_request_gpiod() for the sake of consistency in the APIs. Signed-off-by: Andy Shevchenko --- drivers/gpio/gpiolib-acpi.c | 18 ++++++++++-------- include/linux/gpio/consumer.h | 8 +++++++- 2 files changed, 17 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index b7c2f2af1dee..c2523ac26fac 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -108,7 +108,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) * controller does not have GPIO chip registered at the moment. This is to * support probe deferral. */ -static struct gpio_desc *acpi_get_gpiod(char *path, int pin) +static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin) { struct gpio_chip *chip; acpi_handle handle; @@ -136,7 +136,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) * as it is intended for use outside of the GPIO layer (in a similar fashion to * gpiod_get_index() for example) it also holds a reference to the GPIO device. 
*/ -struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label) +struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label) { struct gpio_desc *gpio; int ret; @@ -317,11 +317,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, return desc; } -static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in) +static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin_in) { const char *controller, *pin_str; - int len, pin; + unsigned int pin; char *endp; + int len; controller = ignore_wake; while (controller) { @@ -354,13 +355,13 @@ err: static bool acpi_gpio_irq_is_wake(struct device *parent, struct acpi_resource_gpio *agpio) { - int pin = agpio->pin_table[0]; + unsigned int pin = agpio->pin_table[0]; if (agpio->wake_capable != ACPI_WAKE_CAPABLE) return false; if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) { - dev_info(parent, "Ignoring wakeup on pin %d\n", pin); + dev_info(parent, "Ignoring wakeup on pin %u\n", pin); return false; } @@ -378,7 +379,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, struct acpi_gpio_event *event; irq_handler_t handler = NULL; struct gpio_desc *desc; - int ret, pin, irq; + unsigned int pin; + int ret, irq; if (!acpi_gpio_get_irq_resource(ares, &agpio)) return AE_OK; @@ -1098,7 +1100,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, length = min_t(u16, agpio->pin_table_length, pin_index + bits); for (i = pin_index; i < length; ++i) { - int pin = agpio->pin_table[i]; + unsigned int pin = agpio->pin_table[i]; struct acpi_gpio_connection *conn; struct gpio_desc *desc; bool found; diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index c3aa8b330e1c..e71f6e1bfafe 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -688,7 +688,7 @@ void acpi_dev_remove_driver_gpios(struct acpi_device *adev); int devm_acpi_dev_add_driver_gpios(struct device *dev, const struct acpi_gpio_mapping *gpios); -struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label); +struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label); #else /* CONFIG_GPIOLIB && CONFIG_ACPI */ @@ -705,6 +705,12 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, return -ENXIO; } +static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, + char *label) +{ + return ERR_PTR(-ENOSYS); +} + #endif /* CONFIG_GPIOLIB && CONFIG_ACPI */ -- cgit v1.2.3-71-gd317 From 2c547f299827c12244d613eb2ee3616d88f56088 Mon Sep 17 00:00:00 2001 From: Yue Hu Date: Wed, 6 Apr 2022 11:50:17 +0800 Subject: fscache: Remove the cookie parameter from fscache_clear_page_bits() The cookie is not used at all, remove it and update the usage in io.c and afs/write.c (which is the only user outside of fscache currently) at the same time. 
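As a quick sketch of what the change looks like at a call site (variable names taken from the afs/write.c hunk below), the unused cookie argument simply drops out:

	/* before this patch */
	fscache_clear_page_bits(afs_vnode_cache(vnode), mapping,
				start, len, caching);

	/* after this patch: only the mapping, the region and the caching flag remain */
	fscache_clear_page_bits(mapping, start, len, caching);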
[DH: Amended the documentation also] Signed-off-by: Yue Hu Signed-off-by: David Howells cc: linux-cachefs@redhat.com Link: https://listman.redhat.com/archives/linux-cachefs/2022-April/006659.html --- Documentation/filesystems/caching/netfs-api.rst | 25 ++++++++++++------------- fs/afs/write.c | 3 +-- fs/fscache/io.c | 5 ++--- include/linux/fscache.h | 4 +--- 4 files changed, 16 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/caching/netfs-api.rst b/Documentation/filesystems/caching/netfs-api.rst index 5066113acad5..7308d76a29dc 100644 --- a/Documentation/filesystems/caching/netfs-api.rst +++ b/Documentation/filesystems/caching/netfs-api.rst @@ -404,22 +404,21 @@ schedule a write of that region:: And if an error occurs before that point is reached, the marks can be removed by calling:: - void fscache_clear_page_bits(struct fscache_cookie *cookie, - struct address_space *mapping, + void fscache_clear_page_bits(struct address_space *mapping, loff_t start, size_t len, bool caching) -In both of these functions, the cookie representing the cache object to be -written to and a pointer to the mapping to which the source pages are attached -are passed in; start and len indicate the size of the region that's going to be -written (it doesn't have to align to page boundaries necessarily, but it does -have to align to DIO boundaries on the backing filesystem). The caching -parameter indicates if caching should be skipped, and if false, the functions -do nothing. - -The write function takes some additional parameters: i_size indicates the size -of the netfs file and term_func indicates an optional completion function, to -which term_func_priv will be passed, along with the error or amount written. +In these functions, a pointer to the mapping to which the source pages are +attached is passed in and start and len indicate the size of the region that's +going to be written (it doesn't have to align to page boundaries necessarily, +but it does have to align to DIO boundaries on the backing filesystem). The +caching parameter indicates if caching should be skipped, and if false, the +functions do nothing. + +The write function takes some additional parameters: the cookie representing +the cache object to be written to, i_size indicates the size of the netfs file +and term_func indicates an optional completion function, to which +term_func_priv will be passed, along with the error or amount written. Note that the write function will always run asynchronously and will unmark all the pages upon completion before calling term_func. diff --git a/fs/afs/write.c b/fs/afs/write.c index 6bcf1475511b..4763132ca57e 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -616,8 +616,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping, _debug("write discard %x @%llx [%llx]", len, start, i_size); /* The dirty region was entirely beyond the EOF. 
*/ - fscache_clear_page_bits(afs_vnode_cache(vnode), - mapping, start, len, caching); + fscache_clear_page_bits(mapping, start, len, caching); afs_pages_written_back(vnode, start, len); ret = 0; } diff --git a/fs/fscache/io.c b/fs/fscache/io.c index c8c7fe9e9a6e..3af3b08a9bb3 100644 --- a/fs/fscache/io.c +++ b/fs/fscache/io.c @@ -235,8 +235,7 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error, { struct fscache_write_request *wreq = priv; - fscache_clear_page_bits(fscache_cres_cookie(&wreq->cache_resources), - wreq->mapping, wreq->start, wreq->len, + fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len, wreq->set_bits); if (wreq->term_func) @@ -296,7 +295,7 @@ abandon_end: abandon_free: kfree(wreq); abandon: - fscache_clear_page_bits(cookie, mapping, start, len, cond); + fscache_clear_page_bits(mapping, start, len, cond); if (term_func) term_func(term_func_priv, ret, false); } diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 6727fb0db619..e25539072463 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -573,7 +573,6 @@ int fscache_write(struct netfs_cache_resources *cres, /** * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages - * @cookie: The cookie representing the cache object * @mapping: The netfs inode to use as the source * @start: The start position in @mapping * @len: The amount of data to unlock @@ -582,8 +581,7 @@ int fscache_write(struct netfs_cache_resources *cres, * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's * waiting. */ -static inline void fscache_clear_page_bits(struct fscache_cookie *cookie, - struct address_space *mapping, +static inline void fscache_clear_page_bits(struct address_space *mapping, loff_t start, size_t len, bool caching) { -- cgit v1.2.3-71-gd317 From 868e6139c5212e7d9de8332806aacfeafb349320 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Sun, 27 Mar 2022 11:33:16 -0600 Subject: block: move lower_48_bits() to block The function is not generally applicable enough to be included in the core kernel header. Move it to block since it's the only subsystem using it. Suggested-by: Linus Torvalds Signed-off-by: Keith Busch Link: https://lore.kernel.org/r/20220327173316.315-1-kbusch@kernel.org Signed-off-by: Jens Axboe --- include/linux/kernel.h | 9 --------- include/linux/t10-pi.h | 9 +++++++++ 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 08ba5995aa8b..a890428bcc1a 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -63,15 +63,6 @@ } \ ) -/** - * lower_48_bits() - return bits 0-47 of a number - * @n: the number we're accessing - */ -static inline u64 lower_48_bits(u64 n) -{ - return n & ((1ull << 48) - 1); -} - /** * upper_32_bits - return bits 32-63 of a number * @n: the number we're accessing diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index a4b1af581f69..248f4ac95642 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -59,6 +59,15 @@ struct crc64_pi_tuple { __u8 ref_tag[6]; }; +/** + * lower_48_bits() - return bits 0-47 of a number + * @n: the number we're accessing + */ +static inline u64 lower_48_bits(u64 n) +{ + return n & ((1ull << 48) - 1); +} + static inline u64 ext_pi_ref_tag(struct request *rq) { unsigned int shift = ilog2(queue_logical_block_size(rq->q)); -- cgit v1.2.3-71-gd317 From b0c3e796f24b588b862b61ce235d3c9417dc8983 Mon Sep 17 00:00:00 2001 From: "Jason A. 
Donenfeld" Date: Fri, 8 Apr 2022 18:14:57 +0200 Subject: random: make random_get_entropy() return an unsigned long Some implementations were returning type `unsigned long`, while others that fell back to get_cycles() were implicitly returning a `cycles_t` or an untyped constant int literal. That makes for weird and confusing code, and basically all code in the kernel already handled it like it was an `unsigned long`. I recently tried to handle it as the largest type it could be, a `cycles_t`, but doing so doesn't really help with much. Instead let's just make random_get_entropy() return an unsigned long all the time. This also matches the commonly used `arch_get_random_long()` function, so now RDRAND and RDTSC return the same sized integer, which means one can fallback to the other more gracefully. Cc: Dominik Brodowski Cc: Theodore Ts'o Acked-by: Thomas Gleixner Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 20 +++++++------------- include/linux/timex.h | 2 +- 2 files changed, 8 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index df43c5060f00..6b01b2be9dd4 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1018,7 +1018,7 @@ int __init rand_initialize(void) */ void add_device_randomness(const void *buf, size_t size) { - cycles_t cycles = random_get_entropy(); + unsigned long cycles = random_get_entropy(); unsigned long flags, now = jiffies; if (crng_init == 0 && size) @@ -1049,8 +1049,7 @@ struct timer_rand_state { */ static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) { - cycles_t cycles = random_get_entropy(); - unsigned long flags, now = jiffies; + unsigned long cycles = random_get_entropy(), now = jiffies, flags; long delta, delta2, delta3; spin_lock_irqsave(&input_pool.lock, flags); @@ -1339,8 +1338,7 @@ static void mix_interrupt_randomness(struct work_struct *work) void add_interrupt_randomness(int irq) { enum { MIX_INFLIGHT = 1U << 31 }; - cycles_t cycles = random_get_entropy(); - unsigned long now = jiffies; + unsigned long cycles = random_get_entropy(), now = jiffies; struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); struct pt_regs *regs = get_irq_regs(); unsigned int new_count; @@ -1353,16 +1351,12 @@ void add_interrupt_randomness(int irq) if (cycles == 0) cycles = get_reg(fast_pool, regs); - if (sizeof(cycles) == 8) + if (sizeof(unsigned long) == 8) { irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq; - else { + irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_; + } else { irq_data.u32[0] = cycles ^ irq; irq_data.u32[1] = now; - } - - if (sizeof(unsigned long) == 8) - irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_; - else { irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_; irq_data.u32[3] = get_reg(fast_pool, regs); } @@ -1409,7 +1403,7 @@ static void entropy_timer(struct timer_list *t) static void try_to_generate_entropy(void) { struct { - cycles_t cycles; + unsigned long cycles; struct timer_list timer; } stack; diff --git a/include/linux/timex.h b/include/linux/timex.h index 059b18eb1f1f..5745c90c8800 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -75,7 +75,7 @@ * By default we use get_cycles() for this purpose, but individual * architectures may override this in their asm/timex.h header file. 
*/ -#define random_get_entropy() get_cycles() +#define random_get_entropy() ((unsigned long)get_cycles()) #endif /* -- cgit v1.2.3-71-gd317 From f8e6b7babfeb40987e946bc1427609a9976017fa Mon Sep 17 00:00:00 2001 From: Karol Herbst Date: Mon, 11 Apr 2022 15:44:04 +0200 Subject: dma-buf-map: remove renamed header file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 7938f4218168 ("dma-buf-map: Rename to iosys-map") already renamed this file, but it got brought back by a merge. Delete it for real this time. Fixes: 30424ebae8df ("Merge tag 'drm-intel-gt-next-2022-02-17' of git://anongit.freedesktop.org/drm/drm-intel into drm-intel-next") Cc: Rodrigo Vivi Cc: Lucas De Marchi Cc: dri-devel@lists.freedesktop.org Signed-off-by: Karol Herbst Reviewed-by: Michel Dänzer Reviewed-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20220411134404.524776-1-kherbst@redhat.com --- include/linux/dma-buf-map.h | 266 -------------------------------------------- 1 file changed, 266 deletions(-) delete mode 100644 include/linux/dma-buf-map.h (limited to 'include/linux') diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h deleted file mode 100644 index 19fa0b5ae5ec..000000000000 --- a/include/linux/dma-buf-map.h +++ /dev/null @@ -1,266 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Pointer to dma-buf-mapped memory, plus helpers. - */ - -#ifndef __DMA_BUF_MAP_H__ -#define __DMA_BUF_MAP_H__ - -#include -#include - -/** - * DOC: overview - * - * Calling dma-buf's vmap operation returns a pointer to the buffer's memory. - * Depending on the location of the buffer, users may have to access it with - * I/O operations or memory load/store operations. For example, copying to - * system memory could be done with memcpy(), copying to I/O memory would be - * done with memcpy_toio(). - * - * .. code-block:: c - * - * void *vaddr = ...; // pointer to system memory - * memcpy(vaddr, src, len); - * - * void *vaddr_iomem = ...; // pointer to I/O memory - * memcpy_toio(vaddr, _iomem, src, len); - * - * When using dma-buf's vmap operation, the returned pointer is encoded as - * :c:type:`struct dma_buf_map `. - * :c:type:`struct dma_buf_map ` stores the buffer's address in - * system or I/O memory and a flag that signals the required method of - * accessing the buffer. Use the returned instance and the helper functions - * to access the buffer's memory in the correct way. - * - * The type :c:type:`struct dma_buf_map ` and its helpers are - * actually independent from the dma-buf infrastructure. When sharing buffers - * among devices, drivers have to know the location of the memory to access - * the buffers in a safe way. :c:type:`struct dma_buf_map ` - * solves this problem for dma-buf and its users. If other drivers or - * sub-systems require similar functionality, the type could be generalized - * and moved to a more prominent header file. - * - * Open-coding access to :c:type:`struct dma_buf_map ` is - * considered bad style. Rather then accessing its fields directly, use one - * of the provided helper functions, or implement your own. For example, - * instances of :c:type:`struct dma_buf_map ` can be initialized - * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with - * dma_buf_map_set_vaddr(). These helpers will set an address in system memory. - * - * .. 
code-block:: c - * - * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf); - * - * dma_buf_map_set_vaddr(&map, 0xdeadbeaf); - * - * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem(). - * - * .. code-block:: c - * - * dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf); - * - * Instances of struct dma_buf_map do not have to be cleaned up, but - * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings - * always refer to system memory. - * - * .. code-block:: c - * - * dma_buf_map_clear(&map); - * - * Test if a mapping is valid with either dma_buf_map_is_set() or - * dma_buf_map_is_null(). - * - * .. code-block:: c - * - * if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map)) - * // always true - * - * Instances of :c:type:`struct dma_buf_map ` can be compared - * for equality with dma_buf_map_is_equal(). Mappings the point to different - * memory spaces, system or I/O, are never equal. That's even true if both - * spaces are located in the same address space, both mappings contain the - * same address value, or both mappings refer to NULL. - * - * .. code-block:: c - * - * struct dma_buf_map sys_map; // refers to system memory - * struct dma_buf_map io_map; // refers to I/O memory - * - * if (dma_buf_map_is_equal(&sys_map, &io_map)) - * // always false - * - * A set up instance of struct dma_buf_map can be used to access or manipulate - * the buffer memory. Depending on the location of the memory, the provided - * helpers will pick the correct operations. Data can be copied into the memory - * with dma_buf_map_memcpy_to(). The address can be manipulated with - * dma_buf_map_incr(). - * - * .. code-block:: c - * - * const void *src = ...; // source buffer - * size_t len = ...; // length of src - * - * dma_buf_map_memcpy_to(&map, src, len); - * dma_buf_map_incr(&map, len); // go to first byte after the memcpy - */ - -/** - * struct dma_buf_map - Pointer to vmap'ed dma-buf memory. - * @vaddr_iomem: The buffer's address if in I/O memory - * @vaddr: The buffer's address if in system memory - * @is_iomem: True if the dma-buf memory is located in I/O - * memory, or false otherwise. - */ -struct dma_buf_map { - union { - void __iomem *vaddr_iomem; - void *vaddr; - }; - bool is_iomem; -}; - -/** - * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory - * @vaddr_: A system-memory address - */ -#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \ - { \ - .vaddr = (vaddr_), \ - .is_iomem = false, \ - } - -/** - * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory - * @map: The dma-buf mapping structure - * @vaddr: A system-memory address - * - * Sets the address and clears the I/O-memory flag. - */ -static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr) -{ - map->vaddr = vaddr; - map->is_iomem = false; -} - -/** - * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory - * @map: The dma-buf mapping structure - * @vaddr_iomem: An I/O-memory address - * - * Sets the address and the I/O-memory flag. 
- */ -static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map, - void __iomem *vaddr_iomem) -{ - map->vaddr_iomem = vaddr_iomem; - map->is_iomem = true; -} - -/** - * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality - * @lhs: The dma-buf mapping structure - * @rhs: A dma-buf mapping structure to compare with - * - * Two dma-buf mapping structures are equal if they both refer to the same type of memory - * and to the same address within that memory. - * - * Returns: - * True is both structures are equal, or false otherwise. - */ -static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs, - const struct dma_buf_map *rhs) -{ - if (lhs->is_iomem != rhs->is_iomem) - return false; - else if (lhs->is_iomem) - return lhs->vaddr_iomem == rhs->vaddr_iomem; - else - return lhs->vaddr == rhs->vaddr; -} - -/** - * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL - * @map: The dma-buf mapping structure - * - * Depending on the state of struct dma_buf_map.is_iomem, tests if the - * mapping is NULL. - * - * Returns: - * True if the mapping is NULL, or false otherwise. - */ -static inline bool dma_buf_map_is_null(const struct dma_buf_map *map) -{ - if (map->is_iomem) - return !map->vaddr_iomem; - return !map->vaddr; -} - -/** - * dma_buf_map_is_set - Tests is the dma-buf mapping has been set - * @map: The dma-buf mapping structure - * - * Depending on the state of struct dma_buf_map.is_iomem, tests if the - * mapping has been set. - * - * Returns: - * True if the mapping is been set, or false otherwise. - */ -static inline bool dma_buf_map_is_set(const struct dma_buf_map *map) -{ - return !dma_buf_map_is_null(map); -} - -/** - * dma_buf_map_clear - Clears a dma-buf mapping structure - * @map: The dma-buf mapping structure - * - * Clears all fields to zero; including struct dma_buf_map.is_iomem. So - * mapping structures that were set to point to I/O memory are reset for - * system memory. Pointers are cleared to NULL. This is the default. - */ -static inline void dma_buf_map_clear(struct dma_buf_map *map) -{ - if (map->is_iomem) { - map->vaddr_iomem = NULL; - map->is_iomem = false; - } else { - map->vaddr = NULL; - } -} - -/** - * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping - * @dst: The dma-buf mapping structure - * @src: The source buffer - * @len: The number of byte in src - * - * Copies data into a dma-buf mapping. The source buffer is in system - * memory. Depending on the buffer's location, the helper picks the correct - * method of accessing the memory. - */ -static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len) -{ - if (dst->is_iomem) - memcpy_toio(dst->vaddr_iomem, src, len); - else - memcpy(dst->vaddr, src, len); -} - -/** - * dma_buf_map_incr - Increments the address stored in a dma-buf mapping - * @map: The dma-buf mapping structure - * @incr: The number of bytes to increment - * - * Increments the address stored in a dma-buf mapping. Depending on the - * buffer's location, the correct value will be updated. 
- */ -static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr) -{ - if (map->is_iomem) - map->vaddr_iomem += incr; - else - map->vaddr += incr; -} - -#endif /* __DMA_BUF_MAP_H__ */ -- cgit v1.2.3-71-gd317 From 1ef3342a934e235aca72b4bcc0d6854d80a65077 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Wed, 13 Apr 2022 10:10:36 -0300 Subject: vfio/pci: Fix vf_token mechanism when device-specific VF drivers are used get_pf_vdev() tries to check if a PF is a VFIO PF by looking at the driver: if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) { However now that we have multiple VF and PF drivers this is no longer reliable. This means that security tests realted to vf_token can be skipped by mixing and matching different VFIO PCI drivers. Instead of trying to use the driver core to find the PF devices maintain a linked list of all PF vfio_pci_core_device's that we have called pci_enable_sriov() on. When registering a VF just search the list to see if the PF is present and record the match permanently in the struct. PCI core locking prevents a PF from passing pci_disable_sriov() while VF drivers are attached so the VFIO owned PF becomes a static property of the VF. In common cases where vfio does not own the PF the global list remains empty and the VF's pointer is statically NULL. This also fixes a lockdep splat from recursive locking of the vfio_group::device_lock between vfio_device_get_from_name() and vfio_device_get_from_dev(). If the VF and PF share the same group this would deadlock. Fixes: ff53edf6d6ab ("vfio/pci: Split the pci_driver code out of vfio_pci_core.c") Signed-off-by: Jason Gunthorpe Link: https://lore.kernel.org/r/0-v3-876570980634+f2e8-vfio_vf_token_jgg@nvidia.com Signed-off-by: Alex Williamson --- drivers/vfio/pci/vfio_pci_core.c | 124 +++++++++++++++++++++++---------------- include/linux/vfio_pci_core.h | 2 + 2 files changed, 76 insertions(+), 50 deletions(-) (limited to 'include/linux') diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index b7bb16f92ac6..06b6f3594a13 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -36,6 +36,10 @@ static bool nointxmask; static bool disable_vga; static bool disable_idle_d3; +/* List of PF's that vfio_pci_core_sriov_configure() has been called on */ +static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex); +static LIST_HEAD(vfio_pci_sriov_pfs); + static inline bool vfio_vga_disabled(void) { #ifdef CONFIG_VFIO_PCI_VGA @@ -434,47 +438,17 @@ out: } EXPORT_SYMBOL_GPL(vfio_pci_core_disable); -static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev) -{ - struct pci_dev *physfn = pci_physfn(vdev->pdev); - struct vfio_device *pf_dev; - - if (!vdev->pdev->is_virtfn) - return NULL; - - pf_dev = vfio_device_get_from_dev(&physfn->dev); - if (!pf_dev) - return NULL; - - if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) { - vfio_device_put(pf_dev); - return NULL; - } - - return container_of(pf_dev, struct vfio_pci_core_device, vdev); -} - -static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val) -{ - struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev); - - if (!pf_vdev) - return; - - mutex_lock(&pf_vdev->vf_token->lock); - pf_vdev->vf_token->users += val; - WARN_ON(pf_vdev->vf_token->users < 0); - mutex_unlock(&pf_vdev->vf_token->lock); - - vfio_device_put(&pf_vdev->vdev); -} - void vfio_pci_core_close_device(struct vfio_device *core_vdev) { struct vfio_pci_core_device *vdev = container_of(core_vdev, 
struct vfio_pci_core_device, vdev); - vfio_pci_vf_token_user_add(vdev, -1); + if (vdev->sriov_pf_core_dev) { + mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock); + WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users); + vdev->sriov_pf_core_dev->vf_token->users--; + mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock); + } vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_core_disable(vdev); @@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev) { vfio_pci_probe_mmaps(vdev); vfio_spapr_pci_eeh_open(vdev->pdev); - vfio_pci_vf_token_user_add(vdev, 1); + + if (vdev->sriov_pf_core_dev) { + mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock); + vdev->sriov_pf_core_dev->vf_token->users++; + mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock); + } } EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable); @@ -1583,11 +1562,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, * * If the VF token is provided but unused, an error is generated. */ - if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token) - return 0; /* No VF token provided or required */ - if (vdev->pdev->is_virtfn) { - struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev); + struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev; bool match; if (!pf_vdev) { @@ -1600,7 +1576,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, } if (!vf_token) { - vfio_device_put(&pf_vdev->vdev); pci_info_ratelimited(vdev->pdev, "VF token required to access device\n"); return -EACCES; @@ -1610,8 +1585,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, match = uuid_equal(uuid, &pf_vdev->vf_token->uuid); mutex_unlock(&pf_vdev->vf_token->lock); - vfio_device_put(&pf_vdev->vdev); - if (!match) { pci_info_ratelimited(vdev->pdev, "Incorrect VF token provided for device\n"); @@ -1732,8 +1705,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb, static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; + struct vfio_pci_core_device *cur; + struct pci_dev *physfn; int ret; + if (pdev->is_virtfn) { + /* + * If this VF was created by our vfio_pci_core_sriov_configure() + * then we can find the PF vfio_pci_core_device now, and due to + * the locking in pci_disable_sriov() it cannot change until + * this VF device driver is removed. 
+ */ + physfn = pci_physfn(vdev->pdev); + mutex_lock(&vfio_pci_sriov_pfs_mutex); + list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) { + if (cur->pdev == physfn) { + vdev->sriov_pf_core_dev = cur; + break; + } + } + mutex_unlock(&vfio_pci_sriov_pfs_mutex); + return 0; + } + + /* Not a SRIOV PF */ if (!pdev->is_physfn) return 0; @@ -1805,6 +1800,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev, INIT_LIST_HEAD(&vdev->ioeventfds_list); mutex_init(&vdev->vma_lock); INIT_LIST_HEAD(&vdev->vma_list); + INIT_LIST_HEAD(&vdev->sriov_pfs_item); init_rwsem(&vdev->memory_lock); } EXPORT_SYMBOL_GPL(vfio_pci_core_init_device); @@ -1896,7 +1892,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; - pci_disable_sriov(pdev); + vfio_pci_core_sriov_configure(pdev, 0); vfio_unregister_group_dev(&vdev->vdev); @@ -1935,21 +1931,49 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected); int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn) { + struct vfio_pci_core_device *vdev; struct vfio_device *device; int ret = 0; + device_lock_assert(&pdev->dev); + device = vfio_device_get_from_dev(&pdev->dev); if (!device) return -ENODEV; - if (nr_virtfn == 0) - pci_disable_sriov(pdev); - else + vdev = container_of(device, struct vfio_pci_core_device, vdev); + + if (nr_virtfn) { + mutex_lock(&vfio_pci_sriov_pfs_mutex); + /* + * The thread that adds the vdev to the list is the only thread + * that gets to call pci_enable_sriov() and we will only allow + * it to be called once without going through + * pci_disable_sriov() + */ + if (!list_empty(&vdev->sriov_pfs_item)) { + ret = -EINVAL; + goto out_unlock; + } + list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs); + mutex_unlock(&vfio_pci_sriov_pfs_mutex); ret = pci_enable_sriov(pdev, nr_virtfn); + if (ret) + goto out_del; + ret = nr_virtfn; + goto out_put; + } - vfio_device_put(device); + pci_disable_sriov(pdev); - return ret < 0 ? ret : nr_virtfn; +out_del: + mutex_lock(&vfio_pci_sriov_pfs_mutex); + list_del_init(&vdev->sriov_pfs_item); +out_unlock: + mutex_unlock(&vfio_pci_sriov_pfs_mutex); +out_put: + vfio_device_put(device); + return ret; } EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure); diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index 74a4a0f17b28..48f2dd3c568c 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -133,6 +133,8 @@ struct vfio_pci_core_device { struct mutex ioeventfds_lock; struct list_head ioeventfds_list; struct vfio_pci_vf_token *vf_token; + struct list_head sriov_pfs_item; + struct vfio_pci_core_device *sriov_pf_core_dev; struct notifier_block nb; struct mutex vma_lock; struct list_head vma_list; -- cgit v1.2.3-71-gd317 From 2dfe63e61cc31ee59ce951672b0850b5229cd5b0 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Apr 2022 19:13:40 -0700 Subject: mm, kfence: support kmem_dump_obj() for KFENCE objects Calling kmem_obj_info() via kmem_dump_obj() on KFENCE objects has been producing garbage data due to the object not actually being maintained by SLAB or SLUB. Fix this by implementing __kfence_obj_info() that copies relevant information to struct kmem_obj_info when the object was allocated by KFENCE; this is called by a common kmem_obj_info(), which also calls the slab/slub/slob specific variant now called __kmem_obj_info(). For completeness, kmem_dump_obj() now displays if the object was allocated by KFENCE. 
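The shape of the fix is a small dispatcher in mm/slab_common.c, repeated here as a sketch with explanatory comments (see the hunk below for the real thing): KFENCE gets first refusal and reports whether it recognised the object, otherwise the allocator-specific helper runs as before.

	static void kmem_obj_info(struct kmem_obj_info *kpp, void *object,
				  struct slab *slab)
	{
		/*
		 * KFENCE objects are not maintained by SLAB/SLUB/SLOB, so let
		 * KFENCE describe them from its own metadata; it returns true
		 * only when @object was allocated from the KFENCE pool.
		 */
		if (__kfence_obj_info(kpp, object, slab))
			return;
		/* Everything else: the renamed slab/slub/slob variant. */
		__kmem_obj_info(kpp, object, slab);
	}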
Link: https://lore.kernel.org/all/20220323090520.GG16885@xsang-OptiPlex-9020/ Link: https://lkml.kernel.org/r/20220406131558.3558585-1-elver@google.com Fixes: b89fb5ef0ce6 ("mm, kfence: insert KFENCE hooks for SLUB") Fixes: d3fb45f370d9 ("mm, kfence: insert KFENCE hooks for SLAB") Signed-off-by: Marco Elver Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Reported-by: kernel test robot Acked-by: Vlastimil Babka [slab] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kfence.h | 24 ++++++++++++++++++++++++ mm/kfence/core.c | 21 --------------------- mm/kfence/kfence.h | 21 +++++++++++++++++++++ mm/kfence/report.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ mm/slab.c | 2 +- mm/slab.h | 2 +- mm/slab_common.c | 9 +++++++++ mm/slob.c | 2 +- mm/slub.c | 2 +- 9 files changed, 105 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kfence.h b/include/linux/kfence.h index f49e64222628..726857a4b680 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -204,6 +204,22 @@ static __always_inline __must_check bool kfence_free(void *addr) */ bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs); +#ifdef CONFIG_PRINTK +struct kmem_obj_info; +/** + * __kfence_obj_info() - fill kmem_obj_info struct + * @kpp: kmem_obj_info to be filled + * @object: the object + * + * Return: + * * false - not a KFENCE object + * * true - a KFENCE object, filled @kpp + * + * Copies information to @kpp for KFENCE objects. + */ +bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab); +#endif + #else /* CONFIG_KFENCE */ static inline bool is_kfence_address(const void *addr) { return false; } @@ -221,6 +237,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, boo return false; } +#ifdef CONFIG_PRINTK +struct kmem_obj_info; +static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) +{ + return false; +} +#endif + #endif #endif /* _LINUX_KFENCE_H */ diff --git a/mm/kfence/core.c b/mm/kfence/core.c index a203747ad2c0..9b2b5f56f4ae 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -231,27 +231,6 @@ static bool kfence_unprotect(unsigned long addr) return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false)); } -static inline struct kfence_metadata *addr_to_metadata(unsigned long addr) -{ - long index; - - /* The checks do not affect performance; only called from slow-paths. */ - - if (!is_kfence_address((void *)addr)) - return NULL; - - /* - * May be an invalid index if called with an address at the edge of - * __kfence_pool, in which case we would report an "invalid access" - * error. - */ - index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1; - if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS) - return NULL; - - return &kfence_metadata[index]; -} - static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta) { unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2; diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index 9a6c4b1b12a8..600f2e2431d6 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -96,6 +96,27 @@ struct kfence_metadata { extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS]; +static inline struct kfence_metadata *addr_to_metadata(unsigned long addr) +{ + long index; + + /* The checks do not affect performance; only called from slow-paths. 
*/ + + if (!is_kfence_address((void *)addr)) + return NULL; + + /* + * May be an invalid index if called with an address at the edge of + * __kfence_pool, in which case we would report an "invalid access" + * error. + */ + index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1; + if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS) + return NULL; + + return &kfence_metadata[index]; +} + /* KFENCE error types for report generation. */ enum kfence_error_type { KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */ diff --git a/mm/kfence/report.c b/mm/kfence/report.c index f93a7b2a338b..f5a6d8ba3e21 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -273,3 +273,50 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r /* We encountered a memory safety error, taint the kernel! */ add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK); } + +#ifdef CONFIG_PRINTK +static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack) +{ + int i, j; + + i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL); + for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j) + kp_stack[j] = (void *)track->stack_entries[i]; + if (j < KS_ADDRS_COUNT) + kp_stack[j] = NULL; +} + +bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) +{ + struct kfence_metadata *meta = addr_to_metadata((unsigned long)object); + unsigned long flags; + + if (!meta) + return false; + + /* + * If state is UNUSED at least show the pointer requested; the rest + * would be garbage data. + */ + kpp->kp_ptr = object; + + /* Requesting info an a never-used object is almost certainly a bug. */ + if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED)) + return true; + + raw_spin_lock_irqsave(&meta->lock, flags); + + kpp->kp_slab = slab; + kpp->kp_slab_cache = meta->cache; + kpp->kp_objp = (void *)meta->addr; + kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack); + if (meta->state == KFENCE_OBJECT_FREED) + kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack); + /* get_stack_skipnr() ensures the first entry is outside allocator. 
*/ + kpp->kp_ret = kpp->kp_stack[0]; + + raw_spin_unlock_irqrestore(&meta->lock, flags); + + return true; +} +#endif diff --git a/mm/slab.c b/mm/slab.c index b04e40078bdf..0edb474edef1 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3665,7 +3665,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller); #endif /* CONFIG_NUMA */ #ifdef CONFIG_PRINTK -void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) +void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) { struct kmem_cache *cachep; unsigned int objnr; diff --git a/mm/slab.h b/mm/slab.h index fd7ae2024897..95eb34174c1b 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -868,7 +868,7 @@ struct kmem_obj_info { void *kp_stack[KS_ADDRS_COUNT]; void *kp_free_stack[KS_ADDRS_COUNT]; }; -void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab); +void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab); #endif #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR diff --git a/mm/slab_common.c b/mm/slab_common.c index 6ee64d6208b3..2b3206a2c3b5 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object) } EXPORT_SYMBOL_GPL(kmem_valid_obj); +static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) +{ + if (__kfence_obj_info(kpp, object, slab)) + return; + __kmem_obj_info(kpp, object, slab); +} + /** * kmem_dump_obj - Print available slab provenance information * @object: slab object for which to find provenance information. @@ -590,6 +597,8 @@ void kmem_dump_obj(void *object) pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name); else pr_cont(" slab%s", cp); + if (is_kfence_address(object)) + pr_cont(" (kfence)"); if (kp.kp_objp) pr_cont(" start %px", kp.kp_objp); if (kp.kp_data_offset) diff --git a/mm/slob.c b/mm/slob.c index dfa6808dff36..40ea6e2d4ccd 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -463,7 +463,7 @@ out: } #ifdef CONFIG_PRINTK -void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) +void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) { kpp->kp_ptr = object; kpp->kp_slab = slab; diff --git a/mm/slub.c b/mm/slub.c index 74d92aa4a3a2..ed5c2c03a47a 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4312,7 +4312,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s) } #ifdef CONFIG_PRINTK -void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) +void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) { void *base; int __maybe_unused i; -- cgit v1.2.3-71-gd317 From 705191b03d507744c7e097f78d583621c14988ac Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Tue, 19 Apr 2022 15:14:23 +0200 Subject: fs: fix acl translation Last cycle we extended the idmapped mounts infrastructure to support idmapped mounts of idmapped filesystems (No such filesystem yet exist.). Since then, the meaning of an idmapped mount is a mount whose idmapping is different from the filesystems idmapping. While doing that work we missed to adapt the acl translation helpers. They still assume that checking for the identity mapping is enough. But they need to use the no_idmapping() helper instead. Note, POSIX ACLs are always translated right at the userspace-kernel boundary using the caller's current idmapping and the initial idmapping. The order depends on whether we're coming from or going to userspace. The filesystem's idmapping doesn't matter at the border. 
Consequently, if a non-idmapped mount is passed we need to make sure to always pass the initial idmapping as the mount's idmapping and not the filesystem idmapping. Since it's irrelevant here it would yield invalid ids and prevent setting acls for filesystems that are mountable in a userns and support posix acls (tmpfs and fuse). I verified the regression reported in [1] and verified that this patch fixes it. A regression test will be added to xfstests in parallel. Link: https://bugzilla.kernel.org/show_bug.cgi?id=215849 [1] Fixes: bd303368b776 ("fs: support mapped mounts of mapped filesystems") Cc: Seth Forshee Cc: Christoph Hellwig Cc: # 5.17 Cc: Signed-off-by: Christian Brauner (Microsoft) Signed-off-by: Linus Torvalds --- fs/posix_acl.c | 10 ++++++++++ fs/xattr.c | 6 ++++-- include/linux/posix_acl_xattr.h | 4 ++++ 3 files changed, 18 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 80acb6885cf9..962d32468eb4 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -759,9 +759,14 @@ static void posix_acl_fix_xattr_userns( } void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, + struct inode *inode, void *value, size_t size) { struct user_namespace *user_ns = current_user_ns(); + + /* Leave ids untouched on non-idmapped mounts. */ + if (no_idmapping(mnt_userns, i_user_ns(inode))) + mnt_userns = &init_user_ns; if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns)) return; posix_acl_fix_xattr_userns(&init_user_ns, user_ns, mnt_userns, value, @@ -769,9 +774,14 @@ void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, } void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, + struct inode *inode, void *value, size_t size) { struct user_namespace *user_ns = current_user_ns(); + + /* Leave ids untouched on non-idmapped mounts. 
*/ + if (no_idmapping(mnt_userns, i_user_ns(inode))) + mnt_userns = &init_user_ns; if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns)) return; posix_acl_fix_xattr_userns(user_ns, &init_user_ns, mnt_userns, value, diff --git a/fs/xattr.c b/fs/xattr.c index 5c8c5175b385..998045165916 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -569,7 +569,8 @@ setxattr(struct user_namespace *mnt_userns, struct dentry *d, } if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) || (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)) - posix_acl_fix_xattr_from_user(mnt_userns, kvalue, size); + posix_acl_fix_xattr_from_user(mnt_userns, d_inode(d), + kvalue, size); } error = vfs_setxattr(mnt_userns, d, kname, kvalue, size, flags); @@ -667,7 +668,8 @@ getxattr(struct user_namespace *mnt_userns, struct dentry *d, if (error > 0) { if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) || (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)) - posix_acl_fix_xattr_to_user(mnt_userns, kvalue, error); + posix_acl_fix_xattr_to_user(mnt_userns, d_inode(d), + kvalue, error); if (size && copy_to_user(value, kvalue, error)) error = -EFAULT; } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) { diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h index 060e8d203181..1766e1de6956 100644 --- a/include/linux/posix_acl_xattr.h +++ b/include/linux/posix_acl_xattr.h @@ -34,15 +34,19 @@ posix_acl_xattr_count(size_t size) #ifdef CONFIG_FS_POSIX_ACL void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, + struct inode *inode, void *value, size_t size); void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, + struct inode *inode, void *value, size_t size); #else static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns, + struct inode *inode, void *value, size_t size) { } static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns, + struct inode *inode, void *value, size_t size) { } -- cgit v1.2.3-71-gd317 From 559089e0a93d44280ec3ab478830af319c56dbe3 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 15 Apr 2022 09:44:10 -0700 Subject: vmalloc: replace VM_NO_HUGE_VMAP with VM_ALLOW_HUGE_VMAP Huge page backed vmalloc memory could benefit performance in many cases. However, some users of vmalloc may not be ready to handle huge pages for various reasons: hardware constraints, potential pages split, etc. VM_NO_HUGE_VMAP was introduced to allow vmalloc users to opt-out huge pages. However, it is not easy to track down all the users that require the opt-out, as the allocation are passed different stacks and may cause issues in different layers. To address this issue, replace VM_NO_HUGE_VMAP with an opt-in flag, VM_ALLOW_HUGE_VMAP, so that users that benefit from huge pages could ask specificially. Also, remove vmalloc_no_huge() and add opt-in helper vmalloc_huge(). 
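As a rough sketch of the new opt-in model (the caller below is illustrative, not part of this patch), code that can tolerate PMD-sized mappings asks for them explicitly, while everything that sticks to vmalloc()/vzalloc() keeps getting PAGE_SIZE ptes by default:

	/* Illustrative caller: explicitly allow huge pages for a large buffer. */
	static void *alloc_big_buffer(unsigned long size)
	{
		/*
		 * Internally this sets VM_ALLOW_HUGE_VMAP, so on archs with
		 * HAVE_ARCH_HUGE_VMALLOC an allocation of at least PMD_SIZE
		 * may be backed by huge pages.
		 */
		return vmalloc_huge(size, GFP_KERNEL);
	}

Callers that cannot handle huge pages need no flag at all any more, which is why the s390 pv hunk below can switch from vmalloc_no_huge(vlen) to a plain vzalloc(vlen).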
Fixes: fac54e2bfb5b ("x86/Kconfig: Select HAVE_ARCH_HUGE_VMALLOC with HAVE_ARCH_HUGE_VMAP") Link: https://lore.kernel.org/netdev/14444103-d51b-0fb3-ee63-c3f182f0b546@molgen.mpg.de/" Reviewed-by: Christoph Hellwig Signed-off-by: Song Liu Reviewed-by: Rik van Riel Signed-off-by: Linus Torvalds --- arch/Kconfig | 6 ++---- arch/powerpc/kernel/module.c | 2 +- arch/s390/kvm/pv.c | 7 +------ include/linux/vmalloc.h | 4 ++-- mm/vmalloc.c | 17 ++++++++++------- 5 files changed, 16 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 29b0167c088b..31c4fdc4a4ba 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -854,10 +854,8 @@ config HAVE_ARCH_HUGE_VMAP # # Archs that select this would be capable of PMD-sized vmaps (i.e., -# arch_vmap_pmd_supported() returns true), and they must make no assumptions -# that vmalloc memory is mapped with PAGE_SIZE ptes. The VM_NO_HUGE_VMAP flag -# can be used to prohibit arch-specific allocations from using hugepages to -# help with this (e.g., modules may require it). +# arch_vmap_pmd_supported() returns true). The VM_ALLOW_HUGE_VMAP flag +# must be used to enable allocations to use hugepages. # config HAVE_ARCH_HUGE_VMALLOC depends on HAVE_ARCH_HUGE_VMAP diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 40a583e9d3c7..97a76a8619fb 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -101,7 +101,7 @@ __module_alloc(unsigned long size, unsigned long start, unsigned long end, bool * too. */ return __vmalloc_node_range(size, 1, start, end, gfp, prot, - VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP, + VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, __builtin_return_address(0)); } diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c index 7f7c0d6af2ce..cc7c9599f43e 100644 --- a/arch/s390/kvm/pv.c +++ b/arch/s390/kvm/pv.c @@ -137,12 +137,7 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm) /* Allocate variable storage */ vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE); vlen += uv_info.guest_virt_base_stor_len; - /* - * The Create Secure Configuration Ultravisor Call does not support - * using large pages for the virtual memory area. - * This is a hardware limitation. 
- */ - kvm->arch.pv.stor_var = vmalloc_no_huge(vlen); + kvm->arch.pv.stor_var = vzalloc(vlen); if (!kvm->arch.pv.stor_var) goto out_err; return 0; diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 3b1df7da402d..b159c2789961 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -26,7 +26,7 @@ struct notifier_block; /* in notifier.h */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ #define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */ #define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */ -#define VM_NO_HUGE_VMAP 0x00000400 /* force PAGE_SIZE pte mapping */ +#define VM_ALLOW_HUGE_VMAP 0x00000400 /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */ #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ !defined(CONFIG_KASAN_VMALLOC) @@ -153,7 +153,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align, const void *caller) __alloc_size(1); void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) __alloc_size(1); -void *vmalloc_no_huge(unsigned long size) __alloc_size(1); +void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1); extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 0b17498a34f1..07da85ae825b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3095,7 +3095,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, return NULL; } - if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) { + if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { unsigned long size_per_node; /* @@ -3262,21 +3262,24 @@ void *vmalloc(unsigned long size) EXPORT_SYMBOL(vmalloc); /** - * vmalloc_no_huge - allocate virtually contiguous memory using small pages - * @size: allocation size + * vmalloc_huge - allocate virtually contiguous memory, allow huge pages + * @size: allocation size + * @gfp_mask: flags for the page level allocator * - * Allocate enough non-huge pages to cover @size from the page level + * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. + * If @size is greater than or equal to PMD_SIZE, allow using + * huge pages for the memory * * Return: pointer to the allocated memory or %NULL on error */ -void *vmalloc_no_huge(unsigned long size) +void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) { return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, - GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP, + gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, NUMA_NO_NODE, __builtin_return_address(0)); } -EXPORT_SYMBOL(vmalloc_no_huge); +EXPORT_SYMBOL_GPL(vmalloc_huge); /** * vzalloc - allocate virtually contiguous memory with zero fill -- cgit v1.2.3-71-gd317 From d4e5268a08b211b536fed29beb24271ecd85187e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 8 Apr 2022 11:45:55 +0200 Subject: x86,objtool: Mark cpu_startup_entry() __noreturn GCC-8 isn't clever enough to figure out that cpu_start_entry() is a noreturn while objtool is. This results in code after the call in start_secondary(). Give GCC a hand so that they all agree on things. 
vmlinux.o: warning: objtool: start_secondary()+0x10e: unreachable Reported-by: Rick Edgecombe Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20220408094718.383658532@infradead.org --- include/linux/cpu.h | 2 +- tools/objtool/check.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 9cf51e41e697..54dc2f9a2d56 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -167,7 +167,7 @@ static inline int suspend_disable_secondary_cpus(void) { return 0; } static inline void suspend_enable_secondary_cpus(void) { } #endif /* !CONFIG_PM_SLEEP_SMP */ -void cpu_startup_entry(enum cpuhp_state state); +void __noreturn cpu_startup_entry(enum cpuhp_state state); void cpu_idle_poll_ctrl(bool enable); diff --git a/tools/objtool/check.c b/tools/objtool/check.c index bd0c2c828940..e3a675d6a704 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -184,6 +184,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "do_group_exit", "stop_this_cpu", "__invalid_creds", + "cpu_startup_entry", }; if (!func) -- cgit v1.2.3-71-gd317 From 37c5f9e80e015d0df17d0c377c18523002986851 Mon Sep 17 00:00:00 2001 From: Oleksandr Ocheretnyi Date: Sun, 17 Apr 2022 11:46:47 -0700 Subject: mtd: fix 'part' field data corruption in mtd_info Commit 46b5889cc2c5 ("mtd: implement proper partition handling") started using "mtd_get_master_ofs()" in mtd callbacks to determine memory offsets by means of 'part' field from mtd_info, what previously was smashed accessing 'master' field in the mtd_set_dev_defaults() method. That provides wrong offset what causes hardware access errors. Just make 'part', 'master' as separate fields, rather than using union type to avoid 'part' data corruption when mtd_set_dev_defaults() is called. Fixes: 46b5889cc2c5 ("mtd: implement proper partition handling") Signed-off-by: Oleksandr Ocheretnyi Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20220417184649.449289-1-oocheret@cisco.com --- include/linux/mtd/mtd.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 151607e9d64a..955aee14b0f7 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -389,10 +389,8 @@ struct mtd_info { /* List of partitions attached to this MTD device */ struct list_head partitions; - union { - struct mtd_part part; - struct mtd_master master; - }; + struct mtd_part part; + struct mtd_master master; }; static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd) -- cgit v1.2.3-71-gd317 From 8d084b2eae7fc5fcfc9f143cd7321a88e1cd76aa Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 5 Apr 2022 17:15:13 +0200 Subject: usb: typec: tcpm: Fix undefined behavior due to shift overflowing the constant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix: drivers/usb/typec/tcpm/tcpm.c: In function ‘run_state_machine’: drivers/usb/typec/tcpm/tcpm.c:4724:3: error: case label does not reduce to an integer constant case BDO_MODE_TESTDATA: ^~~~ See https://lore.kernel.org/r/YkwQ6%2BtIH8GQpuct@zn.tnic for the gory details as to why it triggers with older gccs only. 
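A self-contained illustration of the underlying C issue (userspace code, not part of the patch): with a 32-bit int, 8 << 28 shifts a bit into the sign position, which overflows the signed type and is undefined behaviour in C, and some older compilers then refuse to treat it as a constant for a case label; the unsigned form has the well-defined value 0x80000000.

	#include <stdio.h>

	/* old definition: 8 is a signed int, the shift overflows into the sign bit */
	/* #define BDO_MODE_TESTDATA	(8 << 28) */

	/* new definition: unsigned shift, well-defined value 0x80000000 */
	#define BDO_MODE_TESTDATA	(8U << 28)

	int main(void)
	{
		unsigned int mode = 0x80000000u;

		switch (mode) {
		case BDO_MODE_TESTDATA:	/* reduces to an integer constant for any compiler */
			printf("BDO_MODE_TESTDATA = %#x\n", BDO_MODE_TESTDATA);
			break;
		}
		return 0;
	}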
Signed-off-by: Borislav Petkov Cc: Greg Kroah-Hartman Cc: linux-usb@vger.kernel.org Link: https://lore.kernel.org/r/20220405151517.29753-8-bp@alien8.de Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/pd_bdo.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/usb/pd_bdo.h b/include/linux/usb/pd_bdo.h index 033fe3e17141..7c25b88d79f9 100644 --- a/include/linux/usb/pd_bdo.h +++ b/include/linux/usb/pd_bdo.h @@ -15,7 +15,7 @@ #define BDO_MODE_CARRIER2 (5 << 28) #define BDO_MODE_CARRIER3 (6 << 28) #define BDO_MODE_EYE (7 << 28) -#define BDO_MODE_TESTDATA (8 << 28) +#define BDO_MODE_TESTDATA (8U << 28) #define BDO_MODE_MASK(mode) ((mode) & 0xf0000000) -- cgit v1.2.3-71-gd317 From 2031f2876896d82aca7e82f84accd9181b9587fb Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 15 Apr 2022 00:43:43 +0000 Subject: KVM: Add helpers to wrap vcpu->srcu_idx and yell if it's abused Add wrappers to acquire/release KVM's SRCU lock when stashing the index in vcpu->src_idx, along with rudimentary detection of illegal usage, e.g. re-acquiring SRCU and thus overwriting vcpu->src_idx. Because the SRCU index is (currently) either 0 or 1, illegal nesting bugs can go unnoticed for quite some time and only cause problems when the nested lock happens to get a different index. Wrap the WARNs in PROVE_RCU=y, and make them ONCE, otherwise KVM will likely yell so loudly that it will bring the kernel to its knees. Signed-off-by: Sean Christopherson Tested-by: Fabiano Rosas Message-Id: <20220415004343.2203171-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/powerpc/kvm/book3s_64_mmu_radix.c | 9 +++++---- arch/powerpc/kvm/book3s_hv_nested.c | 16 ++++++++-------- arch/powerpc/kvm/book3s_rtas.c | 4 ++-- arch/powerpc/kvm/powerpc.c | 4 ++-- arch/riscv/kvm/vcpu.c | 16 ++++++++-------- arch/riscv/kvm/vcpu_exit.c | 4 ++-- arch/s390/kvm/interrupt.c | 4 ++-- arch/s390/kvm/kvm-s390.c | 8 ++++---- arch/s390/kvm/vsie.c | 4 ++-- arch/x86/kvm/x86.c | 28 +++++++++++++--------------- include/linux/kvm_host.h | 24 +++++++++++++++++++++++- 11 files changed, 71 insertions(+), 50 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index e4ce2a35483f..42851c32ff3b 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -168,9 +168,10 @@ int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, return -EINVAL; /* Read the entry from guest memory */ addr = base + (index * sizeof(rpte)); - vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); + + kvm_vcpu_srcu_read_lock(vcpu); ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte)); - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); if (ret) { if (pte_ret_p) *pte_ret_p = addr; @@ -246,9 +247,9 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, /* Read the table to find the root of the radix tree */ ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry)); - vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry)); - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); if (ret) return ret; diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 9d373f8963ee..c943a051c6e7 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -306,10 +306,10 @@ long 
kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) /* copy parameters in */ hv_ptr = kvmppc_get_gpr(vcpu, 4); regs_ptr = kvmppc_get_gpr(vcpu, 5); - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs, hv_ptr, regs_ptr); - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); if (err) return H_PARAMETER; @@ -410,10 +410,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) byteswap_hv_regs(&l2_hv); byteswap_pt_regs(&l2_regs); } - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs, hv_ptr, regs_ptr); - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); if (err) return H_AUTHORITY; @@ -600,16 +600,16 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu) goto not_found; /* Write what was loaded into our buffer back to the L1 guest */ - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n); - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); if (rc) goto not_found; } else { /* Load the data to be stored from the L1 guest into our buf */ - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n); - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); if (rc) goto not_found; diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 0f847f1e5ddd..6808bda0dbc1 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -229,9 +229,9 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) */ args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM; - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args)); - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); if (rc) goto fail; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 875c30c12db0..533c4232e5ab 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -425,9 +425,9 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, return EMULATE_DONE; } - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); if (rc) return EMULATE_DO_MMIO; diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 4a52fda6417b..7461f964d20a 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -727,13 +727,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* Mark this VCPU ran at least once */ vcpu->arch.ran_atleast_once = true; - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); /* Process MMIO value returned from user-space */ if (run->exit_reason == KVM_EXIT_MMIO) { ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run); if (ret) { - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); return ret; } } @@ -742,13 +742,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (run->exit_reason == KVM_EXIT_RISCV_SBI) { ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run); if (ret) { - srcu_read_unlock(&vcpu->kvm->srcu, 
vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); return ret; } } if (run->immediate_exit) { - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); return -EINTR; } @@ -787,7 +787,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) */ vcpu->mode = IN_GUEST_MODE; - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); smp_mb__after_srcu_read_unlock(); /* @@ -805,7 +805,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) vcpu->mode = OUTSIDE_GUEST_MODE; local_irq_enable(); preempt_enable(); - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); continue; } @@ -849,7 +849,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) preempt_enable(); - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); ret = kvm_riscv_vcpu_exit(vcpu, run, &trap); } @@ -858,7 +858,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) vcpu_put(vcpu); - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); return ret; } diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c index 2d56faddb9d1..a72c15d4b42a 100644 --- a/arch/riscv/kvm/vcpu_exit.c +++ b/arch/riscv/kvm/vcpu_exit.c @@ -456,9 +456,9 @@ static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run, void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu) { if (!kvm_arch_vcpu_runnable(vcpu)) { - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); kvm_vcpu_halt(vcpu); - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); } } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 9b30beac904d..af96dc0549a4 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1334,11 +1334,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL); VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime); no_timer: - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); kvm_vcpu_halt(vcpu); vcpu->valid_wakeup = false; __unset_cpu_idle(vcpu); - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); hrtimer_cancel(&vcpu->arch.ckc_timer); return 0; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 156d1c25a3c1..da3dabda1a12 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -4237,14 +4237,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) * We try to hold kvm->srcu during most of vcpu_run (except when run- * ning the guest), so that memslots (and other stuff) are protected */ - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); do { rc = vcpu_pre_run(vcpu); if (rc) break; - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); /* * As PF_VCPU will be used in fault handler, between * guest_enter and guest_exit should be no uaccess. 
@@ -4281,12 +4281,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) __enable_cpu_timer_accounting(vcpu); guest_exit_irqoff(); local_irq_enable(); - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); rc = vcpu_post_run(vcpu, exit_reason); } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); return rc; } diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index acda4b6fc851..dada78b92691 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -1091,7 +1091,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) handle_last_fault(vcpu, vsie_page); - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); /* save current guest state of bp isolation override */ guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST); @@ -1133,7 +1133,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if (!guest_bp_isolation) clear_thread_flag(TIF_ISOLATE_BP_GUEST); - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); if (rc == -EINTR) { VCPU_EVENT(vcpu, 3, "%s", "machine check"); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 867c0fd8d187..51eb27824452 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10097,7 +10097,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) /* Store vcpu->apicv_active before vcpu->mode. */ smp_store_release(&vcpu->mode, IN_GUEST_MODE); - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); /* * 1) We should set ->mode before checking ->requests. Please see @@ -10128,7 +10128,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) smp_wmb(); local_irq_enable(); preempt_enable(); - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); r = 1; goto cancel_injection; } @@ -10254,7 +10254,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) local_irq_enable(); preempt_enable(); - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); /* * Profile KVM exit RIPs: @@ -10284,7 +10284,7 @@ out: } /* Called within kvm->srcu read side. 
*/ -static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) +static inline int vcpu_block(struct kvm_vcpu *vcpu) { bool hv_timer; @@ -10300,12 +10300,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) if (hv_timer) kvm_lapic_switch_to_sw_timer(vcpu); - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) kvm_vcpu_halt(vcpu); else kvm_vcpu_block(vcpu); - vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); if (hv_timer) kvm_lapic_switch_to_hv_timer(vcpu); @@ -10347,7 +10347,6 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) static int vcpu_run(struct kvm_vcpu *vcpu) { int r; - struct kvm *kvm = vcpu->kvm; vcpu->arch.l1tf_flush_l1d = true; @@ -10355,7 +10354,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu) if (kvm_vcpu_running(vcpu)) { r = vcpu_enter_guest(vcpu); } else { - r = vcpu_block(kvm, vcpu); + r = vcpu_block(vcpu); } if (r <= 0) @@ -10374,9 +10373,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu) } if (__xfer_to_guest_mode_work_pending()) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); r = xfer_to_guest_mode_handle_work(vcpu); - vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); if (r) return r; } @@ -10479,7 +10478,6 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) { struct kvm_run *kvm_run = vcpu->run; - struct kvm *kvm = vcpu->kvm; int r; vcpu_load(vcpu); @@ -10487,7 +10485,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_run->flags = 0; kvm_load_guest_fpu(vcpu); - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { if (kvm_run->immediate_exit) { r = -EINTR; @@ -10499,9 +10497,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) */ WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu)); - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); kvm_vcpu_block(vcpu); - vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); + kvm_vcpu_srcu_read_lock(vcpu); if (kvm_apic_accept_events(vcpu) < 0) { r = 0; @@ -10562,7 +10560,7 @@ out: if (kvm_run->kvm_valid_regs) store_regs(vcpu); post_kvm_run_save(vcpu); - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); + kvm_vcpu_srcu_read_unlock(vcpu); kvm_sigset_deactivate(vcpu); vcpu_put(vcpu); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 3f9b22c4983a..2dab4b696682 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -315,7 +315,10 @@ struct kvm_vcpu { int cpu; int vcpu_id; /* id given by userspace at creation */ int vcpu_idx; /* index in kvm->vcpus array */ - int srcu_idx; + int ____srcu_idx; /* Don't use this directly. You've been warned. 
*/ +#ifdef CONFIG_PROVE_RCU + int srcu_depth; +#endif int mode; u64 requests; unsigned long guest_debug; @@ -840,6 +843,25 @@ static inline void kvm_vm_bugged(struct kvm *kvm) unlikely(__ret); \ }) +static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_PROVE_RCU + WARN_ONCE(vcpu->srcu_depth++, + "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1); +#endif + vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); +} + +static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu) +{ + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx); + +#ifdef CONFIG_PROVE_RCU + WARN_ONCE(--vcpu->srcu_depth, + "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth); +#endif +} + static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) { return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); -- cgit v1.2.3-71-gd317 From 683412ccf61294d727ead4a73d97397396e69a6b Mon Sep 17 00:00:00 2001 From: Mingwei Zhang Date: Thu, 21 Apr 2022 03:14:07 +0000 Subject: KVM: SEV: add cache flush to solve SEV cache incoherency issues Flush the CPU caches when memory is reclaimed from an SEV guest (where reclaim also includes it being unmapped from KVM's memslots). Due to lack of coherency for SEV encrypted memory, failure to flush results in silent data corruption if userspace is malicious/broken and doesn't ensure SEV guest memory is properly pinned and unpinned. Cache coherency is not enforced across the VM boundary in SEV (AMD APM vol.2 Section 15.34.7). Confidential cachelines, generated by confidential VM guests have to be explicitly flushed on the host side. If a memory page containing dirty confidential cachelines was released by VM and reallocated to another user, the cachelines may corrupt the new user at a later time. KVM takes a shortcut by assuming all confidential memory remain pinned until the end of VM lifetime. Therefore, KVM does not flush cache at mmu_notifier invalidation events. Because of this incorrect assumption and the lack of cache flushing, malicous userspace can crash the host kernel: creating a malicious VM and continuously allocates/releases unpinned confidential memory pages when the VM is running. Add cache flush operations to mmu_notifier operations to ensure that any physical memory leaving the guest VM get flushed. In particular, hook mmu_notifier_invalidate_range_start and mmu_notifier_release events and flush cache accordingly. The hook after releasing the mmu lock to avoid contention with other vCPUs. 
Cc: stable@vger.kernel.org Suggested-by: Sean Christpherson Reported-by: Mingwei Zhang Signed-off-by: Mingwei Zhang Message-Id: <20220421031407.2516575-4-mizhang@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm-x86-ops.h | 1 + arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm/sev.c | 8 ++++++++ arch/x86/kvm/svm/svm.c | 1 + arch/x86/kvm/svm/svm.h | 2 ++ arch/x86/kvm/x86.c | 5 +++++ include/linux/kvm_host.h | 2 ++ virt/kvm/kvm_main.c | 27 ++++++++++++++++++++++++--- 8 files changed, 44 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 3c368b639c04..1a6d7e3f6c32 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -118,6 +118,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_register_region) KVM_X86_OP_OPTIONAL(mem_enc_unregister_region) KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from) KVM_X86_OP_OPTIONAL(vm_move_enc_context_from) +KVM_X86_OP_OPTIONAL(guest_memory_reclaimed) KVM_X86_OP(get_msr_feature) KVM_X86_OP(can_emulate_instruction) KVM_X86_OP(apic_init_signal_blocked) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index e0c0f0e1f754..4ff36610af6a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1484,6 +1484,7 @@ struct kvm_x86_ops { int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd); int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd); + void (*guest_memory_reclaimed)(struct kvm *kvm); int (*get_msr_feature)(struct kvm_msr_entry *entry); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 9a0375987029..0ad70c12c7c3 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2262,6 +2262,14 @@ do_wbinvd: wbinvd_on_all_cpus(); } +void sev_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!sev_guest(kvm)) + return; + + wbinvd_on_all_cpus(); +} + void sev_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index bd4c64b362d2..7e45d03cd018 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4620,6 +4620,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .mem_enc_ioctl = sev_mem_enc_ioctl, .mem_enc_register_region = sev_mem_enc_register_region, .mem_enc_unregister_region = sev_mem_enc_unregister_region, + .guest_memory_reclaimed = sev_guest_memory_reclaimed, .vm_copy_enc_context_from = sev_vm_copy_enc_context_from, .vm_move_enc_context_from = sev_vm_move_enc_context_from, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index f77a7d2d39dd..f76deff71002 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -609,6 +609,8 @@ int sev_mem_enc_unregister_region(struct kvm *kvm, struct kvm_enc_region *range); int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd); int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd); +void sev_guest_memory_reclaimed(struct kvm *kvm); + void pre_sev_run(struct vcpu_svm *svm, int cpu); void __init sev_set_cpu_caps(void); void __init sev_hardware_setup(void); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c89dc09a764f..a6ab19afc638 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9889,6 +9889,11 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); } +void 
kvm_arch_guest_memory_reclaimed(struct kvm *kvm) +{ + static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm); +} + static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) { if (!lapic_in_kernel(vcpu)) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2dab4b696682..34eed5f85ed6 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -2219,6 +2219,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, unsigned long start, unsigned long end); +void kvm_arch_guest_memory_reclaimed(struct kvm *kvm); + #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); #else diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2a23f24d13cf..f30bb8c16f26 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -164,6 +164,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, { } +__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) +{ +} + bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) { /* @@ -357,6 +361,12 @@ void kvm_flush_remote_tlbs(struct kvm *kvm) EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); #endif +static void kvm_flush_shadow_all(struct kvm *kvm) +{ + kvm_arch_flush_shadow_all(kvm); + kvm_arch_guest_memory_reclaimed(kvm); +} + #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, gfp_t gfp_flags) @@ -485,12 +495,15 @@ typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start, unsigned long end); +typedef void (*on_unlock_fn_t)(struct kvm *kvm); + struct kvm_hva_range { unsigned long start; unsigned long end; pte_t pte; hva_handler_t handler; on_lock_fn_t on_lock; + on_unlock_fn_t on_unlock; bool flush_on_ret; bool may_block; }; @@ -578,8 +591,11 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, if (range->flush_on_ret && ret) kvm_flush_remote_tlbs(kvm); - if (locked) + if (locked) { KVM_MMU_UNLOCK(kvm); + if (!IS_KVM_NULL_FN(range->on_unlock)) + range->on_unlock(kvm); + } srcu_read_unlock(&kvm->srcu, idx); @@ -600,6 +616,7 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn, .pte = pte, .handler = handler, .on_lock = (void *)kvm_null_fn, + .on_unlock = (void *)kvm_null_fn, .flush_on_ret = true, .may_block = false, }; @@ -619,6 +636,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn .pte = __pte(0), .handler = handler, .on_lock = (void *)kvm_null_fn, + .on_unlock = (void *)kvm_null_fn, .flush_on_ret = false, .may_block = false, }; @@ -687,6 +705,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, .pte = __pte(0), .handler = kvm_unmap_gfn_range, .on_lock = kvm_inc_notifier_count, + .on_unlock = kvm_arch_guest_memory_reclaimed, .flush_on_ret = true, .may_block = mmu_notifier_range_blockable(range), }; @@ -741,6 +760,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, .pte = __pte(0), .handler = (void *)kvm_null_fn, .on_lock = kvm_dec_notifier_count, + .on_unlock = (void *)kvm_null_fn, .flush_on_ret = false, .may_block = mmu_notifier_range_blockable(range), }; @@ -813,7 +833,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, int idx; idx = srcu_read_lock(&kvm->srcu); - kvm_arch_flush_shadow_all(kvm); + kvm_flush_shadow_all(kvm); srcu_read_unlock(&kvm->srcu, idx); } @@ -1225,7 +1245,7 @@ static void 
kvm_destroy_vm(struct kvm *kvm) WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait)); kvm->mn_active_invalidate_count = 0; #else - kvm_arch_flush_shadow_all(kvm); + kvm_flush_shadow_all(kvm); #endif kvm_arch_destroy_vm(kvm); kvm_destroy_devices(kvm); @@ -1652,6 +1672,7 @@ static void kvm_invalidate_memslot(struct kvm *kvm, * - kvm_is_visible_gfn (mmu_check_root) */ kvm_arch_flush_shadow_memslot(kvm, old); + kvm_arch_guest_memory_reclaimed(kvm); /* Was released by kvm_swap_active_memslots, reacquire. */ mutex_lock(&kvm->slots_arch_lock); -- cgit v1.2.3-71-gd317 From 405ce051236cc65b30bbfe490b28ce60ae6aed85 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Thu, 21 Apr 2022 16:35:33 -0700 Subject: mm/hwpoison: fix race between hugetlb free/demotion and memory_failure_hugetlb() There is a race condition between memory_failure_hugetlb() and hugetlb free/demotion, which causes setting PageHWPoison flag on the wrong page. The one simple result is that wrong processes can be killed, but another (more serious) one is that the actual error is left unhandled, so no one prevents later access to it, and that might lead to more serious results like consuming corrupted data. Think about the below race window: CPU 1 CPU 2 memory_failure_hugetlb struct page *head = compound_head(p); hugetlb page might be freed to buddy, or even changed to another compound page. get_hwpoison_page -- page is not what we want now... The current code first does prechecks roughly and then reconfirms after taking refcount, but it's found that it makes code overly complicated, so move the prechecks in a single hugetlb_lock range. A newly introduced function, try_memory_failure_hugetlb(), always takes hugetlb_lock (even for non-hugetlb pages). That can be improved, but memory_failure() is rare in principle, so should not be a big problem. 
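To make the race window concrete, here is a generic sketch in plain pthreads with invented names (struct obj, pin_if_huge), nothing hugetlb-specific: the buggy shape checks the object under the lock, drops it, and only then takes the reference, while the fixed shape keeps the precheck and the pin inside one lock section, which is what moving the prechecks into a single hugetlb_lock range achieves.

  #include <pthread.h>
  #include <stdbool.h>

  /* Generic illustration, not kernel code: another thread may free or recycle
   * *o at any point where the lock is not held. */
  struct obj {
          int refcount;
          bool is_huge;
  };

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  /* Racy shape: the check and the pin live in separate lock sections. */
  bool pin_if_huge_racy(struct obj *o)
  {
          bool huge;

          pthread_mutex_lock(&lock);
          huge = o->is_huge;              /* precheck */
          pthread_mutex_unlock(&lock);
                                          /* <-- *o may change identity here */
          if (!huge)
                  return false;
          pthread_mutex_lock(&lock);
          o->refcount++;                  /* pins whatever *o has become */
          pthread_mutex_unlock(&lock);
          return true;
  }

  /* Fixed shape: one lock section covers both the precheck and the pin. */
  bool pin_if_huge(struct obj *o)
  {
          bool pinned = false;

          pthread_mutex_lock(&lock);
          if (o->is_huge) {
                  o->refcount++;
                  pinned = true;
          }
          pthread_mutex_unlock(&lock);
          return pinned;
  }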
Link: https://lkml.kernel.org/r/20220408135323.1559401-2-naoya.horiguchi@linux.dev Fixes: 761ad8d7c7b5 ("mm: hwpoison: introduce memory_failure_hugetlb()") Signed-off-by: Naoya Horiguchi Reported-by: Mike Kravetz Reviewed-by: Miaohe Lin Reviewed-by: Mike Kravetz Cc: Yang Shi Cc: Dan Carpenter Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 6 ++ include/linux/mm.h | 8 +++ mm/hugetlb.c | 10 ++++ mm/memory-failure.c | 145 ++++++++++++++++++++++++++++++++++-------------- 4 files changed, 127 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 53c1b6082a4c..ac2a1d758a80 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -169,6 +169,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); bool isolate_huge_page(struct page *page, struct list_head *list); int get_hwpoison_huge_page(struct page *page, bool *hugetlb); +int get_huge_page_for_hwpoison(unsigned long pfn, int flags); void putback_active_hugepage(struct page *page); void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); void free_huge_page(struct page *page); @@ -378,6 +379,11 @@ static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb) return 0; } +static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags) +{ + return 0; +} + static inline void putback_active_hugepage(struct page *page) { } diff --git a/include/linux/mm.h b/include/linux/mm.h index e34edb775334..9f44254af8ce 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3197,6 +3197,14 @@ extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p); extern atomic_long_t num_poisoned_pages __read_mostly; extern int soft_offline_page(unsigned long pfn, int flags); +#ifdef CONFIG_MEMORY_FAILURE +extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags); +#else +static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags) +{ + return 0; +} +#endif #ifndef arch_memory_failure static inline int arch_memory_failure(unsigned long pfn, int flags) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f8ca7cca3c1a..3fc721789743 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6785,6 +6785,16 @@ int get_hwpoison_huge_page(struct page *page, bool *hugetlb) return ret; } +int get_huge_page_for_hwpoison(unsigned long pfn, int flags) +{ + int ret; + + spin_lock_irq(&hugetlb_lock); + ret = __get_huge_page_for_hwpoison(pfn, flags); + spin_unlock_irq(&hugetlb_lock); + return ret; +} + void putback_active_hugepage(struct page *page) { spin_lock_irq(&hugetlb_lock); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index dcb6bb9cf731..2020944398c9 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1498,50 +1498,113 @@ static int try_to_split_thp_page(struct page *page, const char *msg) return 0; } -static int memory_failure_hugetlb(unsigned long pfn, int flags) +/* + * Called from hugetlb code with hugetlb_lock held. 
+ * + * Return values: + * 0 - free hugepage + * 1 - in-use hugepage + * 2 - not a hugepage + * -EBUSY - the hugepage is busy (try to retry) + * -EHWPOISON - the hugepage is already hwpoisoned + */ +int __get_huge_page_for_hwpoison(unsigned long pfn, int flags) +{ + struct page *page = pfn_to_page(pfn); + struct page *head = compound_head(page); + int ret = 2; /* fallback to normal page handling */ + bool count_increased = false; + + if (!PageHeadHuge(head)) + goto out; + + if (flags & MF_COUNT_INCREASED) { + ret = 1; + count_increased = true; + } else if (HPageFreed(head) || HPageMigratable(head)) { + ret = get_page_unless_zero(head); + if (ret) + count_increased = true; + } else { + ret = -EBUSY; + goto out; + } + + if (TestSetPageHWPoison(head)) { + ret = -EHWPOISON; + goto out; + } + + return ret; +out: + if (count_increased) + put_page(head); + return ret; +} + +#ifdef CONFIG_HUGETLB_PAGE +/* + * Taking refcount of hugetlb pages needs extra care about race conditions + * with basic operations like hugepage allocation/free/demotion. + * So some of prechecks for hwpoison (pinning, and testing/setting + * PageHWPoison) should be done in single hugetlb_lock range. + */ +static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) { - struct page *p = pfn_to_page(pfn); - struct page *head = compound_head(p); int res; + struct page *p = pfn_to_page(pfn); + struct page *head; unsigned long page_flags; + bool retry = true; - if (TestSetPageHWPoison(head)) { - pr_err("Memory failure: %#lx: already hardware poisoned\n", - pfn); - res = -EHWPOISON; - if (flags & MF_ACTION_REQUIRED) + *hugetlb = 1; +retry: + res = get_huge_page_for_hwpoison(pfn, flags); + if (res == 2) { /* fallback to normal page handling */ + *hugetlb = 0; + return 0; + } else if (res == -EHWPOISON) { + pr_err("Memory failure: %#lx: already hardware poisoned\n", pfn); + if (flags & MF_ACTION_REQUIRED) { + head = compound_head(p); res = kill_accessing_process(current, page_to_pfn(head), flags); + } return res; + } else if (res == -EBUSY) { + if (retry) { + retry = false; + goto retry; + } + action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED); + return res; + } + + head = compound_head(p); + lock_page(head); + + if (hwpoison_filter(p)) { + ClearPageHWPoison(head); + res = -EOPNOTSUPP; + goto out; } num_poisoned_pages_inc(); - if (!(flags & MF_COUNT_INCREASED)) { - res = get_hwpoison_page(p, flags); - if (!res) { - lock_page(head); - if (hwpoison_filter(p)) { - if (TestClearPageHWPoison(head)) - num_poisoned_pages_dec(); - unlock_page(head); - return -EOPNOTSUPP; - } - unlock_page(head); - res = MF_FAILED; - if (__page_handle_poison(p)) { - page_ref_inc(p); - res = MF_RECOVERED; - } - action_result(pfn, MF_MSG_FREE_HUGE, res); - return res == MF_RECOVERED ? 0 : -EBUSY; - } else if (res < 0) { - action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED); - return -EBUSY; + /* + * Handling free hugepage. The possible race with hugepage allocation + * or demotion can be prevented by PageHWPoison flag. + */ + if (res == 0) { + unlock_page(head); + res = MF_FAILED; + if (__page_handle_poison(p)) { + page_ref_inc(p); + res = MF_RECOVERED; } + action_result(pfn, MF_MSG_FREE_HUGE, res); + return res == MF_RECOVERED ? 0 : -EBUSY; } - lock_page(head); - /* * The page could have changed compound pages due to race window. * If this happens just bail out. 
@@ -1554,14 +1617,6 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags) page_flags = head->flags; - if (hwpoison_filter(p)) { - if (TestClearPageHWPoison(head)) - num_poisoned_pages_dec(); - put_page(p); - res = -EOPNOTSUPP; - goto out; - } - /* * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so * simply disable it. In order to make it work properly, we need @@ -1588,6 +1643,12 @@ out: unlock_page(head); return res; } +#else +static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) +{ + return 0; +} +#endif static int memory_failure_dev_pagemap(unsigned long pfn, int flags, struct dev_pagemap *pgmap) @@ -1712,6 +1773,7 @@ int memory_failure(unsigned long pfn, int flags) int res = 0; unsigned long page_flags; bool retry = true; + int hugetlb = 0; if (!sysctl_memory_failure_recovery) panic("Memory failure on page %lx", pfn); @@ -1739,10 +1801,9 @@ int memory_failure(unsigned long pfn, int flags) } try_again: - if (PageHuge(p)) { - res = memory_failure_hugetlb(pfn, flags); + res = try_memory_failure_hugetlb(pfn, flags, &hugetlb); + if (hugetlb) goto unlock_mutex; - } if (TestSetPageHWPoison(p)) { pr_err("Memory failure: %#lx: already hardware poisoned\n", -- cgit v1.2.3-71-gd317 From 9b3016154c913b2e7ec5ae5c9a42eb9e732d86aa Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Thu, 21 Apr 2022 16:35:40 -0700 Subject: memcg: sync flush only if periodic flush is delayed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Daniel Dao has reported [1] a regression on workloads that may trigger a lot of refaults (anon and file). The underlying issue is that flushing rstat is expensive. Although rstat flush are batched with (nr_cpus * MEMCG_BATCH) stat updates, it seems like there are workloads which genuinely do stat updates larger than batch value within short amount of time. Since the rstat flush can happen in the performance critical codepaths like page faults, such workload can suffer greatly. This patch fixes this regression by making the rstat flushing conditional in the performance critical codepaths. More specifically, the kernel relies on the async periodic rstat flusher to flush the stats and only if the periodic flusher is delayed by more than twice the amount of its normal time window then the kernel allows rstat flushing from the performance critical codepaths. Now the question: what are the side-effects of this change? The worst that can happen is the refault codepath will see 4sec old lruvec stats and may cause false (or missed) activations of the refaulted page which may under-or-overestimate the workingset size. Though that is not very concerning as the kernel can already miss or do false activations. There are two more codepaths whose flushing behavior is not changed by this patch and we may need to come to them in future. One is the writeback stats used by dirty throttling and second is the deactivation heuristic in the reclaim. For now keeping an eye on them and if there is report of regression due to these codepaths, we will reevaluate then. 
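The gate itself is just a deadline comparison: every flush records when the next periodic flush is due, and the hot paths flush synchronously only once that deadline has already slipped. A minimal userspace sketch with invented names (the kernel uses jiffies and time_after64() rather than CLOCK_MONOTONIC, with 2*FLUSH_TIME as the slack):

  #include <time.h>

  #define FLUSH_PERIOD_SEC 2

  static time_t flush_deadline;           /* due time for the next async flush */

  static time_t now_sec(void)
  {
          struct timespec ts;

          clock_gettime(CLOCK_MONOTONIC, &ts);
          return ts.tv_sec;
  }

  static void do_flush(void)
  {
          /* the expensive stat aggregation would happen here */
          flush_deadline = now_sec() + 2 * FLUSH_PERIOD_SEC;
  }

  /* Periodic worker: always flushes and thereby pushes the deadline out. */
  void periodic_flush(void)
  {
          do_flush();
  }

  /* Hot path: pay for a synchronous flush only if the worker is overdue. */
  void flush_if_delayed(void)
  {
          if (now_sec() > flush_deadline)
                  do_flush();
  }

As long as the asynchronous worker keeps running on time, refault and similar hot paths never flush synchronously and at worst see stats that are roughly two flush periods old, as described above.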
Link: https://lore.kernel.org/all/CA+wXwBSyO87ZX5PVwdHm-=dBjZYECGmfnydUicUyrQqndgX2MQ@mail.gmail.com [1] Link: https://lkml.kernel.org/r/20220304184040.1304781-1-shakeelb@google.com Fixes: 1f828223b799 ("memcg: flush lruvec stats in the refault") Signed-off-by: Shakeel Butt Reported-by: Daniel Dao Tested-by: Ivan Babrou Cc: Michal Hocko Cc: Roman Gushchin Cc: Johannes Weiner Cc: Michal Koutný Cc: Frank Hofmann Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 5 +++++ mm/memcontrol.c | 12 +++++++++++- mm/workingset.c | 2 +- 3 files changed, 17 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index a68dce3873fc..89b14729d59f 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1012,6 +1012,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, } void mem_cgroup_flush_stats(void); +void mem_cgroup_flush_stats_delayed(void); void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); @@ -1455,6 +1456,10 @@ static inline void mem_cgroup_flush_stats(void) { } +static inline void mem_cgroup_flush_stats_delayed(void) +{ +} + static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 725f76723220..598fece89e2b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -587,6 +587,9 @@ static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork); static DEFINE_SPINLOCK(stats_flush_lock); static DEFINE_PER_CPU(unsigned int, stats_updates); static atomic_t stats_flush_threshold = ATOMIC_INIT(0); +static u64 flush_next_time; + +#define FLUSH_TIME (2UL*HZ) /* * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can @@ -637,6 +640,7 @@ static void __mem_cgroup_flush_stats(void) if (!spin_trylock_irqsave(&stats_flush_lock, flag)) return; + flush_next_time = jiffies_64 + 2*FLUSH_TIME; cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup); atomic_set(&stats_flush_threshold, 0); spin_unlock_irqrestore(&stats_flush_lock, flag); @@ -648,10 +652,16 @@ void mem_cgroup_flush_stats(void) __mem_cgroup_flush_stats(); } +void mem_cgroup_flush_stats_delayed(void) +{ + if (time_after64(jiffies_64, flush_next_time)) + mem_cgroup_flush_stats(); +} + static void flush_memcg_stats_dwork(struct work_struct *w) { __mem_cgroup_flush_stats(); - queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ); + queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME); } /** diff --git a/mm/workingset.c b/mm/workingset.c index 8a3828acc0bf..592569a8974c 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -355,7 +355,7 @@ void workingset_refault(struct folio *folio, void *shadow) mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr); - mem_cgroup_flush_stats(); + mem_cgroup_flush_stats_delayed(); /* * Compare the distance to the existing workingset size. We * don't activate pages that couldn't stay resident even if -- cgit v1.2.3-71-gd317 From 5f24d5a579d1eace79d505b148808a850b417d4c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 21 Apr 2022 16:35:46 -0700 Subject: mm, hugetlb: allow for "high" userspace addresses This is a fix for commit f6795053dac8 ("mm: mmap: Allow for "high" userspace addresses") for hugetlb. 
This patch adds support for "high" userspace addresses that are optionally supported on the system and have to be requested via a hint mechanism ("high" addr parameter to mmap). Architectures such as powerpc and x86 achieve this by making changes to their architectural versions of hugetlb_get_unmapped_area() function. However, arm64 uses the generic version of that function. So take into account arch_get_mmap_base() and arch_get_mmap_end() in hugetlb_get_unmapped_area(). To allow that, move those two macros out of mm/mmap.c into include/linux/sched/mm.h If these macros are not defined in architectural code then they default to (TASK_SIZE) and (base) so should not introduce any behavioural changes to architectures that do not define them. For the time being, only ARM64 is affected by this change. Catalin (ARM64) said "We should have fixed hugetlb_get_unmapped_area() as well when we added support for 52-bit VA. The reason for commit f6795053dac8 was to prevent normal mmap() from returning addresses above 48-bit by default as some user-space had hard assumptions about this. It's a slight ABI change if you do this for hugetlb_get_unmapped_area() but I doubt anyone would notice. It's more likely that the current behaviour would cause issues, so I'd rather have them consistent. Basically when arm64 gained support for 52-bit addresses we did not want user-space calling mmap() to suddenly get such high addresses, otherwise we could have inadvertently broken some programs (similar behaviour to x86 here). Hence we added commit f6795053dac8. But we missed hugetlbfs which could still get such high mmap() addresses. So in theory that's a potential regression that should have bee addressed at the same time as commit f6795053dac8 (and before arm64 enabled 52-bit addresses)" Link: https://lkml.kernel.org/r/ab847b6edb197bffdfe189e70fb4ac76bfe79e0d.1650033747.git.christophe.leroy@csgroup.eu Fixes: f6795053dac8 ("mm: mmap: Allow for "high" userspace addresses") Signed-off-by: Christophe Leroy Reviewed-by: Catalin Marinas Cc: Steve Capper Cc: Will Deacon Cc: [5.0.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/hugetlbfs/inode.c | 9 +++++---- include/linux/sched/mm.h | 8 ++++++++ mm/mmap.c | 8 -------- 3 files changed, 13 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 99c7477cee5c..dd3a088db11d 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -206,7 +206,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, info.flags = 0; info.length = len; info.low_limit = current->mm->mmap_base; - info.high_limit = TASK_SIZE; + info.high_limit = arch_get_mmap_end(addr); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; return vm_unmapped_area(&info); @@ -222,7 +222,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); - info.high_limit = current->mm->mmap_base; + info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; addr = vm_unmapped_area(&info); @@ -237,7 +237,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = current->mm->mmap_base; - info.high_limit = TASK_SIZE; + info.high_limit = arch_get_mmap_end(addr); addr = vm_unmapped_area(&info); } @@ -251,6 +251,7 @@ 
hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct hstate *h = hstate_file(file); + const unsigned long mmap_end = arch_get_mmap_end(addr); if (len & ~huge_page_mask(h)) return -EINVAL; @@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && + if (mmap_end - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index a80356e9dc69..1ad1f4bfa025 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -136,6 +136,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm) #endif /* CONFIG_MEMCG */ #ifdef CONFIG_MMU +#ifndef arch_get_mmap_end +#define arch_get_mmap_end(addr) (TASK_SIZE) +#endif + +#ifndef arch_get_mmap_base +#define arch_get_mmap_base(addr, base) (base) +#endif + extern void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack); extern unsigned long diff --git a/mm/mmap.c b/mm/mmap.c index 3aa839f81e63..313b57d55a63 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2117,14 +2117,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) return addr; } -#ifndef arch_get_mmap_end -#define arch_get_mmap_end(addr) (TASK_SIZE) -#endif - -#ifndef arch_get_mmap_base -#define arch_get_mmap_base(addr, base) (base) -#endif - /* Get an address range which is currently unmapped. * For shmat() with addr=0. * -- cgit v1.2.3-71-gd317 From e4a38402c36e42df28eb1a5394be87e6571fb48a Mon Sep 17 00:00:00 2001 From: Nico Pache Date: Thu, 21 Apr 2022 16:36:01 -0700 Subject: oom_kill.c: futex: delay the OOM reaper to allow time for proper futex cleanup The pthread struct is allocated on PRIVATE|ANONYMOUS memory [1] which can be targeted by the oom reaper. This mapping is used to store the futex robust list head; the kernel does not keep a copy of the robust list and instead references a userspace address to maintain the robustness during a process death. A race can occur between exit_mm and the oom reaper that allows the oom reaper to free the memory of the futex robust list before the exit path has handled the futex death: CPU1 CPU2 -------------------------------------------------------------------- page_fault do_exit "signal" wake_oom_reaper oom_reaper oom_reap_task_mm (invalidates mm) exit_mm exit_mm_release futex_exit_release futex_cleanup exit_robust_list get_user (EFAULT- can't access memory) If the get_user EFAULT's, the kernel will be unable to recover the waiters on the robust_list, leaving userspace mutexes hung indefinitely. Delay the OOM reaper, allowing more time for the exit path to perform the futex cleanup. Reproducer: https://gitlab.com/jsavitz/oom_futex_reproducer Based on a patch by Michal Hocko. Link: https://elixir.bootlin.com/glibc/glibc-2.35/source/nptl/allocatestack.c#L370 [1] Link: https://lkml.kernel.org/r/20220414144042.677008-1-npache@redhat.com Fixes: 212925802454 ("mm: oom: let oom_reap_task and exit_mmap run concurrently") Signed-off-by: Joel Savitz Signed-off-by: Nico Pache Co-developed-by: Joel Savitz Suggested-by: Thomas Gleixner Acked-by: Thomas Gleixner Acked-by: Michal Hocko Cc: Rafael Aquini Cc: Waiman Long Cc: Herton R. 
Krzesinski Cc: Juri Lelli Cc: Vincent Guittot Cc: Dietmar Eggemann Cc: Steven Rostedt Cc: Ben Segall Cc: Mel Gorman Cc: Daniel Bristot de Oliveira Cc: David Rientjes Cc: Andrea Arcangeli Cc: Davidlohr Bueso Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Joel Savitz Cc: Darren Hart Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 1 + mm/oom_kill.c | 54 ++++++++++++++++++++++++++++++++++++++------------- 2 files changed, 41 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index d5e3c00b74e1..a8911b1f35aa 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1443,6 +1443,7 @@ struct task_struct { int pagefault_disabled; #ifdef CONFIG_MMU struct task_struct *oom_reaper_list; + struct timer_list oom_reaper_timer; #endif #ifdef CONFIG_VMAP_STACK struct vm_struct *stack_vm_area; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 7ec38194f8e1..49d7df39b02d 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -632,7 +632,7 @@ done: */ set_bit(MMF_OOM_SKIP, &mm->flags); - /* Drop a reference taken by wake_oom_reaper */ + /* Drop a reference taken by queue_oom_reaper */ put_task_struct(tsk); } @@ -644,12 +644,12 @@ static int oom_reaper(void *unused) struct task_struct *tsk = NULL; wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL); - spin_lock(&oom_reaper_lock); + spin_lock_irq(&oom_reaper_lock); if (oom_reaper_list != NULL) { tsk = oom_reaper_list; oom_reaper_list = tsk->oom_reaper_list; } - spin_unlock(&oom_reaper_lock); + spin_unlock_irq(&oom_reaper_lock); if (tsk) oom_reap_task(tsk); @@ -658,22 +658,48 @@ static int oom_reaper(void *unused) return 0; } -static void wake_oom_reaper(struct task_struct *tsk) +static void wake_oom_reaper(struct timer_list *timer) { - /* mm is already queued? */ - if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) - return; + struct task_struct *tsk = container_of(timer, struct task_struct, + oom_reaper_timer); + struct mm_struct *mm = tsk->signal->oom_mm; + unsigned long flags; - get_task_struct(tsk); + /* The victim managed to terminate on its own - see exit_mmap */ + if (test_bit(MMF_OOM_SKIP, &mm->flags)) { + put_task_struct(tsk); + return; + } - spin_lock(&oom_reaper_lock); + spin_lock_irqsave(&oom_reaper_lock, flags); tsk->oom_reaper_list = oom_reaper_list; oom_reaper_list = tsk; - spin_unlock(&oom_reaper_lock); + spin_unlock_irqrestore(&oom_reaper_lock, flags); trace_wake_reaper(tsk->pid); wake_up(&oom_reaper_wait); } +/* + * Give the OOM victim time to exit naturally before invoking the oom_reaping. + * The timers timeout is arbitrary... the longer it is, the longer the worst + * case scenario for the OOM can take. If it is too small, the oom_reaper can + * get in the way and release resources needed by the process exit path. + * e.g. The futex robust list can sit in Anon|Private memory that gets reaped + * before the exit path is able to wake the futex waiters. + */ +#define OOM_REAPER_DELAY (2*HZ) +static void queue_oom_reaper(struct task_struct *tsk) +{ + /* mm is already queued? 
*/ + if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) + return; + + get_task_struct(tsk); + timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0); + tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY; + add_timer(&tsk->oom_reaper_timer); +} + static int __init oom_init(void) { oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper"); @@ -681,7 +707,7 @@ static int __init oom_init(void) } subsys_initcall(oom_init) #else -static inline void wake_oom_reaper(struct task_struct *tsk) +static inline void queue_oom_reaper(struct task_struct *tsk) { } #endif /* CONFIG_MMU */ @@ -932,7 +958,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message) rcu_read_unlock(); if (can_oom_reap) - wake_oom_reaper(victim); + queue_oom_reaper(victim); mmdrop(mm); put_task_struct(victim); @@ -968,7 +994,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message) task_lock(victim); if (task_will_free_mem(victim)) { mark_oom_victim(victim); - wake_oom_reaper(victim); + queue_oom_reaper(victim); task_unlock(victim); put_task_struct(victim); return; @@ -1067,7 +1093,7 @@ bool out_of_memory(struct oom_control *oc) */ if (task_will_free_mem(current)) { mark_oom_victim(current); - wake_oom_reaper(current); + queue_oom_reaper(current); return true; } -- cgit v1.2.3-71-gd317 From 6510ea973d8d9d4a0cb2fb557b36bd1ab3eb49f6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 25 Apr 2022 18:39:46 +0200 Subject: net: Use this_cpu_inc() to increment net->core_stats The macro dev_core_stats_##FIELD##_inc() disables preemption and invokes netdev_core_stats_alloc() to return a per-CPU pointer. netdev_core_stats_alloc() will allocate memory on its first invocation which breaks on PREEMPT_RT because it requires non-atomic context for memory allocation. This can be avoided by enabling preemption in netdev_core_stats_alloc() assuming the caller always disables preemption. It might be better to replace local_inc() with this_cpu_inc() now that dev_core_stats_##FIELD##_inc() gained a preempt-disable section and does not rely on already disabled preemption. This results in less instructions on x86-64: local_inc: | incl %gs:__preempt_count(%rip) # __preempt_count | movq 488(%rdi), %rax # _1->core_stats, _22 | testq %rax, %rax # _22 | je .L585 #, | add %gs:this_cpu_off(%rip), %rax # this_cpu_off, tcp_ptr__ | .L586: | testq %rax, %rax # _27 | je .L587 #, | incq (%rax) # _6->a.counter | .L587: | decl %gs:__preempt_count(%rip) # __preempt_count this_cpu_inc(), this patch: | movq 488(%rdi), %rax # _1->core_stats, _5 | testq %rax, %rax # _5 | je .L591 #, | .L585: | incq %gs:(%rax) # _18->rx_dropped Use unsigned long as type for the counter. Use this_cpu_inc() to increment the counter. Use a plain read of the counter. Signed-off-by: Sebastian Andrzej Siewior Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/YmbO0pxgtKpCw4SY@linutronix.de Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 21 +++++++++------------ net/core/dev.c | 14 +++++--------- 2 files changed, 14 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 59e27a2b7bf0..b1fbe21650bb 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -199,10 +199,10 @@ struct net_device_stats { * Try to fit them in a single cache line, for dev_get_stats() sake. 
*/ struct net_device_core_stats { - local_t rx_dropped; - local_t tx_dropped; - local_t rx_nohandler; -} __aligned(4 * sizeof(local_t)); + unsigned long rx_dropped; + unsigned long tx_dropped; + unsigned long rx_nohandler; +} __aligned(4 * sizeof(unsigned long)); #include #include @@ -3843,15 +3843,15 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev, return false; } -struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev); +struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev); -static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev) +static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev) { /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); if (likely(p)) - return this_cpu_ptr(p); + return p; return netdev_core_stats_alloc(dev); } @@ -3859,14 +3859,11 @@ static inline struct net_device_core_stats *dev_core_stats(struct net_device *de #define DEV_CORE_STATS_INC(FIELD) \ static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ { \ - struct net_device_core_stats *p; \ + struct net_device_core_stats __percpu *p; \ \ - preempt_disable(); \ p = dev_core_stats(dev); \ - \ if (p) \ - local_inc(&p->FIELD); \ - preempt_enable(); \ + this_cpu_inc(p->FIELD); \ } DEV_CORE_STATS_INC(rx_dropped) DEV_CORE_STATS_INC(tx_dropped) diff --git a/net/core/dev.c b/net/core/dev.c index 8c6c08446556..1461c2d9dec8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10304,7 +10304,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, } EXPORT_SYMBOL(netdev_stats_to_stats64); -struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev) +struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev) { struct net_device_core_stats __percpu *p; @@ -10315,11 +10315,7 @@ struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev) free_percpu(p); /* This READ_ONCE() pairs with the cmpxchg() above */ - p = READ_ONCE(dev->core_stats); - if (!p) - return NULL; - - return this_cpu_ptr(p); + return READ_ONCE(dev->core_stats); } EXPORT_SYMBOL(netdev_core_stats_alloc); @@ -10356,9 +10352,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, for_each_possible_cpu(i) { core_stats = per_cpu_ptr(p, i); - storage->rx_dropped += local_read(&core_stats->rx_dropped); - storage->tx_dropped += local_read(&core_stats->tx_dropped); - storage->rx_nohandler += local_read(&core_stats->rx_nohandler); + storage->rx_dropped += READ_ONCE(core_stats->rx_dropped); + storage->tx_dropped += READ_ONCE(core_stats->tx_dropped); + storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler); } } return storage; -- cgit v1.2.3-71-gd317 From e5be15767e7e284351853cbaba80cde8620341fb Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 25 Apr 2022 08:07:48 -0400 Subject: hex2bin: make the function hex_to_bin constant-time The function hex2bin is used to load cryptographic keys into device mapper targets dm-crypt and dm-integrity. It should take constant time independent on the processed data, so that concurrently running unprivileged code can't infer any information about the keys via microarchitectural convert channels. This patch changes the function hex_to_bin so that it contains no branches and no memory accesses. 
Note that this shouldn't cause performance degradation because the size of the new function is the same as the size of the old function (on x86-64) - and the new function causes no branch misprediction penalties. I compile-tested this function with gcc on aarch64 alpha arm hppa hppa64 i386 ia64 m68k mips32 mips64 powerpc powerpc64 riscv sh4 s390x sparc32 sparc64 x86_64 and with clang on aarch64 arm hexagon i386 mips32 mips64 powerpc powerpc64 s390x sparc32 sparc64 x86_64 to verify that there are no branches in the generated code. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 2 +- lib/hexdump.c | 32 +++++++++++++++++++++++++------- 2 files changed, 26 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index a890428bcc1a..fe6efb24d151 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -285,7 +285,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte) return buf; } -extern int hex_to_bin(char ch); +extern int hex_to_bin(unsigned char ch); extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); extern char *bin2hex(char *dst, const void *src, size_t count); diff --git a/lib/hexdump.c b/lib/hexdump.c index 9301578f98e8..369420ce553a 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c @@ -22,15 +22,33 @@ EXPORT_SYMBOL(hex_asc_upper); * * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad * input. + * + * This function is used to load cryptographic keys, so it is coded in such a + * way that there are no conditions or memory accesses that depend on data. + * + * Explanation of the logic: + * (ch - '9' - 1) is negative if ch <= '9' + * ('0' - 1 - ch) is negative if ch >= '0' + * we "and" these two values, so the result is negative if ch is in the range + * '0' ... '9' + * we are only interested in the sign, so we do a shift ">> 8"; note that right + * shift of a negative value is implementation-defined, so we cast the + * value to (unsigned) before the shift --- we have 0xffffff if ch is in + * the range '0' ... '9', 0 otherwise + * we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is + * in the range '0' ... '9', 0 otherwise + * we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0' + * ... '9', -1 otherwise + * the next line is similar to the previous one, but we need to decode both + * uppercase and lowercase letters, so we use (ch & 0xdf), which converts + * lowercase to uppercase */ -int hex_to_bin(char ch) +int hex_to_bin(unsigned char ch) { - if ((ch >= '0') && (ch <= '9')) - return ch - '0'; - ch = tolower(ch); - if ((ch >= 'a') && (ch <= 'f')) - return ch - 'a' + 10; - return -1; + unsigned char cu = ch & 0xdf; + return -1 + + ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) + + ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8); } EXPORT_SYMBOL(hex_to_bin); -- cgit v1.2.3-71-gd317
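As a quick sanity check of the arithmetic above, a userspace harness (not part of the patch) can reproduce the branchless expression and compare it exhaustively against the obvious branching decoder for every possible byte value:

  #include <assert.h>
  #include <stdio.h>

  /* Same arithmetic as the patched hex_to_bin(), duplicated here so it can be
   * verified in userspace. */
  static int hex_to_bin_branchless(unsigned char ch)
  {
          unsigned char cu = ch & 0xdf;
          return -1 +
                 ((ch - '0' +  1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
                 ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
  }

  static int hex_to_bin_naive(unsigned char ch)
  {
          if (ch >= '0' && ch <= '9')
                  return ch - '0';
          if (ch >= 'a' && ch <= 'f')
                  return ch - 'a' + 10;
          if (ch >= 'A' && ch <= 'F')
                  return ch - 'A' + 10;
          return -1;
  }

  int main(void)
  {
          for (int c = 0; c < 256; c++)
                  assert(hex_to_bin_branchless((unsigned char)c) ==
                         hex_to_bin_naive((unsigned char)c));
          puts("branchless and naive decoders agree on all 256 inputs");
          return 0;
  }

This only demonstrates functional equivalence; the constant-time property still has to be confirmed by inspecting the generated code, as the commit message describes.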