From 77f88796cee819b9c4562b0b6b44691b3b7755b1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 16 Mar 2017 16:54:24 -0400 Subject: cgroup, kthread: close race window where new kthreads can be migrated to non-root cgroups Creation of a kthread goes through a couple of interlocked stages between the kthread itself and its creator. Once the new kthread starts running, it initializes itself and wakes up the creator. The creator can then further configure the kthread and let it start doing its job by waking it up. In this configuration-by-creator stage, the creator is the only one that can wake it up, but the kthread is visible to userland. When altering the kthread's attributes from userland is allowed, this is fine; however, for cases where CPU affinity is critical, kthread_bind() is used to first disable affinity changes from userland and then set the affinity. This also prevents the kthread from being migrated into non-root cgroups, as that can affect the CPU affinity and many other things. Unfortunately, the cgroup side of protection is racy. While the PF_NO_SETAFFINITY flag prevents further migrations, userland can win the race before the creator sets the flag with kthread_bind() and put the kthread in a non-root cgroup, which can lead to all sorts of problems including incorrect CPU affinity and starvation. This bug got triggered by userland which periodically tries to migrate all processes in the root cpuset cgroup to a non-root one. Per-cpu workqueue workers got caught while being created and ended up with incorrect CPU affinity, breaking concurrency management and sometimes stalling workqueue execution. This patch adds task->no_cgroup_migration, which disallows the task from being migrated by userland. kthreadd starts with the flag set, making every child kthread start in the root cgroup with migration disallowed. The flag is cleared after the kthread finishes initialization, by which time PF_NO_SETAFFINITY is set if the kthread should stay in the root cgroup. It'd be better to wait for the initialization instead of failing, but I couldn't think of a way of implementing that without adding either a new PF flag or sleeping and retrying from the waiting side. Even if userland depends on changing the cgroup membership of a kthread, it either has to be synchronized with kthread_create() or periodically repeat, so it's unlikely that this would break anything. v2: Switch to a simpler implementation using a new task_struct bit field suggested by Oleg. Signed-off-by: Tejun Heo Suggested-by: Oleg Nesterov Cc: Linus Torvalds Cc: Andrew Morton Cc: Peter Zijlstra (Intel) Cc: Thomas Gleixner Reported-and-debugged-by: Chris Mason Cc: stable@vger.kernel.org # v4.3+ (we can't close the race on < v4.3) Signed-off-by: Tejun Heo --- include/linux/cgroup.h | 21 +++++++++++++++++++++ include/linux/sched.h | 4 ++++ 2 files changed, 25 insertions(+) (limited to 'include') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f6b43fbb141c..af9c86e958bd 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } +static inline void cgroup_init_kthreadd(void) +{ + /* + * kthreadd is inherited by all kthreads, keep it in the root so + * that the new kthreads are guaranteed to stay in the root until + * initialization is finished. + */ + current->no_cgroup_migration = 1; +} + +static inline void cgroup_kthread_ready(void) +{ + /* + * This kthread finished initialization.
The creator should have + * set PF_NO_SETAFFINITY if this kthread should stay in the root. + */ + current->no_cgroup_migration = 0; +} + #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; @@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {} static inline int cgroup_init_early(void) { return 0; } static inline int cgroup_init(void) { return 0; } +static inline void cgroup_init_kthreadd(void) {} +static inline void cgroup_kthread_ready(void) {} static inline bool task_under_cgroup_hierarchy(struct task_struct *task, struct cgroup *ancestor) diff --git a/include/linux/sched.h b/include/linux/sched.h index d67eee84fd43..4cf9a59a4d08 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -604,6 +604,10 @@ struct task_struct { #ifdef CONFIG_COMPAT_BRK unsigned brk_randomized:1; #endif +#ifdef CONFIG_CGROUPS + /* disallow userland-initiated cgroup migration */ + unsigned no_cgroup_migration:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. */ -- cgit v1.2.3-71-gd317 From 5659495a7a1455665ce1466d156597ad1bda8772 Mon Sep 17 00:00:00 2001 From: Naohiro Aota Date: Fri, 24 Mar 2017 23:04:44 -0700 Subject: uapi: add missing install of userio.h While commit 5523662edd4f ("Input: add userio module") added userio.h under the uapi/ directory, it forgot to add the header file to Kbuild. Thus, the file was missing from header installation. Signed-off-by: Naohiro Aota Reviewed-by: Lyude Paul Signed-off-by: Dmitry Torokhov --- include/uapi/linux/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index f330ba4547cf..b4a9a1891db6 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -437,6 +437,7 @@ header-y += unistd.h header-y += unix_diag.h header-y += usbdevice_fs.h header-y += usbip.h +header-y += userio.h header-y += utime.h header-y += utsname.h header-y += uuid.h -- cgit v1.2.3-71-gd317 From f9ba3501d50317697811ff3c48f623f08d616fc8 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 27 Mar 2017 00:21:15 +0800 Subject: sctp: change to save MSG_MORE flag into assoc David Laight noticed the support for MSG_MORE with datamsg->force_delay didn't really work as we expected, as the first msg with MSG_MORE set would always block the following chunks' dequeuing. This patch rewrites it to save the MSG_MORE flag into the assoc, as David Laight suggested. asoc->force_delay saves the MSG_MORE flag before a msg is sent. While asoc->force_delay is set by a msg with the MSG_MORE flag, no chunks in the queue are sent out, until a new msg without the MSG_MORE flag clears asoc->force_delay. Note that this change does not affect flushes generated by other triggers, like asoc->state != ESTABLISHED, queue size > pmtu, etc. v1->v2: Do not clear asoc->force_delay after sending the msg with the MSG_MORE flag. Fixes: 4ea0c32f5f42 ("sctp: add support for MSG_MORE") Signed-off-by: Xin Long Acked-by: David Laight Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 2 +- net/sctp/output.c | 2 +- net/sctp/socket.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 592decebac75..8caa5ee9e290 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -499,7 +499,6 @@ struct sctp_datamsg { /* Did the messenge fail to send?
*/ int send_error; u8 send_failed:1, - force_delay:1, can_delay; /* should this message be Nagle delayed */ }; @@ -1878,6 +1877,7 @@ struct sctp_association { __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ temp:1, /* Is it a temporary association? */ + force_delay:1, prsctp_enable:1, reconf_enable:1; diff --git a/net/sctp/output.c b/net/sctp/output.c index 1224421036b3..73fd178007a3 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -704,7 +704,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, */ if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && - !chunk->msg->force_delay) + !asoc->force_delay) /* Nothing unacked */ return SCTP_XMIT_OK; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0f378ea2ae38..baa269a0d52e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1965,7 +1965,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) err = PTR_ERR(datamsg); goto out_free; } - datamsg->force_delay = !!(msg->msg_flags & MSG_MORE); + asoc->force_delay = !!(msg->msg_flags & MSG_MORE); /* Now send the (possibly) fragmented message. */ list_for_each_entry(chunk, &datamsg->chunks, frag_list) { -- cgit v1.2.3-71-gd317 From 9ad59fea162c139f62335f0ca0ce1fdf4f82bd91 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 2 Mar 2017 16:05:45 +0100 Subject: drm/etnaviv: submit support for in-fences Loosely based on commit f0a42bb5423a ("drm/msm: submit support for in-fences"). Unfortunately, struct drm_etnaviv_gem_submit doesn't have a flags field yet, so we have to extend the structure and trust that drm_ioctl will clear the flags for us if an older userspace only submits part of the struct. Signed-off-by: Philipp Zabel Reviewed-by: Gustavo Padovan Reviewed-by: Sumit Semwal Reviewed-by: Lucas Stach Signed-off-by: Lucas Stach --- drivers/gpu/drm/etnaviv/Kconfig | 1 + drivers/gpu/drm/etnaviv/etnaviv_gem.h | 1 + drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 34 +++++++++++++++++++++++++++- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 5 +++- drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 2 +- include/uapi/drm/etnaviv_drm.h | 6 +++++ 6 files changed, 46 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig index cc1731c5289c..71cee4e9fefb 100644 --- a/drivers/gpu/drm/etnaviv/Kconfig +++ b/drivers/gpu/drm/etnaviv/Kconfig @@ -5,6 +5,7 @@ config DRM_ETNAVIV depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST) depends on MMU select SHMEM + select SYNC_FILE select TMPFS select IOMMU_API select IOMMU_SUPPORT diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index e63ff116a3b3..120410d67eb5 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -107,6 +107,7 @@ struct etnaviv_gem_submit { u32 fence; unsigned int nr_bos; struct etnaviv_gem_submit_bo bos[0]; + u32 flags; }; int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 726090d7a6ac..fb8d5befbf4f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -14,7 +14,9 @@ * this program. If not, see . 
*/ +#include #include +#include #include "etnaviv_cmdbuf.h" #include "etnaviv_drv.h" #include "etnaviv_gpu.h" @@ -169,8 +171,10 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; + bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); - ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write); + ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write, + explicit); if (ret) break; } @@ -303,6 +307,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, struct etnaviv_gem_submit *submit; struct etnaviv_cmdbuf *cmdbuf; struct etnaviv_gpu *gpu; + struct dma_fence *in_fence = NULL; void *stream; int ret; @@ -326,6 +331,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, return -EINVAL; } + if (args->flags & ~ETNA_SUBMIT_FLAGS) { + DRM_ERROR("invalid flags: 0x%x\n", args->flags); + return -EINVAL; + } + /* * Copy the command submission and bo array to kernel space in * one go, and do this outside of any locks. @@ -371,6 +381,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, goto err_submit_cmds; } + submit->flags = args->flags; + ret = submit_lookup_objects(submit, file, bos, args->nr_bos); if (ret) goto err_submit_objects; @@ -385,6 +397,24 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, goto err_submit_objects; } + if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) { + in_fence = sync_file_get_fence(args->fence_fd); + if (!in_fence) { + ret = -EINVAL; + goto err_submit_objects; + } + + /* + * Wait if the fence is from a foreign context, or if the fence + * array contains any fence from a foreign context. + */ + if (!dma_fence_match_context(in_fence, gpu->fence_context)) { + ret = dma_fence_wait(in_fence, true); + if (ret) + goto err_submit_objects; + } + } + ret = submit_fence_sync(submit); if (ret) goto err_submit_objects; @@ -419,6 +449,8 @@ out: flush_workqueue(priv->wq); err_submit_objects: + if (in_fence) + dma_fence_put(in_fence); submit_cleanup(submit); err_submit_cmds: diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index cab4cf546c17..68a4f59e4c22 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1077,7 +1077,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) } int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, - unsigned int context, bool exclusive) + unsigned int context, bool exclusive, bool explicit) { struct reservation_object *robj = etnaviv_obj->resv; struct reservation_object_list *fobj; @@ -1090,6 +1090,9 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, return ret; } + if (explicit) + return 0; + /* * If we have any shared fences, then the exclusive fence * should be ignored as it will already have been signalled. 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 6a1e68eec24c..9227a9740447 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -183,7 +183,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m); #endif int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, - unsigned int context, bool exclusive); + unsigned int context, bool exclusive, bool implicit); void etnaviv_gpu_retire(struct etnaviv_gpu *gpu); int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu, diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h index 2584c1cca42f..e9c388a1d8eb 100644 --- a/include/uapi/drm/etnaviv_drm.h +++ b/include/uapi/drm/etnaviv_drm.h @@ -154,6 +154,10 @@ struct drm_etnaviv_gem_submit_bo { * one or more cmdstream buffers. This allows for conditional execution * (context-restore), and IB buffers needed for per tile/bin draw cmds. */ +#define ETNA_SUBMIT_NO_IMPLICIT 0x0001 +#define ETNA_SUBMIT_FENCE_FD_IN 0x0002 +#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \ + ETNA_SUBMIT_FENCE_FD_IN) #define ETNA_PIPE_3D 0x00 #define ETNA_PIPE_2D 0x01 #define ETNA_PIPE_VG 0x02 @@ -167,6 +171,8 @@ struct drm_etnaviv_gem_submit { __u64 bos; /* in, ptr to array of submit_bo's */ __u64 relocs; /* in, ptr to array of submit_reloc's */ __u64 stream; /* in, ptr to cmdstream */ + __u32 flags; /* in, mask of ETNA_SUBMIT_x */ + __s32 fence_fd; /* in, fence fd (see ETNA_SUBMIT_FENCE_FD_IN) */ }; /* The normal way to synchronize with the GPU is just to CPU_PREP on -- cgit v1.2.3-71-gd317 From 78ec187f64fa5d8f837b8fc5bbbad88a89b63ab4 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 2 Mar 2017 16:14:43 +0100 Subject: drm/etnaviv: submit support for out-fences Based on commit 4cd0945901a6 ("drm/msm: submit support for out-fences"). We increment the minor driver version so userspace can detect explicit fence support. Signed-off-by: Philipp Zabel Signed-off-by: Lucas Stach --- v3: Changed to work with fence returned from GPU submit. 
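For illustration, a minimal userspace submit using both the in- and out-fence flags might look like the sketch below. It assumes libdrm's drmIoctl() and the DRM_IOCTL_ETNAVIV_GEM_SUBMIT request macro from the uapi header; the flag and field names come from the etnaviv_drm.h changes in this series, and the caller is assumed to have already filled the bo/stream fields.

#include <xf86drm.h>
#include "etnaviv_drm.h"

/* Submit with an optional in-fence and request an out-fence. */
int submit_with_fences(int drm_fd, struct drm_etnaviv_gem_submit *req,
		       int in_fence_fd)
{
	req->flags = ETNA_SUBMIT_FENCE_FD_OUT;
	if (in_fence_fd >= 0) {
		req->flags |= ETNA_SUBMIT_FENCE_FD_IN;
		req->fence_fd = in_fence_fd;	/* GPU waits on this first */
	}
	if (drmIoctl(drm_fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, req))
		return -1;
	/* On success the kernel replaced fence_fd with the out-fence fd. */
	return req->fence_fd;
}
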
--- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 28 ++++++++++++++++++++++++++++ include/uapi/drm/etnaviv_drm.h | 6 ++++-- 3 files changed, 33 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 289a9f8c6671..5255278dde56 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -512,7 +512,7 @@ static struct drm_driver etnaviv_drm_driver = { .desc = "etnaviv DRM", .date = "20151214", .major = 1, - .minor = 0, + .minor = 1, }; /* diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 1b6f9b843815..e1909429837e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -309,6 +309,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, struct etnaviv_cmdbuf *cmdbuf; struct etnaviv_gpu *gpu; struct dma_fence *in_fence = NULL; + struct sync_file *sync_file = NULL; + int out_fence_fd = -1; void *stream; int ret; @@ -376,6 +378,14 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, goto err_submit_cmds; } + if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { + out_fence_fd = get_unused_fd_flags(O_CLOEXEC); + if (out_fence_fd < 0) { + ret = out_fence_fd; + goto err_submit_cmds; + } + } + submit = submit_create(dev, gpu, args->nr_bos); if (!submit) { ret = -ENOMEM; @@ -436,6 +446,22 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret == 0) cmdbuf = NULL; + if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { + /* + * This can be improved: ideally we want to allocate the sync + * file before kicking off the GPU job and just attach the + * fence to the sync file here, eliminating the ENOMEM + * possibility at this stage. 
+ */ + sync_file = sync_file_create(submit->fence); + if (!sync_file) { + ret = -ENOMEM; + goto out; + } + fd_install(out_fence_fd, sync_file->file); + } + + args->fence_fd = out_fence_fd; args->fence = submit->fence->seqno; out: @@ -455,6 +481,8 @@ err_submit_objects: submit_cleanup(submit); err_submit_cmds: + if (ret && (out_fence_fd >= 0)) + put_unused_fd(out_fence_fd); /* if we still own the cmdbuf */ if (cmdbuf) etnaviv_cmdbuf_free(cmdbuf); diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h index e9c388a1d8eb..76f6f78a352b 100644 --- a/include/uapi/drm/etnaviv_drm.h +++ b/include/uapi/drm/etnaviv_drm.h @@ -156,8 +156,10 @@ struct drm_etnaviv_gem_submit_bo { */ #define ETNA_SUBMIT_NO_IMPLICIT 0x0001 #define ETNA_SUBMIT_FENCE_FD_IN 0x0002 +#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004 #define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \ - ETNA_SUBMIT_FENCE_FD_IN) + ETNA_SUBMIT_FENCE_FD_IN | \ + ETNA_SUBMIT_FENCE_FD_OUT) #define ETNA_PIPE_3D 0x00 #define ETNA_PIPE_2D 0x01 #define ETNA_PIPE_VG 0x02 @@ -172,7 +174,7 @@ struct drm_etnaviv_gem_submit { __u64 relocs; /* in, ptr to array of submit_reloc's */ __u64 stream; /* in, ptr to cmdstream */ __u32 flags; /* in, mask of ETNA_SUBMIT_x */ - __s32 fence_fd; /* in, fence fd (see ETNA_SUBMIT_FENCE_FD_IN) */ + __s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */ }; /* The normal way to synchronize with the GPU is just to CPU_PREP on -- cgit v1.2.3-71-gd317 From 49cb77e297dc611a1b795cfeb79452b3002bd331 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 27 Mar 2017 16:12:43 -0700 Subject: target: Avoid mappedlun symlink creation during lun shutdown This patch closes a race between se_lun deletion during configfs unlink in target_fabric_port_unlink() -> core_dev_del_lun() -> core_tpg_remove_lun(), when transport_clear_lun_ref() blocks waiting for the percpu_ref RCU grace period to finish, but a new NodeACL mappedlun is added before the RCU grace period has completed. This can happen in target_fabric_mappedlun_link() because it only checks for se_lun->lun_se_dev, which is not cleared until after the transport_clear_lun_ref() percpu_ref RCU grace period finishes. This bug originally manifested as NULL pointer dereference OOPsen in target_stat_scsi_att_intr_port_show_attr_dev() on v4.1.y code, because it dereferences lun->lun_se_dev without an explicit NULL pointer check. In post-v4.1 code with the target-core RCU conversion, the code in target_stat_scsi_att_intr_port_show_attr_dev() no longer uses se_lun->lun_se_dev, but the same race still exists. To address the bug, go ahead and set se_lun->lun_shutdown as early as possible in core_tpg_remove_lun(), and ensure new NodeACL mappedlun creation in target_fabric_mappedlun_link() fails during se_lun shutdown.
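In miniature, the fix is a shutdown flag raised by the removal path before it starts waiting, and checked by the link path. A hedged userspace sketch with illustrative names follows (the driver's actual types and locking differ):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct lun {
	pthread_mutex_t lock;	/* stands in for tpg_lun_mutex */
	bool shutdown;		/* stands in for lun_shutdown */
};

/* Link path: refuse to create a mappedlun once teardown has begun. */
static int mappedlun_link(struct lun *lun)
{
	int ret = 0;

	pthread_mutex_lock(&lun->lock);
	if (lun->shutdown)
		ret = -EINVAL;
	/* else create the mappedlun here */
	pthread_mutex_unlock(&lun->lock);
	return ret;
}

/* Removal path: raise the flag first, then wait out references. */
static void remove_lun(struct lun *lun)
{
	pthread_mutex_lock(&lun->lock);
	lun->shutdown = true;
	pthread_mutex_unlock(&lun->lock);
	/* ... wait for outstanding references (RCU grace period) ... */
}
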
Reported-by: James Shen Cc: James Shen Tested-by: James Shen Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_fabric_configfs.c | 5 +++++ drivers/target/target_core_tpg.c | 4 ++++ include/target/target_core_base.h | 1 + 3 files changed, 10 insertions(+) (limited to 'include') diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index d8a16ca6baa5..d1e6cab8e3d3 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link( pr_err("Source se_lun->lun_se_dev does not exist\n"); return -EINVAL; } + if (lun->lun_shutdown) { + pr_err("Unable to create mappedlun symlink because" + " lun->lun_shutdown=true\n"); + return -EINVAL; + } se_tpg = lun->lun_tpg; nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 6fb191914f45..dfaef4d3b2d2 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -642,6 +642,8 @@ void core_tpg_remove_lun( */ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); + lun->lun_shutdown = true; + core_clear_lun_from_tpg(lun, tpg); /* * Wait for any active I/O references to percpu se_lun->lun_ref to @@ -663,6 +665,8 @@ void core_tpg_remove_lun( } if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) hlist_del_rcu(&lun->link); + + lun->lun_shutdown = false; mutex_unlock(&tpg->tpg_lun_mutex); percpu_ref_exit(&lun->lun_ref); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 4b784b6e21c0..2e282461cfa5 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -705,6 +705,7 @@ struct se_lun { u64 unpacked_lun; #define SE_LUN_LINK_MAGIC 0xffff7771 u32 lun_link_magic; + bool lun_shutdown; bool lun_access_ro; u32 lun_index; -- cgit v1.2.3-71-gd317 From fe25deb7737ce6c0879ccf79c99fa1221d428bf2 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 27 Mar 2017 11:21:25 +0200 Subject: drm/ttm, drm/vmwgfx: Relax permission checking when opening surfaces Previously, when a surface was opened using a legacy (non prime) handle, it was verified to have been created by a client in the same master realm. Relax this so that opening is also allowed recursively if the client already has the surface open. This works around a regression in svga mesa where opening of a shared surface is used recursively to obtain surface information. 
Cc: Signed-off-by: Thomas Hellstrom Reviewed-by: Sinclair Yeh --- drivers/gpu/drm/ttm/ttm_object.c | 10 +++++++--- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 6 ++---- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 22 +++++++++------------- include/drm/ttm/ttm_object.h | 5 ++++- 5 files changed, 24 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index fdb451e3ec01..d750140bafbc 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c @@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile, if (unlikely(ret != 0)) goto out_err0; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); if (unlikely(ret != 0)) goto out_err1; @@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists); int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed) + enum ttm_ref_type ref_type, bool *existed, + bool require_existed) { struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; struct ttm_ref_object *ref; @@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, } rcu_read_unlock(); + if (require_existed) + return -EPERM; + ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false); if (unlikely(ret != 0)) @@ -635,7 +639,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, prime = (struct ttm_prime_object *) dma_buf->priv; base = &prime->base; *handle = base->hash.key; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); dma_buf_put(dma_buf); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 4076063e0fdd..6b2708b4eafe 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -1075,10 +1075,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, (void) vmw_fence_obj_reference(fence); if (user_fence_rep != NULL) { - bool existed; - - ret = ttm_ref_object_add(tfile, base, - TTM_REF_USAGE, &existed); + ret = ttm_ref_object_add(vmw_fp->tfile, base, + TTM_REF_USAGE, NULL, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed to reference a fence " "object.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 65b3f0369636..bf23153d4f55 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, return ret; ret = ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_SYNCCPU_WRITE, &existed); + TTM_REF_SYNCCPU_WRITE, &existed, false); if (ret != 0 || existed) ttm_bo_synccpu_write_release(&user_bo->dma.base); @@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, *handle = user_bo->prime.base.hash.key; return ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_USAGE, NULL); + TTM_REF_USAGE, NULL, false); } /* diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index f410502cb075..adc023fe67f3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -891,17 +891,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, uint32_t handle; struct ttm_base_object *base; int ret; + bool 
require_exist = false; if (handle_type == DRM_VMW_HANDLE_PRIME) { ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); if (unlikely(ret != 0)) return ret; } else { - if (unlikely(drm_is_render_client(file_priv))) { - DRM_ERROR("Render client refused legacy " - "surface reference.\n"); - return -EACCES; - } + if (unlikely(drm_is_render_client(file_priv))) + require_exist = true; + if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { DRM_ERROR("Locked master refused legacy " "surface reference.\n"); @@ -929,17 +928,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, /* * Make sure the surface creator has the same - * authenticating master. + * authenticating master, or is already registered with us. */ if (drm_is_primary_client(file_priv) && - user_srf->master != file_priv->master) { - DRM_ERROR("Trying to reference surface outside of" - " master domain.\n"); - ret = -EACCES; - goto out_bad_resource; - } + user_srf->master != file_priv->master) + require_exist = true; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, + require_exist); if (unlikely(ret != 0)) { DRM_ERROR("Could not add a reference to a surface.\n"); goto out_bad_resource; diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h index ed953f98f0e1..1487011fe057 100644 --- a/include/drm/ttm/ttm_object.h +++ b/include/drm/ttm/ttm_object.h @@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); * @ref_type: The type of reference. * @existed: Upon completion, indicates that an identical reference object * already existed, and the refcount was upped on that object instead. + * @require_existed: Fail with -EPERM if an identical ref object didn't + * already exist. * * Checks that the base object is shareable and adds a ref object to it. * @@ -243,7 +245,8 @@ extern int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed); + enum ttm_ref_type ref_type, bool *existed, + bool require_existed); extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, struct ttm_base_object *base); -- cgit v1.2.3-71-gd317 From 3dbcc105d5561e18ccd0842c7baab1c835562a37 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 30 Mar 2017 01:00:53 +0800 Subject: sctp: alloc stream info when initializing asoc When sending a msg before the asoc is established, sctp will first send an INIT packet and then enqueue chunks. Before INIT_ACK is received, the stream info has not yet been allocated. But enqueuing chunks needs to access the stream info, like the out stream state and out stream cnt. This patch fixes it by allocating the out stream info when initializing an asoc, and allocating the in stream and re-allocating the out stream when processing INIT. Signed-off-by: Xin Long Signed-off-by: David S.
Miller --- include/net/sctp/structs.h | 3 ++- net/sctp/associola.c | 7 ++++++- net/sctp/sm_make_chunk.c | 9 ++------- net/sctp/stream.c | 43 +++++++++++++++++++++++++++++++++++-------- 4 files changed, 45 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 8caa5ee9e290..a127b7c2c3c9 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -377,7 +377,8 @@ typedef struct sctp_sender_hb_info { __u64 hb_nonce; } sctp_sender_hb_info_t; -struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp); +int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp); +int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp); void sctp_stream_free(struct sctp_stream *stream); void sctp_stream_clear(struct sctp_stream *stream); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 0439a1a68367..0b26df5f6188 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -246,6 +246,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a if (!sctp_ulpq_init(&asoc->ulpq, asoc)) goto fail_init; + if (sctp_stream_new(asoc, gfp)) + goto fail_init; + /* Assume that peer would support both address types unless we are * told otherwise. */ @@ -264,7 +267,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a /* AUTH related initializations */ INIT_LIST_HEAD(&asoc->endpoint_shared_keys); if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp)) - goto fail_init; + goto stream_free; asoc->active_key_id = ep->active_key_id; asoc->prsctp_enable = ep->prsctp_enable; @@ -287,6 +290,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a return asoc; +stream_free: + sctp_stream_free(asoc->stream); fail_init: sock_put(asoc->base.sk); sctp_endpoint_put(asoc->ep); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 969a30c7bb54..118faff6a332 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2460,15 +2460,10 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, * association. */ if (!asoc->temp) { - int error; - - asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams, - asoc->c.sinit_num_ostreams, gfp); - if (!asoc->stream) + if (sctp_stream_init(asoc, gfp)) goto clean_up; - error = sctp_assoc_set_id(asoc, gfp); - if (error) + if (sctp_assoc_set_id(asoc, gfp)) goto clean_up; } diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 1c6cc04fa3a4..bbed997e1c5f 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -35,33 +35,60 @@ #include #include -struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp) +int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp) { struct sctp_stream *stream; int i; stream = kzalloc(sizeof(*stream), gfp); if (!stream) - return NULL; + return -ENOMEM; - stream->outcnt = outcnt; + stream->outcnt = asoc->c.sinit_num_ostreams; stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); if (!stream->out) { kfree(stream); - return NULL; + return -ENOMEM; } for (i = 0; i < stream->outcnt; i++) stream->out[i].state = SCTP_STREAM_OPEN; - stream->incnt = incnt; + asoc->stream = stream; + + return 0; +} + +int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp) +{ + struct sctp_stream *stream = asoc->stream; + int i; + + /* Initial stream->out size may be very big, so free it and alloc + * a new one with new outcnt to save memory. 
+ */ + kfree(stream->out); + stream->outcnt = asoc->c.sinit_num_ostreams; + stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); + if (!stream->out) + goto nomem; + + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_OPEN; + + stream->incnt = asoc->c.sinit_max_instreams; stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp); if (!stream->in) { kfree(stream->out); - kfree(stream); - return NULL; + goto nomem; } - return stream; + return 0; + +nomem: + asoc->stream = NULL; + kfree(stream); + + return -ENOMEM; } void sctp_stream_free(struct sctp_stream *stream) -- cgit v1.2.3-71-gd317 From fa7e25cf13a6d0b82b5ed1008246f44d42e8422c Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 30 Oct 2016 17:28:16 -0700 Subject: target: Fix unknown fabric callback queue-full errors This patch fixes a set of queue-full response handling bugs, where outgoing responses are leaked when a fabric driver is propagating non -EAGAIN or -ENOMEM errors to target-core. It introduces a new TRANSPORT_COMPLETE_QF_ERR state used to signal that CHECK_CONDITION status should be generated when fabric driver ->write_pending(), ->queue_data_in(), or ->queue_status() callbacks fail with non -EAGAIN or -ENOMEM errors and data-transfer should not be retried. Note that all fabric driver -EAGAIN and -ENOMEM errors are still retried indefinitely with the associated data-transfer callbacks, following the existing queue-full logic. Also fix two missing ->queue_status() queue-full cases related to CMD_T_ABORTED w/ TAS status handling. Reported-by: Potnuri Bharat Teja Reviewed-by: Potnuri Bharat Teja Tested-by: Potnuri Bharat Teja Cc: Potnuri Bharat Teja Reported-by: Steve Wise Cc: Steve Wise Cc: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 102 ++++++++++++++++++++++----------- include/target/target_core_base.h | 1 + 2 files changed, 69 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index b1a3cdb29468..a0cd56ee5fe9 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache; struct kmem_cache *t10_alua_lba_map_mem_cache; static void transport_complete_task_attr(struct se_cmd *cmd); +static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); static void transport_handle_queue_full(struct se_cmd *cmd, - struct se_device *dev); + struct se_device *dev, int err, bool write_pending); static int transport_put_cmd(struct se_cmd *cmd); static void target_complete_ok_work(struct work_struct *work); @@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work) if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) transport_write_pending_qf(cmd); - else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) + else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || + cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) transport_complete_qf(cmd); } } @@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, } trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; goto check_stop; default: @@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, } ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; check_stop: @@ -1739,8 +1741,7 @@ check_stop: return; queue_full:
- cmd->t_state = TRANSPORT_COMPLETE_QF_OK; - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } EXPORT_SYMBOL(transport_generic_request_failure); @@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd) int ret = 0; transport_complete_task_attr(cmd); + /* + * If a fabric driver ->write_pending() or ->queue_data_in() callback + * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and + * the same callbacks should not be retried. Return CHECK_CONDITION + * if a scsi_status is not already set. + * + * If a fabric driver ->queue_status() has returned non zero, always + * keep retrying no matter what.. + */ + if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { + if (cmd->scsi_status) + goto queue_status; - if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { - trace_target_cmd_complete(cmd); - ret = cmd->se_tfo->queue_status(cmd); - goto out; + cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; + cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; + translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); + goto queue_status; } + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) + goto queue_status; + switch (cmd->data_direction) { case DMA_FROM_DEVICE: if (cmd->scsi_status) @@ -2007,19 +2024,33 @@ queue_status: break; } -out: if (ret < 0) { - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); return; } transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); } -static void transport_handle_queue_full( - struct se_cmd *cmd, - struct se_device *dev) +static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, + int err, bool write_pending) { + /* + * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or + * ->queue_data_in() callbacks from new process context. + * + * Otherwise for other errors, transport_complete_qf() will send + * CHECK_CONDITION via ->queue_status() instead of attempting to + * retry associated fabric driver data-transfer callbacks. + */ + if (err == -EAGAIN || err == -ENOMEM) { + cmd->t_state = (write_pending) ? 
TRANSPORT_COMPLETE_QF_WP : + TRANSPORT_COMPLETE_QF_OK; + } else { + pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); + cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; + } + spin_lock_irq(&dev->qf_cmd_lock); list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); atomic_inc_mb(&dev->dev_qf_count); @@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work) WARN_ON(!cmd->scsi_status); ret = transport_send_check_condition_and_sense( cmd, 0, 1); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; transport_lun_remove_cmd(cmd); @@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work) } else if (rc) { ret = transport_send_check_condition_and_sense(cmd, rc, 0); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; transport_lun_remove_cmd(cmd); @@ -2134,7 +2165,7 @@ queue_rsp: if (target_read_prot_action(cmd)) { ret = transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; transport_lun_remove_cmd(cmd); @@ -2144,7 +2175,7 @@ queue_rsp: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_data_in(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; case DMA_TO_DEVICE: @@ -2157,7 +2188,7 @@ queue_rsp: atomic_long_add(cmd->data_length, &cmd->se_lun->lun_stats.tx_data_octets); ret = cmd->se_tfo->queue_data_in(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; } @@ -2166,7 +2197,7 @@ queue_rsp: queue_status: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; default: @@ -2180,8 +2211,8 @@ queue_status: queue_full: pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," " data_direction: %d\n", cmd, cmd->data_direction); - cmd->t_state = TRANSPORT_COMPLETE_QF_OK; - transport_handle_queue_full(cmd, cmd->se_dev); + + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } void target_free_sgl(struct scatterlist *sgl, int nents) @@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); ret = cmd->se_tfo->write_pending(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; - /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ - WARN_ON(ret); - - return (!ret) ? 
0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return 0; queue_full: pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); - cmd->t_state = TRANSPORT_COMPLETE_QF_WP; - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, true); return 0; } EXPORT_SYMBOL(transport_generic_new_cmd); @@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd) int ret; ret = cmd->se_tfo->write_pending(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) { + if (ret) { pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, true); } } @@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) __releases(&cmd->t_state_lock) __acquires(&cmd->t_state_lock) { + int ret; + assert_spin_locked(&cmd->t_state_lock); WARN_ON_ONCE(!irqs_disabled()); @@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) trace_target_cmd_complete(cmd); spin_unlock_irq(&cmd->t_state_lock); - cmd->se_tfo->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret) + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); spin_lock_irq(&cmd->t_state_lock); return 1; @@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status); void transport_send_task_abort(struct se_cmd *cmd) { unsigned long flags; + int ret; spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { @@ -3090,7 +3122,9 @@ send_abort: cmd->t_task_cdb[0], cmd->tag); trace_target_cmd_complete(cmd); - cmd->se_tfo->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret) + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } static void target_tmr_work(struct work_struct *work) diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 2e282461cfa5..730ed3055336 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -117,6 +117,7 @@ enum transport_state_table { TRANSPORT_ISTATE_PROCESSING = 11, TRANSPORT_COMPLETE_QF_WP = 18, TRANSPORT_COMPLETE_QF_OK = 19, + TRANSPORT_COMPLETE_QF_ERR = 20, }; /* Used for struct se_cmd->se_cmd_flags */ -- cgit v1.2.3-71-gd317 From d19c4643a52f0a56a7ccc86b145f207a57f40116 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 29 Mar 2017 00:19:24 -0500 Subject: target: Fix ALUA transition state race between multiple initiators Multiple threads could be writing to alua_access_state at the same time, or there could be multiple STPGs in flight (different initiators sending them or one initiator sending them to different ports), or a combo of both, and the core_alua_do_transition_tg_pt() calls will race with each other. Because, as of the last patches, we no longer delay running core_alua_do_transition_tg_pt_work, there does not seem to be any point in running it in a workqueue. And we always wait for it to complete one way or another, so we can sleep in this code path. So this patch, made over target-pending, just adds a mutex and does the work core_alua_do_transition_tg_pt_work was doing directly in core_alua_do_transition_tg_pt(). There is also no need to use an atomic for tg_pt_gp_alua_access_state: in core_alua_do_transition_tg_pt() we test and set it under the transition mutex, and since it is an int/32 bits, the other places where it is read will never see it partially updated.
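The locking pattern, sketched in miniature as a userspace mock-up (names are illustrative): a plain int written only under a mutex, so lockless readers see either the old or the new value, never a torn one.

#include <pthread.h>

struct tg_pt_gp {
	pthread_mutex_t transition_mutex;
	int alua_access_state;	/* written only under the mutex */
};

static int do_transition(struct tg_pt_gp *gp, int new_state)
{
	pthread_mutex_lock(&gp->transition_mutex);
	if (gp->alua_access_state == new_state) {
		pthread_mutex_unlock(&gp->transition_mutex);
		return 0;			/* nothing to be done */
	}
	gp->alua_access_state = new_state;	/* one aligned 32-bit store */
	/* ... write metadata, queue unit attentions, sleep freely ... */
	pthread_mutex_unlock(&gp->transition_mutex);
	return 0;
}
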
Signed-off-by: Mike Christie Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 136 ++++++++++++---------------------- drivers/target/target_core_configfs.c | 2 +- include/target/target_core_base.h | 8 +- 3 files changed, 51 insertions(+), 95 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index fd7c16a7ca6e..fc4a9c303d55 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) /* * Set the ASYMMETRIC ACCESS State */ - buf[off++] |= (atomic_read( - &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff); + buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff; /* * Set supported ASYMMETRIC ACCESS State bits */ @@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd) spin_lock(&lun->lun_tg_pt_gp_lock); tg_pt_gp = lun->lun_tg_pt_gp; - out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state; nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; // XXX: keeps using tg_pt_gp witout reference after unlock @@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata( } /* - * Called with tg_pt_gp->tg_pt_gp_md_mutex held + * Called with tg_pt_gp->tg_pt_gp_transition_mutex held */ static int core_alua_update_tpg_primary_metadata( struct t10_alua_tg_pt_gp *tg_pt_gp) @@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata( "alua_access_state=0x%02x\n" "alua_access_status=0x%02x\n", tg_pt_gp->tg_pt_gp_id, - tg_pt_gp->tg_pt_gp_alua_pending_state, + tg_pt_gp->tg_pt_gp_alua_access_state, tg_pt_gp->tg_pt_gp_alua_access_status); snprintf(path, ALUA_METADATA_PATH_LEN, @@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) spin_unlock(&tg_pt_gp->tg_pt_gp_lock); } -static void core_alua_do_transition_tg_pt_work(struct work_struct *work) -{ - struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, - struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work); - struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; - bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == - ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); - - /* - * Update the ALUA metadata buf that has been allocated in - * core_alua_do_port_transition(), this metadata will be written - * to struct file. - * - * Note that there is the case where we do not want to update the - * metadata when the saved metadata is being parsed in userspace - * when setting the existing port access state and access status. - * - * Also note that the failure to write out the ALUA metadata to - * struct file does NOT affect the actual ALUA transition. - */ - if (tg_pt_gp->tg_pt_gp_write_metadata) { - mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); - core_alua_update_tpg_primary_metadata(tg_pt_gp); - mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); - } - /* - * Set the current primary ALUA access state to the requested new state - */ - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - tg_pt_gp->tg_pt_gp_alua_pending_state); - - pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" - " from primary access state %s to %s\n", (explicit) ? 
"explicit" : - "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), - tg_pt_gp->tg_pt_gp_id, - core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), - core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); - - core_alua_queue_state_change_ua(tg_pt_gp); - - spin_lock(&dev->t10_alua.tg_pt_gps_lock); - atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - - if (tg_pt_gp->tg_pt_gp_transition_complete) - complete(tg_pt_gp->tg_pt_gp_transition_complete); -} - static int core_alua_do_transition_tg_pt( struct t10_alua_tg_pt_gp *tg_pt_gp, int new_state, int explicit) { - struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; - DECLARE_COMPLETION_ONSTACK(wait); + int prev_state; + mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex); /* Nothing to be done here */ - if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) + if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) { + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return 0; + } - if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) + if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) { + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return -EAGAIN; - - /* - * Flush any pending transitions - */ - if (!explicit) - flush_work(&tg_pt_gp->tg_pt_gp_transition_work); + } /* * Save the old primary ALUA access state, and set the current state * to ALUA_ACCESS_STATE_TRANSITION. */ - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_TRANSITION); + prev_state = tg_pt_gp->tg_pt_gp_alua_access_state; + tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION; tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; core_alua_queue_state_change_ua(tg_pt_gp); - if (new_state == ALUA_ACCESS_STATE_TRANSITION) + if (new_state == ALUA_ACCESS_STATE_TRANSITION) { + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return 0; - - tg_pt_gp->tg_pt_gp_alua_previous_state = - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + } /* * Check for the optional ALUA primary state transition delay @@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt( msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); /* - * Take a reference for workqueue item + * Set the current primary ALUA access state to the requested new state */ - spin_lock(&dev->t10_alua.tg_pt_gps_lock); - atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + tg_pt_gp->tg_pt_gp_alua_access_state = new_state; - schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); - if (explicit) { - tg_pt_gp->tg_pt_gp_transition_complete = &wait; - wait_for_completion(&wait); - tg_pt_gp->tg_pt_gp_transition_complete = NULL; + /* + * Update the ALUA metadata buf that has been allocated in + * core_alua_do_port_transition(), this metadata will be written + * to struct file. + * + * Note that there is the case where we do not want to update the + * metadata when the saved metadata is being parsed in userspace + * when setting the existing port access state and access status. + * + * Also note that the failure to write out the ALUA metadata to + * struct file does NOT affect the actual ALUA transition. + */ + if (tg_pt_gp->tg_pt_gp_write_metadata) { + core_alua_update_tpg_primary_metadata(tg_pt_gp); } + pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" + " from primary access state %s to %s\n", (explicit) ? 
"explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + tg_pt_gp->tg_pt_gp_id, + core_alua_dump_state(prev_state), + core_alua_dump_state(new_state)); + + core_alua_queue_state_change_ua(tg_pt_gp); + + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return 0; } @@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, } INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); - mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); + mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); - INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work, - core_alua_do_transition_tg_pt_work); tg_pt_gp->tg_pt_gp_dev = dev; - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); + tg_pt_gp->tg_pt_gp_alua_access_state = + ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; /* * Enable both explicit and implicit ALUA support by default */ @@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp( dev->t10_alua.alua_tg_pt_gps_counter--; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - flush_work(&tg_pt_gp->tg_pt_gp_transition_work); - /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by * core_alua_get_tg_pt_gp_by_name() in @@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) "Primary Access Status: %s\nTG Port Secondary Access" " State: %s\nTG Port Secondary Access Status: %s\n", config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, - core_alua_dump_state(atomic_read( - &tg_pt_gp->tg_pt_gp_alua_access_state)), + core_alua_dump_state( + tg_pt_gp->tg_pt_gp_alua_access_state), core_alua_dump_status( tg_pt_gp->tg_pt_gp_alua_access_status), atomic_read(&lun->lun_tg_pt_secondary_offline) ? 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 38b5025e4c7a..70657fd56440 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", - atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state)); + to_tg_pt_gp(item)->tg_pt_gp_alua_access_state); } static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 730ed3055336..ccfad0e9c2cd 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -280,8 +280,6 @@ struct t10_alua_tg_pt_gp { u16 tg_pt_gp_id; int tg_pt_gp_valid_id; int tg_pt_gp_alua_supported_states; - int tg_pt_gp_alua_pending_state; - int tg_pt_gp_alua_previous_state; int tg_pt_gp_alua_access_status; int tg_pt_gp_alua_access_type; int tg_pt_gp_nonop_delay_msecs; @@ -290,18 +288,16 @@ struct t10_alua_tg_pt_gp { int tg_pt_gp_pref; int tg_pt_gp_write_metadata; u32 tg_pt_gp_members; - atomic_t tg_pt_gp_alua_access_state; + int tg_pt_gp_alua_access_state; atomic_t tg_pt_gp_ref_cnt; spinlock_t tg_pt_gp_lock; - struct mutex tg_pt_gp_md_mutex; + struct mutex tg_pt_gp_transition_mutex; struct se_device *tg_pt_gp_dev; struct config_group tg_pt_gp_group; struct list_head tg_pt_gp_list; struct list_head tg_pt_gp_lun_list; struct se_lun *tg_pt_gp_alua_lun; struct se_node_acl *tg_pt_gp_alua_nacl; - struct work_struct tg_pt_gp_transition_work; - struct completion *tg_pt_gp_transition_complete; }; struct t10_vpd { -- cgit v1.2.3-71-gd317 From bf17aa36c0f199f5b254262e77eaefda7da0f50b Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 1 Mar 2017 18:22:01 -0800 Subject: nvme: Correct NVMF enum values to match NVMe-oF rev 1.0 The enum values for QPTYPE, PRTYPE and CMS are off by 1 from the values defined in figure 42 of the NVM Express over Fabrics 1.0: http://www.nvmexpress.org/wp-content/uploads/NVMe_over_Fabrics_1_0_Gold_20160605-1.pdf Fix our enums to match the final spec. 
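As a quick reference for consumers of the discovery log page, a decoder using the corrected values might read as follows (illustrative only; the values mirror the fixed enums in the diff below):

static const char *nvmf_rdma_prtype_name(unsigned char prtype)
{
	switch (prtype) {
	case 1: return "not specified";	/* NVMF_RDMA_PRTYPE_NOT_SPECIFIED */
	case 2: return "InfiniBand";	/* NVMF_RDMA_PRTYPE_IB */
	case 3: return "RoCE";		/* NVMF_RDMA_PRTYPE_ROCE */
	case 4: return "RoCEv2";	/* NVMF_RDMA_PRTYPE_ROCEV2 */
	case 5: return "iWARP";		/* NVMF_RDMA_PRTYPE_IWARP */
	default: return "reserved";
	}
}
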
Signed-off-by: Roland Dreier Reviewed-by: Christoph Hellwig Signed-off-by: Sagi Grimberg --- include/linux/nvme.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/nvme.h b/include/linux/nvme.h index c43d435d4225..9061780b141f 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -64,26 +64,26 @@ enum { * RDMA_QPTYPE field */ enum { - NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */ - NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */ + NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */ + NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */ }; /* RDMA QP Service Type codes for Discovery Log Page entry TSAS * RDMA_QPTYPE field */ enum { - NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */ - NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */ - NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */ - NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */ - NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */ + NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */ + NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */ + NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */ + NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */ + NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */ }; /* RDMA Connection Management Service Type codes for Discovery Log Page * entry TSAS RDMA_CMS field */ enum { - NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */ + NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */ }; #define NVMF_AQ_DEPTH 32 -- cgit v1.2.3-71-gd317 From 27c0e3748e41ca79171ffa3e97415a20af6facd0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Feb 2017 18:42:24 -0500 Subject: [iov_iter] new primitive: iov_iter_revert() Opposite to iov_iter_advance(); the caller is responsible for never using it to move back past the initial position.
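A typical use, sketched with a hypothetical caller (struct my_ctx and queue_for_tx() are stand-ins; copy_from_iter(), iov_iter_count() and the new iov_iter_revert() are the real primitives):

static ssize_t try_send(struct my_ctx *ctx, struct iov_iter *from)
{
	size_t chunk = min(iov_iter_count(from), ctx->max_chunk);
	size_t copied = copy_from_iter(ctx->buf, chunk, from);
	int err = queue_for_tx(ctx, ctx->buf, copied);

	if (err) {
		/* put back exactly what we consumed, never more */
		iov_iter_revert(from, copied);
		return err;
	}
	return copied;
}
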
Cc: stable@vger.kernel.org Signed-off-by: Al Viro --- include/linux/uio.h | 6 ++++- lib/iov_iter.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/uio.h b/include/linux/uio.h index 804e34c6f981..f2d36a3d3005 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -39,7 +39,10 @@ struct iov_iter { }; union { unsigned long nr_segs; - int idx; + struct { + int idx; + int start_idx; + }; }; }; @@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes); void iov_iter_advance(struct iov_iter *i, size_t bytes); +void iov_iter_revert(struct iov_iter *i, size_t bytes); int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, diff --git a/lib/iov_iter.c b/lib/iov_iter.c index e68604ae3ced..60abc44385b7 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size) } EXPORT_SYMBOL(iov_iter_advance); +void iov_iter_revert(struct iov_iter *i, size_t unroll) +{ + if (!unroll) + return; + i->count += unroll; + if (unlikely(i->type & ITER_PIPE)) { + struct pipe_inode_info *pipe = i->pipe; + int idx = i->idx; + size_t off = i->iov_offset; + while (1) { + size_t n = off - pipe->bufs[idx].offset; + if (unroll < n) { + off -= (n - unroll); + break; + } + unroll -= n; + if (!unroll && idx == i->start_idx) { + off = 0; + break; + } + if (!idx--) + idx = pipe->buffers - 1; + off = pipe->bufs[idx].offset + pipe->bufs[idx].len; + } + i->iov_offset = off; + i->idx = idx; + pipe_truncate(i); + return; + } + if (unroll <= i->iov_offset) { + i->iov_offset -= unroll; + return; + } + unroll -= i->iov_offset; + if (i->type & ITER_BVEC) { + const struct bio_vec *bvec = i->bvec; + while (1) { + size_t n = (--bvec)->bv_len; + i->nr_segs++; + if (unroll <= n) { + i->bvec = bvec; + i->iov_offset = n - unroll; + return; + } + unroll -= n; + } + } else { /* same logics for iovec and kvec */ + const struct iovec *iov = i->iov; + while (1) { + size_t n = (--iov)->iov_len; + i->nr_segs++; + if (unroll <= n) { + i->iov = iov; + i->iov_offset = n - unroll; + return; + } + unroll -= n; + } + } +} +EXPORT_SYMBOL(iov_iter_revert); + /* * Return the count of just the current iov_iter segment. */ @@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction, i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); i->iov_offset = 0; i->count = count; + i->start_idx = i->idx; } EXPORT_SYMBOL(iov_iter_pipe); -- cgit v1.2.3-71-gd317 From 47071aee6a1956524b9929b3b821f6d2f8cae23c Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 31 Mar 2017 18:32:10 +0100 Subject: statx: Reserve the top bit of the mask for future struct expansion Reserve the top bit of the mask for future expansion of the statx struct and give an error if statx() sees it set. All the other bits are ignored if we see them set but don't support the bit; we just clear the bit in the returned mask. 
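From userspace the rule looks like this (a sketch using the raw syscall, since glibc had no statx() wrapper at the time; it assumes headers that carry __NR_statx and the uapi struct, and 0x40000000 stands in for a hypothetical future, non-reserved mask bit):

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/stat.h>

int main(void)
{
	struct statx stx;

	/* Unsupported-but-unreserved bits are ignored and simply
	 * cleared from the returned stx_mask. */
	syscall(__NR_statx, AT_FDCWD, ".", 0,
		STATX_BASIC_STATS | 0x40000000U, &stx);

	/* The reserved top bit fails outright with -EINVAL. */
	return syscall(__NR_statx, AT_FDCWD, ".", 0,
		       STATX__RESERVED, &stx);
}
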
Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/stat.c | 2 ++ include/uapi/linux/stat.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include') diff --git a/fs/stat.c b/fs/stat.c index ab27f2868588..0c7e6cdc435c 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -562,6 +562,8 @@ SYSCALL_DEFINE5(statx, struct kstat stat; int error; + if (mask & STATX__RESERVED) + return -EINVAL; if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) return -EINVAL; diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 51a6b86e3700..0869b9eaa8ce 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -152,6 +152,7 @@ struct statx { #define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ #define STATX_BTIME 0x00000800U /* Want/got stx_btime */ #define STATX_ALL 0x00000fffU /* All currently supported flags */ +#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ /* * Attributes to be found in stx_attributes -- cgit v1.2.3-71-gd317 From 3209f68b3ca4667069923a325c88b21131bfdf9f Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 31 Mar 2017 18:32:17 +0100 Subject: statx: Include a mask for stx_attributes in struct statx Include a mask in struct statx to indicate which bits of stx_attributes the filesystem actually supports. This would also be useful if we add another system call that allows you to do a 'bulk attribute set' and pass in a statx struct with the masks appropriately set to say what you want to set. Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/ext4/inode.c | 6 ++++++ fs/stat.c | 1 + include/linux/stat.h | 1 + include/uapi/linux/stat.h | 4 ++-- samples/statx/test-statx.c | 12 ++++++++---- 5 files changed, 18 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5d02b922afa3..b9ffa9f4191f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5413,6 +5413,12 @@ int ext4_getattr(const struct path *path, struct kstat *stat, if (flags & EXT4_NODUMP_FL) stat->attributes |= STATX_ATTR_NODUMP; + stat->attributes_mask |= (STATX_ATTR_APPEND | + STATX_ATTR_COMPRESSED | + STATX_ATTR_ENCRYPTED | + STATX_ATTR_IMMUTABLE | + STATX_ATTR_NODUMP); + generic_fillattr(inode, stat); return 0; } diff --git a/fs/stat.c b/fs/stat.c index 0c7e6cdc435c..c6c963b2546b 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -527,6 +527,7 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer) tmp.stx_ino = stat->ino; tmp.stx_size = stat->size; tmp.stx_blocks = stat->blocks; + tmp.stx_attributes_mask = stat->attributes_mask; tmp.stx_atime.tv_sec = stat->atime.tv_sec; tmp.stx_atime.tv_nsec = stat->atime.tv_nsec; tmp.stx_btime.tv_sec = stat->btime.tv_sec; diff --git a/include/linux/stat.h b/include/linux/stat.h index c76e524fb34b..64b6b3aece21 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -26,6 +26,7 @@ struct kstat { unsigned int nlink; uint32_t blksize; /* Preferred I/O size */ u64 attributes; + u64 attributes_mask; #define KSTAT_ATTR_FS_IOC_FLAGS \ (STATX_ATTR_COMPRESSED | \ STATX_ATTR_IMMUTABLE | \ diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 0869b9eaa8ce..d538897b8e08 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -114,7 +114,7 @@ struct statx { __u64 stx_ino; /* Inode number */ __u64 stx_size; /* File size */ __u64 stx_blocks; /* Number of 512-byte blocks allocated */ - __u64 __spare1[1]; + __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */ /* 0x40 */ struct
statx_timestamp stx_atime; /* Last access time */ struct statx_timestamp stx_btime; /* File creation time */ @@ -155,7 +155,7 @@ struct statx { #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ /* - * Attributes to be found in stx_attributes + * Attributes to be found in stx_attributes and masked in stx_attributes_mask. * * These give information about the features or the state of a file that might * be of use to ordinary userspace programs such as GUIs or ls rather than diff --git a/samples/statx/test-statx.c b/samples/statx/test-statx.c index 8571d766331d..d4d77b09412c 100644 --- a/samples/statx/test-statx.c +++ b/samples/statx/test-statx.c @@ -141,8 +141,8 @@ static void dump_statx(struct statx *stx) if (stx->stx_mask & STATX_BTIME) print_time(" Birth: ", &stx->stx_btime); - if (stx->stx_attributes) { - unsigned char bits; + if (stx->stx_attributes_mask) { + unsigned char bits, mbits; int loop, byte; static char attr_representation[64 + 1] = @@ -160,14 +160,18 @@ static void dump_statx(struct statx *stx) printf("Attributes: %016llx (", stx->stx_attributes); for (byte = 64 - 8; byte >= 0; byte -= 8) { bits = stx->stx_attributes >> byte; + mbits = stx->stx_attributes_mask >> byte; for (loop = 7; loop >= 0; loop--) { int bit = byte + loop; - if (bits & 0x80) + if (!(mbits & 0x80)) + putchar('.'); /* Not supported */ + else if (bits & 0x80) putchar(attr_representation[63 - bit]); else - putchar('-'); + putchar('-'); /* Not set */ bits <<= 1; + mbits <<= 1; } if (byte) putchar(' '); -- cgit v1.2.3-71-gd317 From df2729c3238ed89fb8ccf850d38c732858a5bade Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 1 Apr 2017 17:15:59 +0800 Subject: sctp: check for dst and pathmtu update in sctp_packet_config This patch is to move sctp_transport_dst_check into sctp_packet_config from sctp_packet_transmit and add pathmtu check in sctp_packet_config. With this fix, sctp can update dst or pathmtu before appending chunks, which can avoid dropping packets in sctp_packet_transmit when dst is obsolete or dst's mtu is changed. This patch is also to improve some other code in sctp_packet_config. It updates packet max_size with gso_max_size, checks for dst and pathmtu, and appends ecne chunk only when packet is empty and asoc is not NULL. It makes sctp flush work better, as we only need to set them up once for one flush schedule. It's also safe, since asoc is NULL only when the packet is created by sctp_ootb_pkt_new in which it just gets the new dst, no need to do more things for it other than set packet with transport's pathmtu. Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S.
Miller --- include/net/sctp/sctp.h | 17 ++++++++++--- net/sctp/output.c | 67 ++++++++++++++++++++++++++----------------------- 2 files changed, 50 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 1f71ee5ab518..d75caa7a629b 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -596,12 +596,23 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) */ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) { - if (t->dst && (!dst_check(t->dst, t->dst_cookie) || - t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)), - SCTP_DEFAULT_MINSEGMENT))) + if (t->dst && !dst_check(t->dst, t->dst_cookie)) sctp_transport_dst_release(t); return t->dst; } +static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) +{ + __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)), + SCTP_DEFAULT_MINSEGMENT); + + if (t->pathmtu == pmtu) + return true; + + t->pathmtu = pmtu; + + return false; +} + #endif /* __net_sctp_h__ */ diff --git a/net/sctp/output.c b/net/sctp/output.c index 73fd178007a3..ec4d50a38713 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -86,43 +86,53 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, { struct sctp_transport *tp = packet->transport; struct sctp_association *asoc = tp->asoc; + struct sock *sk; pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); - packet->vtag = vtag; - if (asoc && tp->dst) { - struct sock *sk = asoc->base.sk; - - rcu_read_lock(); - if (__sk_dst_get(sk) != tp->dst) { - dst_hold(tp->dst); - sk_setup_caps(sk, tp->dst); - } - - if (sk_can_gso(sk)) { - struct net_device *dev = tp->dst->dev; + /* do the following jobs only once for a flush schedule */ + if (!sctp_packet_empty(packet)) + return; - packet->max_size = dev->gso_max_size; - } else { - packet->max_size = asoc->pathmtu; - } - rcu_read_unlock(); + /* set packet max_size with pathmtu */ + packet->max_size = tp->pathmtu; + if (!asoc) + return; - } else { - packet->max_size = tp->pathmtu; + /* update dst or transport pathmtu if in need */ + sk = asoc->base.sk; + if (!sctp_transport_dst_check(tp)) { + sctp_transport_route(tp, NULL, sctp_sk(sk)); + if (asoc->param_flags & SPP_PMTUD_ENABLE) + sctp_assoc_sync_pmtu(sk, asoc); + } else if (!sctp_transport_pmtu_check(tp)) { + if (asoc->param_flags & SPP_PMTUD_ENABLE) + sctp_assoc_sync_pmtu(sk, asoc); } - if (ecn_capable && sctp_packet_empty(packet)) { - struct sctp_chunk *chunk; + /* If there a is a prepend chunk stick it on the list before + * any other chunks get appended. + */ + if (ecn_capable) { + struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc); - /* If there a is a prepend chunk stick it on the list before - * any other chunks get appended. - */ - chunk = sctp_get_ecne_prepend(asoc); if (chunk) sctp_packet_append_chunk(packet, chunk); } + + if (!tp->dst) + return; + + /* set packet max_size with gso_max_size if gso is enabled*/ + rcu_read_lock(); + if (__sk_dst_get(sk) != tp->dst) { + dst_hold(tp->dst); + sk_setup_caps(sk, tp->dst); + } + packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size + : asoc->pathmtu; + rcu_read_unlock(); } /* Initialize the packet structure. 
*/ @@ -582,12 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) sh->vtag = htonl(packet->vtag); sh->checksum = 0; - /* update dst if in need */ - if (!sctp_transport_dst_check(tp)) { - sctp_transport_route(tp, NULL, sctp_sk(sk)); - if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE) - sctp_assoc_sync_pmtu(sk, asoc); - } + /* drop packet if no dst */ dst = dst_clone(tp->dst); if (!dst) { IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); -- cgit v1.2.3-71-gd317 From 5b0d2cc2805897c14257f6dbb949639c499c3c25 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sat, 18 Mar 2017 13:56:56 +0100 Subject: KVM: arm64: Ensure LRs are clear when they should be We currently have some code to clear the list registers on GICv3, but we never call this code, because the caller got nuked when removing the old vgic. We also used to have a similar GICv2 part, but that got lost in the process too. Let's reintroduce the logic for GICv2 and call it when we initialize use of the hypervisor on a CPU, for example when first loading KVM or when exiting a low power state. Reviewed-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/arm.c | 3 +++ include/kvm/arm_vgic.h | 1 + virt/kvm/arm/vgic/vgic-init.c | 19 +++++++++++++++++++ virt/kvm/arm/vgic/vgic-v2.c | 15 +++++++++++++++ virt/kvm/arm/vgic/vgic.h | 2 ++ 5 files changed, 40 insertions(+) (limited to 'include') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 96dba7cd8be7..314eb6abe1ff 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void) if (__hyp_get_vectors() == hyp_default_vectors) cpu_init_hyp_mode(NULL); } + + if (vgic_present) + kvm_vgic_init_cpu_hardware(); } static void cpu_hyp_reset(void) diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index b72dd2ad5f44..c0b3d999c266 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -295,6 +295,7 @@ void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu); void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); int kvm_vgic_map_resources(struct kvm *kvm); int kvm_vgic_hyp_init(void); +void kvm_vgic_init_cpu_hardware(void); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, bool level); diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 276139a24e6f..702f8108608d 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -391,6 +391,25 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data) return IRQ_HANDLED; } +/** + * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware + * + * For a specific CPU, initialize the GIC VE hardware. + */ +void kvm_vgic_init_cpu_hardware(void) +{ + BUG_ON(preemptible()); + + /* + * We want to make sure the list registers start out clear so that we + * only have to program the used registers. + */ + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_init_lrs(); + else + kvm_call_hyp(__vgic_v3_init_lrs); +} + /** * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable * according to the host GIC model.
Accordingly calls either diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index b834ecdf3225..94cf4b9b6471 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -36,6 +36,21 @@ static unsigned long *u64_to_bitmask(u64 *val) return (unsigned long *)val; } +static inline void vgic_v2_write_lr(int lr, u32 val) +{ + void __iomem *base = kvm_vgic_global_state.vctrl_base; + + writel_relaxed(val, base + GICH_LR0 + (lr * 4)); +} + +void vgic_v2_init_lrs(void) +{ + int i; + + for (i = 0; i < kvm_vgic_global_state.nr_lr; i++) + vgic_v2_write_lr(i, 0); +} + void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu) { struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index db28f7cadab2..91566f5aac9b 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -130,6 +130,8 @@ int vgic_v2_map_resources(struct kvm *kvm); int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type); +void vgic_v2_init_lrs(void); + static inline void vgic_get_irq_kref(struct vgic_irq *irq) { if (irq->intid < VGIC_MIN_LPI) -- cgit v1.2.3-71-gd317 From 6d56111c92d247bb64301029fe88365aa4caf16e Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Tue, 21 Mar 2017 22:05:22 +0100 Subject: KVM: arm/arm64: vgic: Fix GICC_PMR uaccess on GICv3 and clarify ABI As an oversight, for GICv2, we accidentally export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask field in the lower 5 bits of a word, meaning that userspace must always use the lower 5 bits to communicate with the KVM device and must shift the value left by 3 places to obtain the actual priority mask level. Since GICv3 supports the full 8 bits of priority masking in the ICH_VMCR, we have to fix the value we export when emulating a GICv2 on top of a hardware GICv3 and exporting the emulated GICv2 state to userspace. Take the chance to clarify this aspect of the ABI. Reviewed-by: Marc Zyngier Signed-off-by: Christoffer Dall --- Documentation/virtual/kvm/devices/arm-vgic.txt | 6 ++++++ include/linux/irqchip/arm-gic.h | 3 +++ virt/kvm/arm/vgic/vgic-mmio-v2.c | 20 ++++++++++++++++++-- virt/kvm/arm/vgic/vgic-v2.c | 8 ++++---- virt/kvm/arm/vgic/vgic.h | 9 ++++++++- 5 files changed, 39 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt index 76e61c883347..b2f60ca8b60c 100644 --- a/Documentation/virtual/kvm/devices/arm-vgic.txt +++ b/Documentation/virtual/kvm/devices/arm-vgic.txt @@ -83,6 +83,12 @@ Groups: Bits for undefined preemption levels are RAZ/WI. + For historical reasons and to provide ABI compatibility with userspace we + export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask + field in the lower 5 bits of a word, meaning that userspace must always + use the lower 5 bits to communicate with the KVM device and must shift the + value left by 3 places to obtain the actual priority mask level. + Limitations: - Priorities are not implemented, and registers are RAZ/WI - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2. 
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index eafc965b3eb8..dc30f3d057eb 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -96,6 +96,9 @@ #define GICH_MISR_EOI (1 << 0) #define GICH_MISR_U (1 << 1) +#define GICV_PMR_PRIORITY_SHIFT 3 +#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT) + #ifndef __ASSEMBLY__ #include diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index a3ad7ff95c9b..0a4283ed9aa7 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu, val = vmcr.ctlr; break; case GIC_CPU_PRIMASK: - val = vmcr.pmr; + /* + * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the + * PMR field as GICH_VMCR.VMPriMask rather than + * GICC_PMR.Priority, so we expose the upper five bits of + * priority mask to userspace using the lower bits in the + * unsigned long. + */ + val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >> + GICV_PMR_PRIORITY_SHIFT; break; case GIC_CPU_BINPOINT: val = vmcr.bpr; @@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu, vmcr.ctlr = val; break; case GIC_CPU_PRIMASK: - vmcr.pmr = val; + /* + * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the + * PMR field as GICH_VMCR.VMPriMask rather than + * GICC_PMR.Priority, so we expose the upper five bits of + * priority mask to userspace using the lower bits in the + * unsigned long. + */ + vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) & + GICV_PMR_PRIORITY_MASK; break; case GIC_CPU_BINPOINT: vmcr.bpr = val; diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 94cf4b9b6471..b637d9c7afe3 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -206,8 +206,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) GICH_VMCR_ALIAS_BINPOINT_MASK; vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK; - vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & - GICH_VMCR_PRIMASK_MASK; + vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) << + GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK; vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; } @@ -222,8 +222,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) GICH_VMCR_ALIAS_BINPOINT_SHIFT; vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT; - vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> - GICH_VMCR_PRIMASK_SHIFT; + vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >> + GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT; } void vgic_v2_enable(struct kvm_vcpu *vcpu) diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 91566f5aac9b..6cf557e9f718 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -81,11 +81,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq) return irq->pending_latch || irq->line_level; } +/* + * This struct provides an intermediate representation of the fields contained + * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC + * state to userspace can generate either GICv2 or GICv3 CPU interface + * registers regardless of the hardware backed GIC used.
+ */ struct vgic_vmcr { u32 ctlr; u32 abpr; u32 bpr; - u32 pmr; + u32 pmr; /* Priority mask field in the GICC_PMR and + * ICC_PMR_EL1 priority field format */ /* Below member variable are valid only for GICv3 */ u32 grpen0; u32 grpen1; -- cgit v1.2.3-71-gd317 From 5ec1a96010aa7aff19adba92a8163ef8f8c5c1ad Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 18 Nov 2016 16:55:48 +0200 Subject: drm: Add data transmission order bus flag The flags indicate whether data is transmitted LSB to MSB or MSB to LSB on the bus. The exact meaning is bus-type dependent. For instance, for LVDS buses the flags indicate whether the seven data bits transmitted in a clock pulse are sent in normal order (MSB to LSB, slots 0 to 6) or reverse order (LSB to MSB, slots 6 to 0). Signed-off-by: Laurent Pinchart Reviewed-by: Thierry Reding --- include/drm/drm_connector.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 941f57f311aa..8250062eb8a5 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -224,6 +224,10 @@ struct drm_display_info { #define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2) /* drive data on neg. edge */ #define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3) +/* data is transmitted MSB to LSB on the bus */ +#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4) +/* data is transmitted LSB to MSB on the bus */ +#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5) /** * @bus_flags: Additional information (like pixel signal polarity) for -- cgit v1.2.3-71-gd317 From 62e24c5775ecb387a3eb33701378ccfa6dbc98ee Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Fri, 5 Feb 2016 13:41:39 +0100 Subject: reset: add exported __reset_control_get, return NULL if optional Rename the internal __reset_control_get/put functions to __reset_control_get/put_internal and add an exported __reset_control_get equivalent to __of_reset_control_get that takes a struct device parameter. This avoids the confusing call to __of_reset_control_get in the non-DT case and fixes the devm_reset_control_get_optional function to return NULL if RESET_CONTROLLER is enabled but dev->of_node == NULL. 
Fixes: bb475230b8e5 ("reset: make optional functions really optional") Reported-by: Andy Shevchenko Tested-by: Andy Shevchenko Cc: Ramiro Oliveira Signed-off-by: Philipp Zabel --- drivers/reset/core.c | 22 ++++++++++++++++------ include/linux/reset.h | 22 ++++++++++++++-------- 2 files changed, 30 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/reset/core.c b/drivers/reset/core.c index f1e5e65388bb..cd739d2fa160 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc) } EXPORT_SYMBOL_GPL(reset_control_status); -static struct reset_control *__reset_control_get( +static struct reset_control *__reset_control_get_internal( struct reset_controller_dev *rcdev, unsigned int index, bool shared) { @@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get( return rstc; } -static void __reset_control_put(struct reset_control *rstc) +static void __reset_control_put_internal(struct reset_control *rstc) { lockdep_assert_held(&reset_list_mutex); @@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node, } /* reset_list_mutex also protects the rcdev's reset_control list */ - rstc = __reset_control_get(rcdev, rstc_id, shared); + rstc = __reset_control_get_internal(rcdev, rstc_id, shared); mutex_unlock(&reset_list_mutex); @@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node, } EXPORT_SYMBOL_GPL(__of_reset_control_get); +struct reset_control *__reset_control_get(struct device *dev, const char *id, + int index, bool shared, bool optional) +{ + if (dev->of_node) + return __of_reset_control_get(dev->of_node, id, index, shared, + optional); + + return optional ? NULL : ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(__reset_control_get); + /** * reset_control_put - free the reset controller * @rstc: reset controller @@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc) return; mutex_lock(&reset_list_mutex); - __reset_control_put(rstc); + __reset_control_put_internal(rstc); mutex_unlock(&reset_list_mutex); } EXPORT_SYMBOL_GPL(reset_control_put); @@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev, if (!ptr) return ERR_PTR(-ENOMEM); - rstc = __of_reset_control_get(dev ? dev->of_node : NULL, - id, index, shared, optional); + rstc = __reset_control_get(dev, id, index, shared, optional); if (!IS_ERR(rstc)) { *ptr = rstc; devres_add(dev, ptr); diff --git a/include/linux/reset.h b/include/linux/reset.h index 96fb139bdd08..13d8681210d5 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc); struct reset_control *__of_reset_control_get(struct device_node *node, const char *id, int index, bool shared, bool optional); +struct reset_control *__reset_control_get(struct device *dev, const char *id, + int index, bool shared, + bool optional); void reset_control_put(struct reset_control *rstc); struct reset_control *__devm_reset_control_get(struct device *dev, const char *id, int index, bool shared, @@ -72,6 +75,13 @@ static inline struct reset_control *__of_reset_control_get( return optional ? NULL : ERR_PTR(-ENOTSUPP); } +static inline struct reset_control *__reset_control_get( + struct device *dev, const char *id, + int index, bool shared, bool optional) +{ + return optional ? 
NULL : ERR_PTR(-ENOTSUPP); +} + static inline struct reset_control *__devm_reset_control_get( struct device *dev, const char *id, int index, bool shared, bool optional) @@ -102,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) #ifndef CONFIG_RESET_CONTROLLER WARN_ON(1); #endif - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, - false); + return __reset_control_get(dev, id, 0, false, false); } /** @@ -131,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) static inline struct reset_control *reset_control_get_shared( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, - false); + return __reset_control_get(dev, id, 0, true, false); } static inline struct reset_control *reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, - true); + return __reset_control_get(dev, id, 0, false, true); } static inline struct reset_control *reset_control_get_optional_shared( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, - true); + return __reset_control_get(dev, id, 0, true, true); } /** -- cgit v1.2.3-71-gd317 From ea642c3216cb2a60d1c0e760ae47ee85c9c16447 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 28 Mar 2017 16:54:50 +0200 Subject: drm/ttm: add io_mem_pfn callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows the driver to handle io_mem mappings on their own. Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Nicolai Hähnle Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 + drivers/gpu/drm/ast/ast_ttm.c | 1 + drivers/gpu/drm/bochs/bochs_mm.c | 1 + drivers/gpu/drm/cirrus/cirrus_ttm.c | 1 + drivers/gpu/drm/mgag200/mgag200_ttm.c | 1 + drivers/gpu/drm/nouveau/nouveau_bo.c | 1 + drivers/gpu/drm/qxl/qxl_ttm.c | 1 + drivers/gpu/drm/radeon/radeon_ttm.c | 1 + drivers/gpu/drm/ttm/ttm_bo_vm.c | 10 +++++++++- drivers/gpu/drm/virtio/virtgpu_ttm.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 1 + include/drm/ttm/ttm_bo_api.h | 11 +++++++++++ include/drm/ttm/ttm_bo_driver.h | 9 +++++++++ 13 files changed, 39 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 244bb9aacf86..7b6463b288aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1089,6 +1089,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_free = &amdgpu_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int amdgpu_ttm_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 50c910efa13d..e879496b8a42 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -236,6 +236,7 @@ struct ttm_bo_driver ast_bo_driver = { .verify_access = ast_bo_verify_access, .io_mem_reserve = &ast_ttm_io_mem_reserve, .io_mem_free = &ast_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int ast_mm_init(struct ast_private *ast) diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 857755ac2d70..c4cadb638460 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ 
b/drivers/gpu/drm/bochs/bochs_mm.c @@ -205,6 +205,7 @@ struct ttm_bo_driver bochs_bo_driver = { .verify_access = bochs_bo_verify_access, .io_mem_reserve = &bochs_ttm_io_mem_reserve, .io_mem_free = &bochs_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int bochs_mm_init(struct bochs_device *bochs) diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index f53aa8f4a143..93dbcd38355d 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -236,6 +236,7 @@ struct ttm_bo_driver cirrus_bo_driver = { .verify_access = cirrus_bo_verify_access, .io_mem_reserve = &cirrus_ttm_io_mem_reserve, .io_mem_free = &cirrus_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int cirrus_mm_init(struct cirrus_device *cirrus) diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 657598bb1e6b..565a217b46f2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -236,6 +236,7 @@ struct ttm_bo_driver mgag200_bo_driver = { .verify_access = mgag200_bo_verify_access, .io_mem_reserve = &mgag200_ttm_io_mem_reserve, .io_mem_free = &mgag200_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int mgag200_mm_init(struct mga_device *mdev) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 548f36d33924..e427f80344c4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1574,6 +1574,7 @@ struct ttm_bo_driver nouveau_bo_driver = { .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, .io_mem_reserve = &nouveau_ttm_io_mem_reserve, .io_mem_free = &nouveau_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; struct nvkm_vma * diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 7d1cab57c89e..0fdedee4509d 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -393,6 +393,7 @@ static struct ttm_bo_driver qxl_bo_driver = { .verify_access = &qxl_verify_access, .io_mem_reserve = &qxl_ttm_io_mem_reserve, .io_mem_free = &qxl_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, .move_notify = &qxl_bo_move_notify, }; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 684f1703aa5c..bfb06469db46 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -873,6 +873,7 @@ static struct ttm_bo_driver radeon_bo_driver = { .fault_reserve_notify = &radeon_bo_fault_reserve_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, .io_mem_free = &radeon_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int radeon_ttm_init(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 35ffb3754feb..9f53df95f35c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -231,7 +231,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf) */ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { if (bo->mem.bus.is_iomem) - pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; + pfn = bdev->driver->io_mem_pfn(bo, page_offset); else { page = ttm->pages[page_offset]; if (unlikely(!page && i == 0)) { @@ -324,6 +324,14 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, return bo; } +unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, + unsigned long page_offset) +{ + return 
((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + + page_offset; +} +EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn); + int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, struct ttm_bo_device *bdev) { diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index 70ec8ca8d9b1..4e8e27d50922 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c @@ -431,6 +431,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = { .verify_access = &virtio_gpu_verify_access, .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve, .io_mem_free = &virtio_gpu_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, .move_notify = &virtio_gpu_bo_move_notify, .swap_notify = &virtio_gpu_bo_swap_notify, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 4c7f24a67a2e..35bf781e418e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -859,4 +859,5 @@ struct ttm_bo_driver vmw_bo_driver = { .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, .io_mem_reserve = &vmw_ttm_io_mem_reserve, .io_mem_free = &vmw_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 0b1ce05e2c2e..fa07be197945 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -710,6 +710,17 @@ extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); extern int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo); +/** + * ttm_bo_default_io_mem_pfn - get a pfn for a page offset + * + * @bo: the BO we need to look up the pfn for + * @page_offset: offset inside the BO to look up. + * + * Calculate the PFN for iomem based mappings during page fault + */ +unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, + unsigned long page_offset); + /** * ttm_bo_mmap - mmap out of the ttm device address space. * diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 3641c6128ac2..6bbd34d25a8d 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -462,6 +462,15 @@ struct ttm_bo_driver { struct ttm_mem_reg *mem); void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); + + /** + * Return the pfn for a given page_offset inside the BO. + * + * @bo: the BO to look up the pfn for + * @page_offset: the offset to look up + */ + unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, + unsigned long page_offset); }; /** -- cgit v1.2.3-71-gd317 From c8b26bd1d2997e20b5f911c04b1dd2266256b976 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 29 Mar 2017 12:13:54 +0200 Subject: drm/ttm: add TTM_PL_FLAG_CONTIGUOUS v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows drivers to specify if they need a contiguous allocation or not.
v2: use space instead of tab Signed-off-by: Christian König Reviewed-by: Nicolai Hähnle Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 4 +++- include/drm/ttm/ttm_placement.h | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 99d2a2c6a4ec..e44626a2e698 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1036,7 +1036,9 @@ static bool ttm_bo_places_compat(const struct ttm_place *places, *new_flags = heap->flags; if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && - (*new_flags & mem->placement & TTM_PL_MASK_MEM)) + (*new_flags & mem->placement & TTM_PL_MASK_MEM) && + (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) || + (mem->placement & TTM_PL_FLAG_CONTIGUOUS))) return true; } return false; diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h index 932be0c8086e..e88a8e39767b 100644 --- a/include/drm/ttm/ttm_placement.h +++ b/include/drm/ttm/ttm_placement.h @@ -63,6 +63,7 @@ #define TTM_PL_FLAG_CACHED (1 << 16) #define TTM_PL_FLAG_UNCACHED (1 << 17) #define TTM_PL_FLAG_WC (1 << 18) +#define TTM_PL_FLAG_CONTIGUOUS (1 << 19) #define TTM_PL_FLAG_NO_EVICT (1 << 21) #define TTM_PL_FLAG_TOPDOWN (1 << 22) -- cgit v1.2.3-71-gd317 From b2376407f98920c9b0c411948675f58a9640be35 Mon Sep 17 00:00:00 2001 From: Vic Yang Date: Fri, 24 Mar 2017 18:44:01 +0100 Subject: mfd: cros-ec: Fix host command buffer size For SPI, we can get up to 32 additional bytes for response preamble. The current overhead (2 bytes) may cause problems when we try to receive a big response. Update it to 32 bytes. Without this fix we could see a kernel BUG when we receive a big response from the Chrome EC when it is connected via SPI. Signed-off-by: Vic Yang Tested-by: Enric Balletbo i Serra Signed-off-by: Lee Jones --- include/linux/mfd/cros_ec.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 7a01c94496f1..3eef9fb9968a 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -35,10 +35,11 @@ * Max bus-specific overhead incurred by request/responses. * I2C requires 1 additional byte for requests. * I2C requires 2 additional bytes for responses. + * SPI requires up to 32 additional bytes for responses. * */ #define EC_PROTO_VERSION_UNKNOWN 0 #define EC_MAX_REQUEST_OVERHEAD 1 -#define EC_MAX_RESPONSE_OVERHEAD 2 +#define EC_MAX_RESPONSE_OVERHEAD 32 /* * Command interface between EC and AP, for LPC, I2C and SPI interfaces. -- cgit v1.2.3-71-gd317 From 3ebfdf082184d04f6e73b30cd9446613dc7f8c02 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 4 Apr 2017 13:39:55 +0800 Subject: sctp: get sock from transport in sctp_transport_update_pmtu This patch almost reverts commit 02f3d4ce9e81 ("sctp: Adjust PMTU updates to accomodate route invalidation."). As t->asoc can't be NULL in sctp_transport_update_pmtu, it can get sk from asoc, so there is no need to pass sk into that function. It also removes some duplicated code from that function. Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S.
Miller --- include/net/sctp/sctp.h | 5 ++--- include/net/sctp/structs.h | 6 +++--- net/sctp/associola.c | 6 +++--- net/sctp/input.c | 4 ++-- net/sctp/output.c | 4 ++-- net/sctp/socket.c | 6 +++--- net/sctp/transport.c | 19 +++++++------------ 7 files changed, 22 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index d75caa7a629b..069582ee5d7f 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -448,10 +448,9 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu) return frag; } -static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc) +static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc) { - - sctp_assoc_sync_pmtu(sk, asoc); + sctp_assoc_sync_pmtu(asoc); asoc->pmtu_pending = 0; } diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index a127b7c2c3c9..138f8615acf0 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -952,8 +952,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t); void sctp_transport_burst_limited(struct sctp_transport *); void sctp_transport_burst_reset(struct sctp_transport *); unsigned long sctp_transport_timeout(struct sctp_transport *); -void sctp_transport_reset(struct sctp_transport *); -void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32); +void sctp_transport_reset(struct sctp_transport *t); +void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); void sctp_transport_immediate_rtx(struct sctp_transport *); void sctp_transport_dst_release(struct sctp_transport *t); void sctp_transport_dst_confirm(struct sctp_transport *t); @@ -1954,7 +1954,7 @@ void sctp_assoc_update(struct sctp_association *old, __u32 sctp_association_get_next_tsn(struct sctp_association *); -void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); +void sctp_assoc_sync_pmtu(struct sctp_association *asoc); void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); void sctp_assoc_set_primary(struct sctp_association *, diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 0b26df5f6188..a9708da28eb5 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1412,7 +1412,7 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc, /* Update the association's pmtu and frag_point by going through all the * transports. This routine is called when a transport's PMTU has changed. 
*/ -void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) +void sctp_assoc_sync_pmtu(struct sctp_association *asoc) { struct sctp_transport *t; __u32 pmtu = 0; @@ -1424,8 +1424,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (t->pmtu_pending && t->dst) { - sctp_transport_update_pmtu(sk, t, - SCTP_TRUNC4(dst_mtu(t->dst))); + sctp_transport_update_pmtu( + t, SCTP_TRUNC4(dst_mtu(t->dst))); t->pmtu_pending = 0; } if (!pmtu || (t->pathmtu < pmtu)) diff --git a/net/sctp/input.c b/net/sctp/input.c index 2a28ab20487f..0e06a278d2a9 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -401,10 +401,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, if (t->param_flags & SPP_PMTUD_ENABLE) { /* Update transports view of the MTU */ - sctp_transport_update_pmtu(sk, t, pmtu); + sctp_transport_update_pmtu(t, pmtu); /* Update association pmtu. */ - sctp_assoc_sync_pmtu(sk, asoc); + sctp_assoc_sync_pmtu(asoc); } /* Retransmit with the new pmtu setting. diff --git a/net/sctp/output.c b/net/sctp/output.c index ec4d50a38713..1409a875ad8e 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -105,10 +105,10 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, if (!sctp_transport_dst_check(tp)) { sctp_transport_route(tp, NULL, sctp_sk(sk)); if (asoc->param_flags & SPP_PMTUD_ENABLE) - sctp_assoc_sync_pmtu(sk, asoc); + sctp_assoc_sync_pmtu(asoc); } else if (!sctp_transport_pmtu_check(tp)) { if (asoc->param_flags & SPP_PMTUD_ENABLE) - sctp_assoc_sync_pmtu(sk, asoc); + sctp_assoc_sync_pmtu(asoc); } /* If there a is a prepend chunk stick it on the list before diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 12fbae2c1002..c1401f43d40f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1907,7 +1907,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) } if (asoc->pmtu_pending) - sctp_assoc_pending_pmtu(sk, asoc); + sctp_assoc_pending_pmtu(asoc); /* If fragmentation is disabled and the message length exceeds the * association fragmentation point, return EMSGSIZE. 
The I-D @@ -2435,7 +2435,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { if (trans) { trans->pathmtu = params->spp_pathmtu; - sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); + sctp_assoc_sync_pmtu(asoc); } else if (asoc) { asoc->pathmtu = params->spp_pathmtu; } else { @@ -2451,7 +2451,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, (trans->param_flags & ~SPP_PMTUD) | pmtud_change; if (update) { sctp_transport_pmtu(trans, sctp_opt2sk(sp)); - sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); + sctp_assoc_sync_pmtu(asoc); } } else if (asoc) { asoc->param_flags = diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 3379668af368..721eeebfcd8a 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -251,14 +251,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } -void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu) +void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) { - struct dst_entry *dst; + struct dst_entry *dst = sctp_transport_dst_check(t); if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", - __func__, pmtu, - SCTP_DEFAULT_MINSEGMENT); + __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); /* Use default minimum segment size and disable * pmtu discovery on this transport. */ @@ -267,17 +266,13 @@ void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 p t->pathmtu = pmtu; } - dst = sctp_transport_dst_check(t); - if (!dst) - t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); - if (dst) { - dst->ops->update_pmtu(dst, sk, NULL, pmtu); - + dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); dst = sctp_transport_dst_check(t); - if (!dst) - t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); } + + if (!dst) + t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); } /* Caches the dst entry and source address for a transport's destination -- cgit v1.2.3-71-gd317 From 6118714275f0a313ecc296a87ed1af32d9691bed Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 30 Mar 2017 09:16:39 -0700 Subject: pinctrl: core: Fix pinctrl_register_and_init() with pinctrl_enable() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Recent pinctrl changes to allow dynamic allocation of pins exposed one more issue with the pinctrl pins claimed early by the controller itself. This caused a regression for IMX6 pinctrl hogs. Before enabling the pin controller driver we need to wait until it has been properly initialized, then claim the hogs, and only then enable it. To fix the regression, split the code into pinctrl_claim_hogs() and pinctrl_enable(). And then let's require that pinctrl_enable() is always called by the pin controller driver when ready after calling pinctrl_register_and_init(). 
Depends-on: 950b0d91dc10 ("pinctrl: core: Fix regression caused by delayed work for hogs") Fixes: df61b366af26 ("pinctrl: core: Use delayed work for hogs") Fixes: e566fc11ea76 ("pinctrl: imx: use generic pinctrl helpers for managing groups") Cc: Haojian Zhuang Cc: Masahiro Yamada Cc: Mika Penttilä Cc: Mika Westerberg Cc: Nishanth Menon Cc: Shawn Guo Cc: Stefan Agner Tested-by: Geert Uytterhoeven Tested-by: Gary Bisson Tested-by: Fabio Estevam Signed-off-by: Tony Lindgren Signed-off-by: Linus Walleij --- Documentation/pinctrl.txt | 8 ++- drivers/pinctrl/core.c | 97 +++++++++++++++++++++------------ drivers/pinctrl/freescale/pinctrl-imx.c | 2 +- drivers/pinctrl/pinctrl-single.c | 2 +- drivers/pinctrl/sh-pfc/pinctrl.c | 11 +++- drivers/pinctrl/ti/pinctrl-ti-iodelay.c | 2 + include/linux/pinctrl/pinctrl.h | 3 +- 7 files changed, 83 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt index 54bd5faa8782..f2af35f6d6b2 100644 --- a/Documentation/pinctrl.txt +++ b/Documentation/pinctrl.txt @@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = { int __init foo_probe(void) { + int error; + struct pinctrl_dev *pctl; - return pinctrl_register_and_init(&foo_desc, , NULL, &pctl); + error = pinctrl_register_and_init(&foo_desc, , NULL, &pctl); + if (error) + return error; + + return pinctrl_enable(pctl); } To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index d69046537b75..32822b0d9cd0 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -2010,29 +2010,57 @@ out_err: return ERR_PTR(ret); } -static int pinctrl_create_and_start(struct pinctrl_dev *pctldev) +static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev) { pctldev->p = create_pinctrl(pctldev->dev, pctldev); - if (!IS_ERR(pctldev->p)) { - kref_get(&pctldev->p->users); - pctldev->hog_default = - pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT); - if (IS_ERR(pctldev->hog_default)) { - dev_dbg(pctldev->dev, - "failed to lookup the default state\n"); - } else { - if (pinctrl_select_state(pctldev->p, - pctldev->hog_default)) - dev_err(pctldev->dev, - "failed to select default state\n"); - } + if (PTR_ERR(pctldev->p) == -ENODEV) { + dev_dbg(pctldev->dev, "no hogs found\n"); - pctldev->hog_sleep = - pinctrl_lookup_state(pctldev->p, - PINCTRL_STATE_SLEEP); - if (IS_ERR(pctldev->hog_sleep)) - dev_dbg(pctldev->dev, - "failed to lookup the sleep state\n"); + return 0; + } + + if (IS_ERR(pctldev->p)) { + dev_err(pctldev->dev, "error claiming hogs: %li\n", + PTR_ERR(pctldev->p)); + + return PTR_ERR(pctldev->p); + } + + kref_get(&pctldev->p->users); + pctldev->hog_default = + pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT); + if (IS_ERR(pctldev->hog_default)) { + dev_dbg(pctldev->dev, + "failed to lookup the default state\n"); + } else { + if (pinctrl_select_state(pctldev->p, + pctldev->hog_default)) + dev_err(pctldev->dev, + "failed to select default state\n"); + } + + pctldev->hog_sleep = + pinctrl_lookup_state(pctldev->p, + PINCTRL_STATE_SLEEP); + if (IS_ERR(pctldev->hog_sleep)) + dev_dbg(pctldev->dev, + "failed to lookup the sleep state\n"); + + return 0; +} + +int pinctrl_enable(struct pinctrl_dev *pctldev) +{ + int error; + + error = pinctrl_claim_hogs(pctldev); + if (error) { + dev_err(pctldev->dev, "could not claim hogs: %i\n", + error); + mutex_destroy(&pctldev->mutex); + kfree(pctldev); + + return error; } mutex_lock(&pinctrldev_list_mutex); @@ -2043,6 +2071,7 @@ 
static int pinctrl_create_and_start(struct pinctrl_dev *pctldev) return 0; } +EXPORT_SYMBOL_GPL(pinctrl_enable); /** * pinctrl_register() - register a pin controller device @@ -2065,25 +2094,30 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, if (IS_ERR(pctldev)) return pctldev; - error = pinctrl_create_and_start(pctldev); - if (error) { - mutex_destroy(&pctldev->mutex); - kfree(pctldev); - + error = pinctrl_enable(pctldev); + if (error) return ERR_PTR(error); - } return pctldev; } EXPORT_SYMBOL_GPL(pinctrl_register); +/** + * pinctrl_register_and_init() - register and init pin controller device + * @pctldesc: descriptor for this pin controller + * @dev: parent device for this pin controller + * @driver_data: private pin controller data for this pin controller + * @pctldev: pin controller device + * + * Note that pinctrl_enable() still needs to be manually called after + * this once the driver is ready. + */ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data, struct pinctrl_dev **pctldev) { struct pinctrl_dev *p; - int error; p = pinctrl_init_controller(pctldesc, dev, driver_data); if (IS_ERR(p)) @@ -2097,15 +2131,6 @@ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, */ *pctldev = p; - error = pinctrl_create_and_start(p); - if (error) { - mutex_destroy(&p->mutex); - kfree(p); - *pctldev = NULL; - - return error; - } - return 0; } EXPORT_SYMBOL_GPL(pinctrl_register_and_init); diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index a7ace9e1ad81..74bd90dfd7b1 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -790,7 +790,7 @@ int imx_pinctrl_probe(struct platform_device *pdev, dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); - return 0; + return pinctrl_enable(ipctl->pctl); free: imx_free_resources(ipctl); diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 8b2d45e85bae..9c267dcda094 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1781,7 +1781,7 @@ static int pcs_probe(struct platform_device *pdev) dev_info(pcs->dev, "%i pins at pa %p size %u\n", pcs->desc.npins, pcs->base, pcs->size); - return 0; + return pinctrl_enable(pcs->pctl); free: pcs_free_resources(pcs); diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c index 08150a321be6..a70157f0acf4 100644 --- a/drivers/pinctrl/sh-pfc/pinctrl.c +++ b/drivers/pinctrl/sh-pfc/pinctrl.c @@ -816,6 +816,13 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc) pmx->pctl_desc.pins = pmx->pins; pmx->pctl_desc.npins = pfc->info->nr_pins; - return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx, - &pmx->pctl); + ret = devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx, + &pmx->pctl); + if (ret) { + dev_err(pfc->dev, "could not register: %i\n", ret); + + return ret; + } + + return pinctrl_enable(pmx->pctl); } diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c index 717e3404900c..362c50918c13 100644 --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c @@ -893,6 +893,8 @@ static int ti_iodelay_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iod); + return pinctrl_enable(iod->pctl); + exit_out: of_node_put(np); return ret; diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 8ce2d87a238b..5e45385c5bdc 100644 --- 
a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -145,8 +145,9 @@ struct pinctrl_desc { extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data, struct pinctrl_dev **pctldev); +extern int pinctrl_enable(struct pinctrl_dev *pctldev); -/* Please use pinctrl_register_and_init() instead */ +/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */ extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data); -- cgit v1.2.3-71-gd317 From 404123c2db798027e852480ed9c4accef9f1d9e6 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 29 Mar 2017 19:06:20 +0300 Subject: virtio: allow drivers to validate features Some drivers can't support all features in all configurations. At the moment we blindly set FEATURES_OK and later FAILED. Support this better by adding a callback drivers can use to do some early checks. Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio.c | 6 ++++++ include/linux/virtio.h | 1 + 2 files changed, 7 insertions(+) (limited to 'include') diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 400d70b69379..48230a5e12f2 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d) if (device_features & (1ULL << i)) __virtio_set_bit(dev, i); + if (drv->validate) { + err = drv->validate(dev); + if (err) + goto err; + } + err = virtio_finalize_features(dev); if (err) goto err; diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 04b0d3f95043..7edfbdb55a99 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -167,6 +167,7 @@ struct virtio_driver { unsigned int feature_table_size; const unsigned int *feature_table_legacy; unsigned int feature_table_size_legacy; + int (*validate)(struct virtio_device *dev); int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); void (*remove)(struct virtio_device *dev); -- cgit v1.2.3-71-gd317 From 54d5329d425650fafaf90660a139c771d2d49cae Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Fri, 7 Apr 2017 08:52:27 -0600 Subject: blk-mq-sched: fix crash in switch error path In elevator_switch(), if blk_mq_init_sched() fails, we attempt to fall back to the original scheduler. However, at this point, we've already torn down the original scheduler's tags, so this causes a crash. Doing the fallback like the legacy elevator path is much harder for mq, so fix it by just falling back to none, instead. 
Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 13 +++++-- block/blk-mq-sched.h | 2 +- block/blk-mq.c | 2 -- block/blk-sysfs.c | 2 +- block/elevator.c | 94 +++++++++++++++++++++++++++--------------------- include/linux/elevator.h | 2 +- 6 files changed, 67 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 0bb13bb51daa..e8c2ed654ef0 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -451,7 +451,7 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q, return ret; } -void blk_mq_sched_teardown(struct request_queue *q) +static void blk_mq_sched_tags_teardown(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; struct blk_mq_hw_ctx *hctx; @@ -513,10 +513,19 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) return 0; err: - blk_mq_sched_teardown(q); + blk_mq_sched_tags_teardown(q); + q->elevator = NULL; return ret; } +void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) +{ + if (e->type->ops.mq.exit_sched) + e->type->ops.mq.exit_sched(e); + blk_mq_sched_tags_teardown(q); + q->elevator = NULL; +} + int blk_mq_sched_init(struct request_queue *q) { int ret; diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 19db25e0c95a..e704956e0862 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -33,7 +33,7 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx, struct request *(*get_rq)(struct blk_mq_hw_ctx *)); int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); -void blk_mq_sched_teardown(struct request_queue *q); +void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx); diff --git a/block/blk-mq.c b/block/blk-mq.c index 72e744cd638c..cfb7c97b14ec 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2240,8 +2240,6 @@ void blk_mq_release(struct request_queue *q) struct blk_mq_hw_ctx *hctx; unsigned int i; - blk_mq_sched_teardown(q); - /* hctx kobj stays in hctx */ queue_for_each_hw_ctx(q, hctx, i) { if (!hctx) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index c44b321335f3..37f0b3ad635e 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj) if (q->elevator) { ioc_clear_queue(q); - elevator_exit(q->elevator); + elevator_exit(q, q->elevator); } blk_exit_rl(&q->root_rl); diff --git a/block/elevator.c b/block/elevator.c index f236ef1d2be9..dbeecf7be719 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -252,11 +252,11 @@ int elevator_init(struct request_queue *q, char *name) } EXPORT_SYMBOL(elevator_init); -void elevator_exit(struct elevator_queue *e) +void elevator_exit(struct request_queue *q, struct elevator_queue *e) { mutex_lock(&e->sysfs_lock); if (e->uses_mq && e->type->ops.mq.exit_sched) - e->type->ops.mq.exit_sched(e); + blk_mq_exit_sched(q, e); else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn) e->type->ops.sq.elevator_exit_fn(e); mutex_unlock(&e->sysfs_lock); @@ -941,6 +941,45 @@ void elv_unregister(struct elevator_type *e) } EXPORT_SYMBOL_GPL(elv_unregister); +static int elevator_switch_mq(struct request_queue *q, + struct elevator_type *new_e) +{ + int ret; + + blk_mq_freeze_queue(q); + blk_mq_quiesce_queue(q); + + if (q->elevator) { + if (q->elevator->registered) + elv_unregister_queue(q); + ioc_clear_queue(q); + elevator_exit(q, q->elevator); + } + + ret 
= blk_mq_init_sched(q, new_e); + if (ret) + goto out; + + if (new_e) { + ret = elv_register_queue(q); + if (ret) { + elevator_exit(q, q->elevator); + goto out; + } + } + + if (new_e) + blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); + else + blk_add_trace_msg(q, "elv switch: none"); + +out: + blk_mq_unfreeze_queue(q); + blk_mq_start_stopped_hw_queues(q, true); + return ret; + +} + /* * switch to new_e io scheduler. be careful not to introduce deadlocks - * we don't free the old io scheduler, before we have allocated what we @@ -953,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) bool old_registered = false; int err; - if (q->mq_ops) { - blk_mq_freeze_queue(q); - blk_mq_quiesce_queue(q); - } + if (q->mq_ops) + return elevator_switch_mq(q, new_e); /* * Turn on BYPASS and drain all requests w/ elevator private data. @@ -968,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) if (old) { old_registered = old->registered; - if (old->uses_mq) - blk_mq_sched_teardown(q); - - if (!q->mq_ops) - blk_queue_bypass_start(q); + blk_queue_bypass_start(q); /* unregister and clear all auxiliary data of the old elevator */ if (old_registered) @@ -982,53 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) } /* allocate, init and register new elevator */ - if (q->mq_ops) - err = blk_mq_init_sched(q, new_e); - else - err = new_e->ops.sq.elevator_init_fn(q, new_e); + err = new_e->ops.sq.elevator_init_fn(q, new_e); if (err) goto fail_init; - if (new_e) { - err = elv_register_queue(q); - if (err) - goto fail_register; - } + err = elv_register_queue(q); + if (err) + goto fail_register; /* done, kill the old one and finish */ if (old) { - elevator_exit(old); - if (!q->mq_ops) - blk_queue_bypass_end(q); + elevator_exit(q, old); + blk_queue_bypass_end(q); } - if (q->mq_ops) { - blk_mq_unfreeze_queue(q); - blk_mq_start_stopped_hw_queues(q, true); - } - - if (new_e) - blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); - else - blk_add_trace_msg(q, "elv switch: none"); + blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); return 0; fail_register: - if (q->mq_ops) - blk_mq_sched_teardown(q); - elevator_exit(q->elevator); + elevator_exit(q, q->elevator); fail_init: /* switch failed, restore and re-register old elevator */ if (old) { q->elevator = old; elv_register_queue(q); - if (!q->mq_ops) - blk_queue_bypass_end(q); - } - if (q->mq_ops) { - blk_mq_unfreeze_queue(q); - blk_mq_start_stopped_hw_queues(q, true); + blk_queue_bypass_end(q); } return err; diff --git a/include/linux/elevator.h b/include/linux/elevator.h index aebecc4ed088..22d39e8d4de1 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *); extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); -extern void elevator_exit(struct elevator_queue *); +extern void elevator_exit(struct request_queue *, struct elevator_queue *); extern int elevator_change(struct request_queue *, const char *); extern bool elv_bio_merge_ok(struct request *, struct bio *); extern struct elevator_queue *elevator_alloc(struct request_queue *, -- cgit v1.2.3-71-gd317 From 7587a5ae7eef0439f7be31f1b5959af062bbc5ec Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 7 Apr 2017 11:16:52 -0700 Subject: blk-mq: Introduce blk_mq_delay_run_hw_queue() 
Introduce a function that runs a hardware queue unconditionally after a delay. Note: there is already a function that stops and restarts a hardware queue after a delay, namely blk_mq_delay_queue(). This function will be used in the next patch in this series. Signed-off-by: Bart Van Assche Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Long Li Cc: K. Y. Srinivasan Signed-off-by: Jens Axboe --- block/blk-mq.c | 32 ++++++++++++++++++++++++++++++-- include/linux/blk-mq.h | 2 ++ 2 files changed, 32 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/block/blk-mq.c b/block/blk-mq.c index f1be5c3eb600..ad45ae743186 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1135,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) return hctx->next_cpu; } -void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) +static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, + unsigned long msecs) { if (unlikely(blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))) @@ -1152,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) put_cpu(); } - kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work); + if (msecs == 0) + kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), + &hctx->run_work); + else + kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), + &hctx->delayed_run_work, + msecs_to_jiffies(msecs)); +} + +void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) +{ + __blk_mq_delay_run_hw_queue(hctx, true, msecs); +} +EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); + +void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) +{ + __blk_mq_delay_run_hw_queue(hctx, async, 0); } void blk_mq_run_hw_queues(struct request_queue *q, bool async) @@ -1255,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work) __blk_mq_run_hw_queue(hctx); } +static void blk_mq_delayed_run_work_fn(struct work_struct *work) +{ + struct blk_mq_hw_ctx *hctx; + + hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work); + + __blk_mq_run_hw_queue(hctx); +} + static void blk_mq_delay_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx; @@ -1962,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q, node = hctx->numa_node = set->numa_node; INIT_WORK(&hctx->run_work, blk_mq_run_work_fn); + INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn); INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b296a9006117..9382c5da7a2e 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -51,6 +51,7 @@ struct blk_mq_hw_ctx { atomic_t nr_active; + struct delayed_work delayed_run_work; struct delayed_work delay_work; struct hlist_node cpuhp_dead; @@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); +void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, -- cgit v1.2.3-71-gd317
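[ Illustration: a minimal sketch of how a blk-mq driver's queue_rq handler might use the new helper when a hardware resource is temporarily exhausted. Only blk_mq_delay_run_hw_queue() comes from this patch; the example_* names and the retry interval are hypothetical. ]

#include <linux/blk-mq.h>

#define EXAMPLE_RETRY_DELAY_MS 100	/* hypothetical retry interval */

/* Hypothetical stand-in for a driver-specific resource check. */
static bool example_hw_resource_available(void *hw)
{
	return hw != NULL;
}

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *bd)
{
	if (!example_hw_resource_available(hctx->driver_data)) {
		/*
		 * Re-run this hardware queue after the delay instead of
		 * leaving the request stranded until another completion
		 * happens to kick the queue.
		 */
		blk_mq_delay_run_hw_queue(hctx, EXAMPLE_RETRY_DELAY_MS);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}
	/* ... hand bd->rq to the hardware here ... */
	return BLK_MQ_RQ_QUEUE_OK;
}

From 6d8c6c0f97ad8a3517c42b179c1dc8e77397d0e2 Mon Sep 17 00:00:00 2001 From: Bart Van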
Assche Date: Fri, 7 Apr 2017 12:40:09 -0600 Subject: blk-mq: Restart a single queue if tag sets are shared To improve scalability, if hardware queues are shared, restart a single hardware queue in round-robin fashion. Rename blk_mq_sched_restart_queues() to reflect the new semantics. Remove blk_mq_sched_mark_restart_queue() because this function has no callers. Remove flag QUEUE_FLAG_RESTART because this patch removes the code that uses this flag. Signed-off-by: Bart Van Assche Cc: Christoph Hellwig Cc: Hannes Reinecke Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 63 ++++++++++++++++++++++++++++++++++++++++++-------- block/blk-mq-sched.h | 16 +------------ block/blk-mq.c | 2 +- include/linux/blkdev.h | 1 - 4 files changed, 55 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index e8c2ed654ef0..c974a1bbf4cb 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -318,25 +318,68 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, return true; } -static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) +static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) { if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) { clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); - if (blk_mq_hctx_has_pending(hctx)) + if (blk_mq_hctx_has_pending(hctx)) { blk_mq_run_hw_queue(hctx, true); + return true; + } } + return false; } -void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx) -{ - struct request_queue *q = hctx->queue; - unsigned int i; +/** + * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list + * @pos: loop cursor. + * @skip: the list element that will not be examined. Iteration starts at + * @skip->next. + * @head: head of the list to examine. This list must have at least one + * element, namely @skip. + * @member: name of the list_head structure within typeof(*pos). + */ +#define list_for_each_entry_rcu_rr(pos, skip, head, member) \ + for ((pos) = (skip); \ + (pos = (pos)->member.next != (head) ? list_entry_rcu( \ + (pos)->member.next, typeof(*pos), member) : \ + list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \ + (pos) != (skip); ) - if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) { - if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) { - queue_for_each_hw_ctx(q, hctx, i) - blk_mq_sched_restart_hctx(hctx); +/* + * Called after a driver tag has been freed to check whether a hctx needs to + * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware + * queues in a round-robin fashion if the tag set of @hctx is shared with other + * hardware queues. 
+ */ +void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx) +{ + struct blk_mq_tags *const tags = hctx->tags; + struct blk_mq_tag_set *const set = hctx->queue->tag_set; + struct request_queue *const queue = hctx->queue, *q; + struct blk_mq_hw_ctx *hctx2; + unsigned int i, j; + + if (set->flags & BLK_MQ_F_TAG_SHARED) { + rcu_read_lock(); + list_for_each_entry_rcu_rr(q, queue, &set->tag_list, + tag_set_list) { + queue_for_each_hw_ctx(q, hctx2, i) + if (hctx2->tags == tags && + blk_mq_sched_restart_hctx(hctx2)) + goto done; + } + j = hctx->queue_num + 1; + for (i = 0; i < queue->nr_hw_queues; i++, j++) { + if (j == queue->nr_hw_queues) + j = 0; + hctx2 = queue->queue_hw_ctx[j]; + if (hctx2->tags == tags && + blk_mq_sched_restart_hctx(hctx2)) + break; } +done: + rcu_read_unlock(); } else { blk_mq_sched_restart_hctx(hctx); } diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index e704956e0862..3a9e6e40558b 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, struct request **merged_request); bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); -void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx); +void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue, bool async, bool can_block); @@ -136,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); } -/* - * Mark a hardware queue and the request queue it belongs to as needing a - * restart. - */ -static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx) -{ - struct request_queue *q = hctx->queue; - - if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) - set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); - if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) - set_bit(QUEUE_FLAG_RESTART, &q->queue_flags); -} - static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) { return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); } diff --git a/block/blk-mq.c b/block/blk-mq.c index ad45ae743186..572966f49596 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -348,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); if (sched_tag != -1) blk_mq_sched_completed_request(hctx, rq); - blk_mq_sched_restart_queues(hctx); + blk_mq_sched_restart(hctx); blk_queue_exit(q); } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5a7da607ca04..7548f332121a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -610,7 +610,6 @@ struct request_queue { #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ #define QUEUE_FLAG_DAX 26 /* device supports DAX */ #define QUEUE_FLAG_STATS 27 /* track rq completion times */ -#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ -- cgit v1.2.3-71-gd317
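[ Illustration: a minimal sketch of the round-robin semantics of list_for_each_entry_rcu_rr(), assuming the macro from this patch is visible (it is local to blk-mq-sched.c). Starting from @self, every other element of an RCU-protected list is visited exactly once, beginning at @self's successor and wrapping past the list head; all example_* names are hypothetical, and a real user would need additional synchronization before modifying elements. ]

#include <linux/list.h>
#include <linux/rculist.h>

struct example_node {
	struct list_head link;
	bool ready;
};

/*
 * Claim the first ready peer after @self. Because iteration starts at
 * @self->next rather than at the list head, repeated calls spread the
 * work across all peers instead of always favoring the first one; this
 * is the fairness property blk_mq_sched_restart() wants for shared tag
 * sets. The list must contain at least @self itself.
 */
static bool example_claim_peer(struct example_node *self,
			       struct list_head *head)
{
	struct example_node *pos;
	bool claimed = false;

	rcu_read_lock();
	list_for_each_entry_rcu_rr(pos, self, head, link) {
		if (pos->ready) {
			pos->ready = false;
			claimed = true;
			break;
		}
	}
	rcu_read_unlock();
	return claimed;
}

From d79bf21e0e02f8ad24219f0587cc599a7e67f6c6 Mon Sep 17 00:00:00 2001 From: Jessica Yu Date: Fri, 7 Apr 2017 16:04:48 -0700 Subject: vmlinux.lds: add missing VMLINUX_SYMBOL macros When __{start,end}_ro_after_init is referenced from C code, we run into the following build errors on blackfin: kernel/extable.c:169: undefined reference to `__start_ro_after_init'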
kernel/extable.c:169: undefined reference to `__end_ro_after_init' The build error is due to the fact that blackfin is one of the few arches that prepends an underscore '_' to all symbols defined in C. Fix this by wrapping __{start,end}_ro_after_init in vmlinux.lds.h with VMLINUX_SYMBOL(), which adds the necessary prefix for arches that have HAVE_UNDERSCORE_SYMBOL_PREFIX. Link: http://lkml.kernel.org/r/1491259387-15869-1-git-send-email-jeyu@redhat.com Signed-off-by: Jessica Yu Acked-by: Kees Cook Cc: Arnd Bergmann Cc: Eddie Kovsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/vmlinux.lds.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 7cdfe167074f..143db9c523e2 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -261,9 +261,9 @@ */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ - __start_ro_after_init = .; \ + VMLINUX_SYMBOL(__start_ro_after_init) = .; \ *(.data..ro_after_init) \ - __end_ro_after_init = .; + VMLINUX_SYMBOL(__end_ro_after_init) = .; #endif /* -- cgit v1.2.3-71-gd317 From e3689e470fa0d9ebaa9d13d069e8d73c8d82a11d Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Tue, 7 Mar 2017 10:02:53 -0700 Subject: drm/msm: Add MSM_PARAM_GMEM_BASE User space needs to know where the GMEM hole starts so that it can set up the addressing correctly. Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 3 +++ include/uapi/drm/msm_drm.h | 1 + 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 9a92bcf982b8..4cac22633ce4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -35,6 +35,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) case MSM_PARAM_GMEM_SIZE: *value = adreno_gpu->gmem; return 0; + case MSM_PARAM_GMEM_BASE: + *value = 0x100000; + return 0; case MSM_PARAM_CHIP_ID: *value = adreno_gpu->rev.patchid | (adreno_gpu->rev.minor << 8) | diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 4d5d6a2bc59e..a4a189a240d7 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -72,6 +72,7 @@ struct drm_msm_timespec { #define MSM_PARAM_CHIP_ID 0x03 #define MSM_PARAM_MAX_FREQ 0x04 #define MSM_PARAM_TIMESTAMP 0x05 +#define MSM_PARAM_GMEM_BASE 0x06 struct drm_msm_param { __u32 pipe; /* in, MSM_PIPE_x */ -- cgit v1.2.3-71-gd317
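[ Illustration: a hedged userspace sketch of querying the new parameter through the existing DRM_IOCTL_MSM_GET_PARAM interface; the caller is assumed to have opened the msm DRM node, and returning 0 as a "not supported" fallback is a hypothetical choice. ]

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

/* Returns the GMEM base address for the 3D pipe, or 0 when the
 * running kernel predates MSM_PARAM_GMEM_BASE. */
static uint64_t example_query_gmem_base(int drm_fd)
{
	struct drm_msm_param req = {
		.pipe = MSM_PIPE_3D0,
		.param = MSM_PARAM_GMEM_BASE,
	};

	if (ioctl(drm_fd, DRM_IOCTL_MSM_GET_PARAM, &req) != 0)
		return 0;
	return req.value;
}

From 2008c1544c73d5190f81ef1790fa5bd2fade5bd0 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 4 Apr 2017 21:09:20 +0300 Subject: Revert "virtio_pci: don't duplicate the msix_enable flag in struct pci_dev" This reverts commit 53a020c661741f3b87ad3ac6fa545088aaebac9b. The cleanup seems to be one of the changes that broke hibernation for some users. We are still not sure why, but the revert helps. Tested-by: Mike Galbraith Signed-off-by: Michael S.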
Tsirkin --- drivers/virtio/virtio_pci_common.c | 5 +++-- drivers/virtio/virtio_pci_common.h | 2 ++ drivers/virtio/virtio_pci_legacy.c | 2 +- drivers/virtio/virtio_pci_modern.c | 2 +- include/uapi/linux/virtio_pci.h | 2 +- 5 files changed, 8 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 4608fd9aaa6c..3921b0a2439e 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -125,7 +125,7 @@ void vp_del_vqs(struct virtio_device *vdev) vp_remove_vqs(vdev); - if (vp_dev->pci_dev->msix_enabled) { + if (vp_dev->msix_enabled) { for (i = 0; i < vp_dev->msix_vectors; i++) free_cpumask_var(vp_dev->msix_affinity_masks[i]); @@ -249,6 +249,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, allocated_vectors++; } + vp_dev->msix_enabled = 1; return 0; out_remove_vqs: @@ -343,7 +344,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) if (!vq->callback) return -EINVAL; - if (vp_dev->pci_dev->msix_enabled) { + if (vp_dev->msix_enabled) { int vec = vp_dev->msix_vector_map[vq->index]; struct cpumask *mask = vp_dev->msix_affinity_masks[vec]; unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec); diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index ac8c9d788964..c8074997fd28 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -64,6 +64,8 @@ struct virtio_pci_device { /* the IO mapping for the PCI config space */ void __iomem *ioaddr; + /* MSI-X support */ + int msix_enabled; cpumask_var_t *msix_affinity_masks; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index f7362c5fe18a..5dd01f09608b 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -165,7 +165,7 @@ static void del_vq(struct virtqueue *vq) iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); - if (vp_dev->pci_dev->msix_enabled) { + if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 7bc3004b840e..7ce36daccc31 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -411,7 +411,7 @@ static void del_vq(struct virtqueue *vq) vp_iowrite16(vq->index, &vp_dev->common->queue_select); - if (vp_dev->pci_dev->msix_enabled) { + if (vp_dev->msix_enabled) { vp_iowrite16(VIRTIO_MSI_NO_VECTOR, &vp_dev->common->queue_msix_vector); /* Flush the write out to device */ diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 15b4385a2be1..90007a1abcab 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -79,7 +79,7 @@ * configuration space */ #define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20) /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */ -#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled) +#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled) /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 -- cgit v1.2.3-71-gd317 From c0c379e2931b05facef538e53bf3b21f283d9a0b Mon Sep 17 00:00:00 2001 From: "Kirill A. 
Shutemov" Date: Thu, 13 Apr 2017 14:56:23 -0700 Subject: mm: drop unused pmdp_huge_get_and_clear_notify() Dave noticed that after fixing MADV_DONTNEED vs numa balancing race the last pmdp_huge_get_and_clear_notify() user is gone. Let's drop the helper. Link: http://lkml.kernel.org/r/20170306112047.24809-1-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Cc: Dave Hansen Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmu_notifier.h | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'include') diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 51891fb0d3ce..c91b3bcd158f 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) ___pud; \ }) -#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \ -({ \ - unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ - pmd_t ___pmd; \ - \ - ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \ - mmu_notifier_invalidate_range(__mm, ___haddr, \ - ___haddr + HPAGE_PMD_SIZE); \ - \ - ___pmd; \ -}) - /* * set_pte_at_notify() sets the pte _after_ running the notifier. * This is safe to start by updating the secondary MMUs, because the primary MMU @@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define ptep_clear_flush_notify ptep_clear_flush #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush #define pudp_huge_clear_flush_notify pudp_huge_clear_flush -#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear #define set_pte_at_notify set_pte_at #endif /* CONFIG_MMU_NOTIFIER */ -- cgit v1.2.3-71-gd317 From 5a8d75a1b8c99bdc926ba69b7b7dbe4fae81a5af Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 14 Apr 2017 13:58:29 -0600 Subject: block: fix bio_will_gap() for first bvec with offset Commit 729204ef49ec("block: relax check on sg gap") allows us to merge bios, if both are physically contiguous. This change can merge a huge number of small bios, through mkfs for example, mkfs.ntfs running time can be decreased to ~1/10. But if one rq starts with a non-aligned buffer (the 1st bvec's bv_offset is non-zero) and if we allow the merge, it is quite difficult to respect sg gap limit, especially the max segment size, or we risk having an unaligned virtual boundary. This patch tries to avoid the issue by disallowing a merge, if the req starts with an unaligned buffer. Also add comments to explain why the merged segment can't end in unaligned virt boundary. Fixes: 729204ef49ec ("block: relax check on sg gap") Tested-by: Johannes Thumshirn Reviewed-by: Johannes Thumshirn Signed-off-by: Ming Lei Rewrote parts of the commit message and comments. 
Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7548f332121a..01a696b0a4d3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1672,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q, return true; } -static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, - struct bio *next) +static inline bool bio_will_gap(struct request_queue *q, + struct request *prev_rq, + struct bio *prev, + struct bio *next) { if (bio_has_data(prev) && queue_virt_boundary(q)) { struct bio_vec pb, nb; + /* + * don't merge if the 1st bio starts with non-zero + * offset, otherwise it is quite difficult to respect + * sg gap limit. We work hard to merge a huge number of small + * single bios in case of mkfs. + */ + if (prev_rq) + bio_get_first_bvec(prev_rq->bio, &pb); + else + bio_get_first_bvec(prev, &pb); + if (pb.bv_offset) + return true; + + /* + * We don't need to worry about the situation that the + * merged segment ends in unaligned virt boundary: + * + * - if 'pb' ends aligned, the merged segment ends aligned + * - if 'pb' ends unaligned, the next bio must include + * one single bvec of 'nb', otherwise the 'nb' can't + * merge with 'pb' + */ bio_get_last_bvec(prev, &pb); bio_get_first_bvec(next, &nb); @@ -1690,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, static inline bool req_gap_back_merge(struct request *req, struct bio *bio) { - return bio_will_gap(req->q, req->biotail, bio); + return bio_will_gap(req->q, req, req->biotail, bio); } static inline bool req_gap_front_merge(struct request *req, struct bio *bio) { - return bio_will_gap(req->q, bio, req->bio); + return bio_will_gap(req->q, NULL, bio, req->bio); } int kblockd_schedule_work(struct work_struct *work); -- cgit v1.2.3-71-gd317 From f9b67f0014cba18f1aabb6fa9272335a043eb6fd Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Wed, 19 Apr 2017 13:36:10 -0600 Subject: dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic macro Because the kunmap_atomic dma_buf_op shares the same name as a macro in highmem.h, it can be aliased if any dma-buf user includes that header. I'm personally trying to include highmem.h inside scatterlist.h, and this breaks the dma-buf code proper. Christoph Hellwig suggested [1] renaming it and pushing this patch ASAP. To maintain consistency, I've renamed all four of kmap* and kunmap* to map* and unmap*. (Even though only kmap_atomic presently conflicts.)
[1] https://www.spinics.net/lists/target-devel/msg15070.html Signed-off-by: Logan Gunthorpe Reviewed-by: Sinclair Yeh Acked-by: Daniel Vetter Acked-by: Sumit Semwal Signed-off-by: Sumit Semwal Link: http://patchwork.freedesktop.org/patch/msgid/1492630570-879-1-git-send-email-logang@deltatee.com --- drivers/dma-buf/dma-buf.c | 16 ++++++++-------- drivers/gpu/drm/armada/armada_gem.c | 8 ++++---- drivers/gpu/drm/drm_prime.c | 8 ++++---- drivers/gpu/drm/i915/i915_gem_dmabuf.c | 8 ++++---- drivers/gpu/drm/i915/selftests/mock_dmabuf.c | 8 ++++---- drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 8 ++++---- drivers/gpu/drm/tegra/gem.c | 8 ++++---- drivers/gpu/drm/udl/udl_dmabuf.c | 8 ++++---- drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 8 ++++---- drivers/media/v4l2-core/videobuf2-dma-contig.c | 4 ++-- drivers/media/v4l2-core/videobuf2-dma-sg.c | 4 ++-- drivers/media/v4l2-core/videobuf2-vmalloc.c | 4 ++-- drivers/staging/android/ion/ion.c | 8 ++++---- include/linux/dma-buf.h | 22 +++++++++++----------- 14 files changed, 61 insertions(+), 61 deletions(-) (limited to 'include') diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index f72aaacbe023..512bdbc23bbb 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -405,8 +405,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) || !exp_info->ops->map_dma_buf || !exp_info->ops->unmap_dma_buf || !exp_info->ops->release - || !exp_info->ops->kmap_atomic - || !exp_info->ops->kmap + || !exp_info->ops->map_atomic + || !exp_info->ops->map || !exp_info->ops->mmap)) { return ERR_PTR(-EINVAL); } @@ -872,7 +872,7 @@ void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num) { WARN_ON(!dmabuf); - return dmabuf->ops->kmap_atomic(dmabuf, page_num); + return dmabuf->ops->map_atomic(dmabuf, page_num); } EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic); @@ -889,8 +889,8 @@ void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num, { WARN_ON(!dmabuf); - if (dmabuf->ops->kunmap_atomic) - dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr); + if (dmabuf->ops->unmap_atomic) + dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr); } EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic); @@ -907,7 +907,7 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num) { WARN_ON(!dmabuf); - return dmabuf->ops->kmap(dmabuf, page_num); + return dmabuf->ops->map(dmabuf, page_num); } EXPORT_SYMBOL_GPL(dma_buf_kmap); @@ -924,8 +924,8 @@ void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num, { WARN_ON(!dmabuf); - if (dmabuf->ops->kunmap) - dmabuf->ops->kunmap(dmabuf, page_num, vaddr); + if (dmabuf->ops->unmap) + dmabuf->ops->unmap(dmabuf, page_num, vaddr); } EXPORT_SYMBOL_GPL(dma_buf_kunmap); diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 1597458d884e..d6c2a5d190eb 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -529,10 +529,10 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = { .map_dma_buf = armada_gem_prime_map_dma_buf, .unmap_dma_buf = armada_gem_prime_unmap_dma_buf, .release = drm_gem_dmabuf_release, - .kmap_atomic = armada_gem_dmabuf_no_kmap, - .kunmap_atomic = armada_gem_dmabuf_no_kunmap, - .kmap = armada_gem_dmabuf_no_kmap, - .kunmap = armada_gem_dmabuf_no_kunmap, + .map_atomic = armada_gem_dmabuf_no_kmap, + .unmap_atomic = armada_gem_dmabuf_no_kunmap, + .map = armada_gem_dmabuf_no_kmap, + .unmap = armada_gem_dmabuf_no_kunmap, .mmap = armada_gem_dmabuf_mmap, }; diff --git 
a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 9fb65b736a90..954eb848b5e2 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -403,10 +403,10 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { .map_dma_buf = drm_gem_map_dma_buf, .unmap_dma_buf = drm_gem_unmap_dma_buf, .release = drm_gem_dmabuf_release, - .kmap = drm_gem_dmabuf_kmap, - .kmap_atomic = drm_gem_dmabuf_kmap_atomic, - .kunmap = drm_gem_dmabuf_kunmap, - .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic, + .map = drm_gem_dmabuf_kmap, + .map_atomic = drm_gem_dmabuf_kmap_atomic, + .unmap = drm_gem_dmabuf_kunmap, + .unmap_atomic = drm_gem_dmabuf_kunmap_atomic, .mmap = drm_gem_dmabuf_mmap, .vmap = drm_gem_dmabuf_vmap, .vunmap = drm_gem_dmabuf_vunmap, diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 11898cd97596..f225bf680b6d 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -200,10 +200,10 @@ static const struct dma_buf_ops i915_dmabuf_ops = { .map_dma_buf = i915_gem_map_dma_buf, .unmap_dma_buf = i915_gem_unmap_dma_buf, .release = drm_gem_dmabuf_release, - .kmap = i915_gem_dmabuf_kmap, - .kmap_atomic = i915_gem_dmabuf_kmap_atomic, - .kunmap = i915_gem_dmabuf_kunmap, - .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic, + .map = i915_gem_dmabuf_kmap, + .map_atomic = i915_gem_dmabuf_kmap_atomic, + .unmap = i915_gem_dmabuf_kunmap, + .unmap_atomic = i915_gem_dmabuf_kunmap_atomic, .mmap = i915_gem_dmabuf_mmap, .vmap = i915_gem_dmabuf_vmap, .vunmap = i915_gem_dmabuf_vunmap, diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c index 99da8f4ef497..302f7d103635 100644 --- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c @@ -129,10 +129,10 @@ static const struct dma_buf_ops mock_dmabuf_ops = { .map_dma_buf = mock_map_dma_buf, .unmap_dma_buf = mock_unmap_dma_buf, .release = mock_dmabuf_release, - .kmap = mock_dmabuf_kmap, - .kmap_atomic = mock_dmabuf_kmap_atomic, - .kunmap = mock_dmabuf_kunmap, - .kunmap_atomic = mock_dmabuf_kunmap_atomic, + .map = mock_dmabuf_kmap, + .map_atomic = mock_dmabuf_kmap_atomic, + .unmap = mock_dmabuf_kunmap, + .unmap_atomic = mock_dmabuf_kunmap_atomic, .mmap = mock_dmabuf_mmap, .vmap = mock_dmabuf_vmap, .vunmap = mock_dmabuf_vunmap, diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index ee5883f59be5..0dbe0306953d 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -160,10 +160,10 @@ static struct dma_buf_ops omap_dmabuf_ops = { .release = omap_gem_dmabuf_release, .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access, .end_cpu_access = omap_gem_dmabuf_end_cpu_access, - .kmap_atomic = omap_gem_dmabuf_kmap_atomic, - .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic, - .kmap = omap_gem_dmabuf_kmap, - .kunmap = omap_gem_dmabuf_kunmap, + .map_atomic = omap_gem_dmabuf_kmap_atomic, + .unmap_atomic = omap_gem_dmabuf_kunmap_atomic, + .map = omap_gem_dmabuf_kmap, + .unmap = omap_gem_dmabuf_kunmap, .mmap = omap_gem_dmabuf_mmap, }; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 17e62ecb5d4d..8672f5d2f237 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -619,10 +619,10 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = { .map_dma_buf = tegra_gem_prime_map_dma_buf, .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf, 
.release = tegra_gem_prime_release, - .kmap_atomic = tegra_gem_prime_kmap_atomic, - .kunmap_atomic = tegra_gem_prime_kunmap_atomic, - .kmap = tegra_gem_prime_kmap, - .kunmap = tegra_gem_prime_kunmap, + .map_atomic = tegra_gem_prime_kmap_atomic, + .unmap_atomic = tegra_gem_prime_kunmap_atomic, + .map = tegra_gem_prime_kmap, + .unmap = tegra_gem_prime_kunmap, .mmap = tegra_gem_prime_mmap, .vmap = tegra_gem_prime_vmap, .vunmap = tegra_gem_prime_vunmap, diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c index ac90ffdb5912..ed0e636243b2 100644 --- a/drivers/gpu/drm/udl/udl_dmabuf.c +++ b/drivers/gpu/drm/udl/udl_dmabuf.c @@ -191,10 +191,10 @@ static struct dma_buf_ops udl_dmabuf_ops = { .detach = udl_detach_dma_buf, .map_dma_buf = udl_map_dma_buf, .unmap_dma_buf = udl_unmap_dma_buf, - .kmap = udl_dmabuf_kmap, - .kmap_atomic = udl_dmabuf_kmap_atomic, - .kunmap = udl_dmabuf_kunmap, - .kunmap_atomic = udl_dmabuf_kunmap_atomic, + .map = udl_dmabuf_kmap, + .map_atomic = udl_dmabuf_kmap_atomic, + .unmap = udl_dmabuf_kunmap, + .unmap_atomic = udl_dmabuf_kunmap_atomic, .mmap = udl_dmabuf_mmap, .release = drm_gem_dmabuf_release, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c index 31fe32d8d65a..0d42a46521fc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c @@ -108,10 +108,10 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = { .map_dma_buf = vmw_prime_map_dma_buf, .unmap_dma_buf = vmw_prime_unmap_dma_buf, .release = NULL, - .kmap = vmw_prime_dmabuf_kmap, - .kmap_atomic = vmw_prime_dmabuf_kmap_atomic, - .kunmap = vmw_prime_dmabuf_kunmap, - .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic, + .map = vmw_prime_dmabuf_kmap, + .map_atomic = vmw_prime_dmabuf_kmap_atomic, + .unmap = vmw_prime_dmabuf_kunmap, + .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic, .mmap = vmw_prime_dmabuf_mmap, .vmap = vmw_prime_dmabuf_vmap, .vunmap = vmw_prime_dmabuf_vunmap, diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index fb6a177be461..2db0413f5d57 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -356,8 +356,8 @@ static struct dma_buf_ops vb2_dc_dmabuf_ops = { .detach = vb2_dc_dmabuf_ops_detach, .map_dma_buf = vb2_dc_dmabuf_ops_map, .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap, - .kmap = vb2_dc_dmabuf_ops_kmap, - .kmap_atomic = vb2_dc_dmabuf_ops_kmap, + .map = vb2_dc_dmabuf_ops_kmap, + .map_atomic = vb2_dc_dmabuf_ops_kmap, .vmap = vb2_dc_dmabuf_ops_vmap, .mmap = vb2_dc_dmabuf_ops_mmap, .release = vb2_dc_dmabuf_ops_release, diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index ecff8f492c4f..6fd1343b7c13 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -504,8 +504,8 @@ static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = { .detach = vb2_dma_sg_dmabuf_ops_detach, .map_dma_buf = vb2_dma_sg_dmabuf_ops_map, .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap, - .kmap = vb2_dma_sg_dmabuf_ops_kmap, - .kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap, + .map = vb2_dma_sg_dmabuf_ops_kmap, + .map_atomic = vb2_dma_sg_dmabuf_ops_kmap, .vmap = vb2_dma_sg_dmabuf_ops_vmap, .mmap = vb2_dma_sg_dmabuf_ops_mmap, .release = vb2_dma_sg_dmabuf_ops_release, diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c index 3f778147cdef..27d1db3bb8cf 100644 --- 
a/drivers/media/v4l2-core/videobuf2-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c @@ -342,8 +342,8 @@ static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = { .detach = vb2_vmalloc_dmabuf_ops_detach, .map_dma_buf = vb2_vmalloc_dmabuf_ops_map, .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap, - .kmap = vb2_vmalloc_dmabuf_ops_kmap, - .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap, + .map = vb2_vmalloc_dmabuf_ops_kmap, + .map_atomic = vb2_vmalloc_dmabuf_ops_kmap, .vmap = vb2_vmalloc_dmabuf_ops_vmap, .mmap = vb2_vmalloc_dmabuf_ops_mmap, .release = vb2_vmalloc_dmabuf_ops_release, diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index f45115fce4eb..95a7f1648c00 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1020,10 +1020,10 @@ static const struct dma_buf_ops dma_buf_ops = { .release = ion_dma_buf_release, .begin_cpu_access = ion_dma_buf_begin_cpu_access, .end_cpu_access = ion_dma_buf_end_cpu_access, - .kmap_atomic = ion_dma_buf_kmap, - .kunmap_atomic = ion_dma_buf_kunmap, - .kmap = ion_dma_buf_kmap, - .kunmap = ion_dma_buf_kunmap, + .map_atomic = ion_dma_buf_kmap, + .unmap_atomic = ion_dma_buf_kunmap, + .map = ion_dma_buf_kmap, + .unmap = ion_dma_buf_kunmap, }; struct dma_buf *ion_share_dma_buf(struct ion_client *client, diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index bfb3704fc6fc..79f27d60ec66 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -39,13 +39,13 @@ struct dma_buf_attachment; /** * struct dma_buf_ops - operations possible on struct dma_buf - * @kmap_atomic: maps a page from the buffer into kernel address - * space, users may not block until the subsequent unmap call. - * This callback must not sleep. - * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer. - * This Callback must not sleep. - * @kmap: maps a page from the buffer into kernel address space. - * @kunmap: [optional] unmaps a page from the buffer. + * @map_atomic: maps a page from the buffer into kernel address + * space, users may not block until the subsequent unmap call. + * This callback must not sleep. + * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer. + * This Callback must not sleep. + * @map: maps a page from the buffer into kernel address space. + * @unmap: [optional] unmaps a page from the buffer. * @vmap: [optional] creates a virtual mapping for the buffer into kernel * address space. Same restrictions as for vmap and friends apply. * @vunmap: [optional] unmaps a vmap from the buffer @@ -206,10 +206,10 @@ struct dma_buf_ops { * to be restarted. */ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); - void *(*kmap_atomic)(struct dma_buf *, unsigned long); - void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); - void *(*kmap)(struct dma_buf *, unsigned long); - void (*kunmap)(struct dma_buf *, unsigned long, void *); + void *(*map_atomic)(struct dma_buf *, unsigned long); + void (*unmap_atomic)(struct dma_buf *, unsigned long, void *); + void *(*map)(struct dma_buf *, unsigned long); + void (*unmap)(struct dma_buf *, unsigned long, void *); /** * @mmap: -- cgit v1.2.3-71-gd317
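[ Illustration: a minimal sketch of what an exporter's dma_buf_ops table looks like after this rename. The example_* stubs are hypothetical, and the mandatory attach/map_dma_buf/unmap_dma_buf/release/mmap callbacks that dma_buf_export() checks for are elided for brevity. ]

#include <linux/dma-buf.h>

static void *example_map(struct dma_buf *dmabuf, unsigned long page_num)
{
	/* A real exporter returns a kernel mapping of that page. */
	return NULL;
}

static void example_unmap(struct dma_buf *dmabuf, unsigned long page_num,
			  void *vaddr)
{
}

static const struct dma_buf_ops example_dmabuf_ops = {
	/* attach/detach, map_dma_buf/unmap_dma_buf, release and mmap
	 * omitted here; dma_buf_export() still requires several of them. */
	.map_atomic	= example_map,
	.unmap_atomic	= example_unmap,
	.map		= example_map,
	.unmap		= example_unmap,
};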