From e7e0c3e26587749b62d17b9dd0532874186c77f7 Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Sun, 3 Apr 2016 16:15:00 -0300 Subject: [media] videobuf2-core: Check user space planes array in dqbuf The number of planes in videobuf2 is specific to a buffer. In order to verify that the planes array provided by the user is long enough, a new vb2_buf_op is required. Call __verify_planes_array() when the dequeued buffer is known. Return an error to the caller if there was one, otherwise remove the buffer from the done list. Signed-off-by: Sakari Ailus Acked-by: Hans Verkuil Cc: stable@vger.kernel.org # for v4.4 and later Signed-off-by: Mauro Carvalho Chehab --- include/media/videobuf2-core.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 8a0f55b6c2ba..5342ff4d748f 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -375,6 +375,9 @@ struct vb2_ops { /** * struct vb2_ops - driver-specific callbacks * + * @verify_planes_array: Verify that a given user space structure contains + * enough planes for the buffer. This is called + * for each dequeued buffer. * @fill_user_buffer: given a vb2_buffer fill in the userspace structure. * For V4L2 this is a struct v4l2_buffer. * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer. @@ -384,6 +387,7 @@ struct vb2_ops { * the vb2_buffer struct. */ struct vb2_buf_ops { + int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb); void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb, struct vb2_plane *planes); -- cgit v1.2.3-71-gd317 From 75dd602a5198a6e5f75534db52b6e6fbaabb33d1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 30 Mar 2016 11:36:59 +0200 Subject: lockdep: Fix lock_chain::base size lock_chain::base is used to store an index into the chain_hlocks[] array, however that array contains more elements than can be indexed using the u16. Change the lock_chain structure to use a bitfield to encode the data it needs and add BUILD_BUG_ON() assertions to check the fields are wide enough. Also, for DEBUG_LOCKDEP, assert that we don't run out of elements of that array; as that would wreck the collision detectoring. Signed-off-by: Peter Zijlstra (Intel) Cc: Alfredo Alvarez Fernandez Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Sedat Dilek Cc: Theodore Ts'o Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20160330093659.GS3408@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 8 +++++--- kernel/locking/lockdep.c | 24 +++++++++++++++++++++++- kernel/locking/lockdep_proc.c | 2 ++ 3 files changed, 30 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index d026b190c530..d10ef06971b5 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -196,9 +196,11 @@ struct lock_list { * We record lock dependency chains, so that we can cache them: */ struct lock_chain { - u8 irq_context; - u8 depth; - u16 base; + /* see BUILD_BUG_ON()s in lookup_chain_cache() */ + unsigned int irq_context : 2, + depth : 6, + base : 24; + /* 4 byte hole */ struct hlist_node entry; u64 chain_key; }; diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index beb06f604420..78c1c0ee6dc1 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2176,15 +2176,37 @@ cache_hit: chain->irq_context = hlock->irq_context; i = get_first_held_lock(curr, hlock); chain->depth = curr->lockdep_depth + 1 - i; + + BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks)); + BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); + BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes)); + if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { chain->base = nr_chain_hlocks; - nr_chain_hlocks += chain->depth; for (j = 0; j < chain->depth - 1; j++, i++) { int lock_id = curr->held_locks[i].class_idx - 1; chain_hlocks[chain->base + j] = lock_id; } chain_hlocks[chain->base + j] = class - lock_classes; } + + if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS) + nr_chain_hlocks += chain->depth; + +#ifdef CONFIG_DEBUG_LOCKDEP + /* + * Important for check_no_collision(). + */ + if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) { + if (debug_locks_off_graph_unlock()) + return 0; + + print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); + dump_stack(); + return 0; + } +#endif + hlist_add_head_rcu(&chain->entry, hash_head); debug_atomic_inc(chain_lookup_misses); inc_chains(); diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index dbb61a302548..a0f61effad25 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -141,6 +141,8 @@ static int lc_show(struct seq_file *m, void *v) int i; if (v == SEQ_START_TOKEN) { + if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS) + seq_printf(m, "(buggered) "); seq_printf(m, "all lock chains:\n"); return 0; } -- cgit v1.2.3-71-gd317 From 7ceb2afbd6aee4643056b47156baad6841db8e78 Mon Sep 17 00:00:00 2001 From: Elad Raz Date: Thu, 21 Apr 2016 12:52:43 +0200 Subject: switchdev: Adding complete operation to deferred switchdev ops When using switchdev deferred operation (SWITCHDEV_F_DEFER), the operation is executed in different context and the application doesn't have any way to get the operation real status. Adding a completion callback fixes that. This patch adds fields to switchdev_attr and switchdev_obj "complete_priv" field which is used by the "complete" callback. Application can set a complete function which will be called once the operation executed. Signed-off-by: Elad Raz Signed-off-by: Jiri Pirko Reviewed-by: Ido Schimmel Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/net/switchdev.h | 4 ++++ net/switchdev/switchdev.c | 6 ++++++ 2 files changed, 10 insertions(+) (limited to 'include') diff --git a/include/net/switchdev.h b/include/net/switchdev.h index d451122e8404..51d77b2ce2b2 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -54,6 +54,8 @@ struct switchdev_attr { struct net_device *orig_dev; enum switchdev_attr_id id; u32 flags; + void *complete_priv; + void (*complete)(struct net_device *dev, int err, void *priv); union { struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ u8 stp_state; /* PORT_STP_STATE */ @@ -75,6 +77,8 @@ struct switchdev_obj { struct net_device *orig_dev; enum switchdev_obj_id id; u32 flags; + void *complete_priv; + void (*complete)(struct net_device *dev, int err, void *priv); }; /* SWITCHDEV_OBJ_ID_PORT_VLAN */ diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 2b9b98f1c2ff..b7e01d88bdc5 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -305,6 +305,8 @@ static void switchdev_port_attr_set_deferred(struct net_device *dev, if (err && err != -EOPNOTSUPP) netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", err, attr->id); + if (attr->complete) + attr->complete(dev, err, attr->complete_priv); } static int switchdev_port_attr_set_defer(struct net_device *dev, @@ -434,6 +436,8 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev, if (err && err != -EOPNOTSUPP) netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", err, obj->id); + if (obj->complete) + obj->complete(dev, err, obj->complete_priv); } static int switchdev_port_obj_add_defer(struct net_device *dev, @@ -502,6 +506,8 @@ static void switchdev_port_obj_del_deferred(struct net_device *dev, if (err && err != -EOPNOTSUPP) netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", err, obj->id); + if (obj->complete) + obj->complete(dev, err, obj->complete_priv); } static int switchdev_port_obj_del_defer(struct net_device *dev, -- cgit v1.2.3-71-gd317 From 748164802c1bd2c52937d20782b07d8c68dd9a4f Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 22 Apr 2016 11:28:08 +0200 Subject: macsec: add missing macsec prefix in uapi I accidentally forgot some MACSEC_ prefixes in if_macsec.h. Fixes: dece8d2b78d1 ("uapi: add MACsec bits") Signed-off-by: Sabrina Dubroca Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- drivers/net/macsec.c | 12 +++++++----- include/uapi/linux/if_macsec.h | 4 ++-- 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index b37d348b8ea0..9f63cc7b0a73 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2232,7 +2232,8 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) return 1; if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) || - nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) || + nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, + MACSEC_DEFAULT_CIPHER_ID) || nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || @@ -3096,7 +3097,7 @@ unregister: static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) { - u64 csid = DEFAULT_CIPHER_ID; + u64 csid = MACSEC_DEFAULT_CIPHER_ID; u8 icv_len = DEFAULT_ICV_LEN; int flag; bool es, scb, sci; @@ -3111,8 +3112,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); switch (csid) { - case DEFAULT_CIPHER_ID: - case DEFAULT_CIPHER_ALT: + case MACSEC_DEFAULT_CIPHER_ID: + case MACSEC_DEFAULT_CIPHER_ALT: if (icv_len < MACSEC_MIN_ICV_LEN || icv_len > MACSEC_MAX_ICV_LEN) return -EINVAL; @@ -3185,7 +3186,8 @@ static int macsec_fill_info(struct sk_buff *skb, if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) || nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || - nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) || + nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, + MACSEC_DEFAULT_CIPHER_ID) || nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h index 26b0d1e3e3e7..4c58d9917aa4 100644 --- a/include/uapi/linux/if_macsec.h +++ b/include/uapi/linux/if_macsec.h @@ -19,8 +19,8 @@ #define MACSEC_MAX_KEY_LEN 128 -#define DEFAULT_CIPHER_ID 0x0080020001000001ULL -#define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL +#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL +#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL #define MACSEC_MIN_ICV_LEN 8 #define MACSEC_MAX_ICV_LEN 32 -- cgit v1.2.3-71-gd317 From 046339eaab26804f52f6604877f5674f70815b26 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Fri, 22 Apr 2016 00:33:03 +0300 Subject: net/mlx5e: Device's mtu field is u16 and not int For set/query MTU port firmware commands the MTU field is 16 bits, here I changed all the "int mtu" parameters of the functions wrapping those firmware commands to be u16. Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx5/main.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 10 +++++----- include/linux/mlx5/port.h | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 5acf346e048e..99eb1c1a3b7b 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_core_dev *mdev = dev->mdev; struct mlx5_hca_vport_context *rep; - int max_mtu; - int oper_mtu; + u16 max_mtu; + u16 oper_mtu; int err; u8 ib_link_width_oper; u8 vl_hw_cap; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e0adb604f461..2fbbc625f106 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1408,7 +1408,7 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - int hw_mtu; + u16 hw_mtu; int err; err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); @@ -2004,7 +2004,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; bool was_opened; - int max_mtu; + u16 max_mtu; int err = 0; mlx5_query_port_max_mtu(mdev, &max_mtu, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index ae378c575deb..53cc1e2c693b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -247,8 +247,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); -static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, - int *max_mtu, int *oper_mtu, u8 port) +static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu, + u16 *max_mtu, u16 *oper_mtu, u8 port) { u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; @@ -268,7 +268,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); } -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port) { u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; @@ -283,14 +283,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) } EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port) { mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port); } EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, u8 port) { mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port); diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index a1d145abd4eb..b30250ab7604 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -54,9 +54,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, int mlx5_query_port_admin_status(struct mlx5_core_dev 
*dev, enum mlx5_port_status *status); -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, u8 port); int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, -- cgit v1.2.3-71-gd317 From cd255efff9baadd654d6160e52d17ae7c568c9d3 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Fri, 22 Apr 2016 00:33:05 +0300 Subject: net/mlx5e: Use vport MTU rather than physical port MTU Set and report vport MTU rather than physical MTU, Driver will set both vport and physical port mtu and will rely on the query of vport mtu. SRIOV VFs have to report their MTU to their vport manager (PF), and this will allow them to work with any MTU they need without failing the request. Also for some cases where the PF is not a port owner, PF can work with MTU less than the physical port mtu if set physical port mtu didn't take effect. Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 44 ++++++++++++++++++----- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 40 +++++++++++++++++++++ include/linux/mlx5/vport.h | 2 ++ 3 files changed, 77 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 93e4ef43fa9f..85773f8573e5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1404,24 +1404,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) return 0; } -static int mlx5e_set_dev_port_mtu(struct net_device *netdev) +static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) { - struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - u16 hw_mtu; + u16 hw_mtu = MLX5E_SW2HW_MTU(mtu); int err; - err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); + err = mlx5_set_port_mtu(mdev, hw_mtu, 1); if (err) return err; - mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); + /* Update vport context MTU */ + mlx5_modify_nic_vport_mtu(mdev, hw_mtu); + return 0; +} + +static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u16 hw_mtu = 0; + int err; + + err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu); + if (err || !hw_mtu) /* fallback to port oper mtu */ + mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); + + *mtu = MLX5E_HW2SW_MTU(hw_mtu); +} + +static int mlx5e_set_dev_port_mtu(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + u16 mtu; + int err; + + err = mlx5e_set_mtu(priv, netdev->mtu); + if (err) + return err; - if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) - netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", - __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); + mlx5e_query_mtu(priv, &mtu); + if (mtu != netdev->mtu) + netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", + __func__, mtu, netdev->mtu); - netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); + netdev->mtu = mtu; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index bd518405859e..b69dadcfb897 
100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, } EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address); +int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu) +{ + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + u32 *out; + int err; + + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + err = mlx5_query_nic_vport_context(mdev, 0, out, outlen); + if (!err) + *mtu = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.mtu); + + kvfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu); + +int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu) +{ + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); + void *in; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1); + MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu); + + err = mlx5_modify_nic_vport_context(mdev, in, inlen); + + kvfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu); + int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, u32 vport, enum mlx5_list_type list_type, diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index bd93e6323603..301da4a5e6bf 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, u16 vport, u8 *addr); +int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); +int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, u64 *system_image_guid); int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); -- cgit v1.2.3-71-gd317 From 5fc7197d3a256d9c5de3134870304b24892a4908 Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Fri, 22 Apr 2016 00:33:07 +0300 Subject: net/mlx5: Add pci shutdown callback This patch introduces kexec support for mlx5. When switching kernels, kexec() calls shutdown, which unloads the driver and cleans its resources. In addition, remove unregister netdev from shutdown flow. This will allow a clean shutdown, even if some netdev clients did not release their reference from this netdev. Releasing The HW resources only is enough as the kernel is shutting down Signed-off-by: Majd Dibbiny Signed-off-by: Tariq Toukan Signed-off-by: Haggai Abramovsky Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 15 +++++++++++++-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 23 +++++++++++++++++++---- include/linux/mlx5/driver.h | 7 ++++--- 3 files changed, 36 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 85773f8573e5..67d548b70e14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2633,7 +2633,16 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) schedule_work(&priv->set_rx_mode_work); mlx5e_disable_async_events(priv); flush_scheduled_work(); - unregister_netdev(netdev); + if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { + netif_device_detach(netdev); + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_close_locked(netdev); + mutex_unlock(&priv->state_lock); + } else { + unregister_netdev(netdev); + } + mlx5e_tc_cleanup(priv); mlx5e_vxlan_cleanup(priv); mlx5e_destroy_flow_tables(priv); @@ -2646,7 +2655,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); mlx5_core_dealloc_pd(priv->mdev, priv->pdn); mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); - free_netdev(netdev); + + if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) + free_netdev(netdev); } static void *mlx5e_get_netdev(void *vpriv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index ddd352ae3dbd..6892746fd10d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) int err; mutex_lock(&dev->intf_state_mutex); - if (dev->interface_state == MLX5_INTERFACE_STATE_UP) { + if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", __func__); goto out; @@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) if (err) pr_info("failed request module on %s\n", MLX5_IB_MOD); - dev->interface_state = MLX5_INTERFACE_STATE_UP; + clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); + set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); out: mutex_unlock(&dev->intf_state_mutex); @@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) } mutex_lock(&dev->intf_state_mutex); - if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { + if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", __func__); goto out; @@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_cmd_cleanup(dev); out: - dev->interface_state = MLX5_INTERFACE_STATE_DOWN; + clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); mutex_unlock(&dev->intf_state_mutex); return err; } @@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = { .resume = mlx5_pci_resume }; +static void shutdown(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_priv *priv = &dev->priv; + + dev_info(&pdev->dev, "Shutdown was called\n"); + /* Notify mlx5 clients that the kernel is being shut down */ + 
set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state); + mlx5_unload_one(dev, priv); + mlx5_pci_disable_device(dev); +} + static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ @@ -1471,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = { .id_table = mlx5_core_pci_table, .probe = init_one, .remove = remove_one, + .shutdown = shutdown, .err_handler = &mlx5_err_handler, .sriov_configure = mlx5_core_sriov_configure, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index dcd5ac8d3b14..369c837d40f5 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -519,8 +519,9 @@ enum mlx5_device_state { }; enum mlx5_interface_state { - MLX5_INTERFACE_STATE_DOWN, - MLX5_INTERFACE_STATE_UP, + MLX5_INTERFACE_STATE_DOWN = BIT(0), + MLX5_INTERFACE_STATE_UP = BIT(1), + MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2), }; enum mlx5_pci_status { @@ -544,7 +545,7 @@ struct mlx5_core_dev { enum mlx5_device_state state; /* sync interface state */ struct mutex intf_state_mutex; - enum mlx5_interface_state interface_state; + unsigned long intf_state; void (*event) (struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); -- cgit v1.2.3-71-gd317 From 3020ca711871fdaf0c15c8bab677a6bc302e28fe Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 22 Apr 2016 04:00:50 -0300 Subject: [media] v4l2-dv-timings.h: fix polarity for 4k formats The VSync polarity was negative instead of positive for the 4k CEA formats. I probably copy-and-pasted these from the DMT 4k format, which does have a negative VSync polarity. Signed-off-by: Hans Verkuil Reported-by: Martin Bugge Cc: # for v4.1 and up Signed-off-by: Mauro Carvalho Chehab --- include/uapi/linux/v4l2-dv-timings.h | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h index c039f1d68a09..086168e18ca8 100644 --- a/include/uapi/linux/v4l2-dv-timings.h +++ b/include/uapi/linux/v4l2-dv-timings.h @@ -183,7 +183,8 @@ #define V4L2_DV_BT_CEA_3840X2160P24 { \ .type = V4L2_DV_BT_656_1120, \ - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \ V4L2_DV_BT_STD_CEA861, \ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ @@ -191,14 +192,16 @@ #define V4L2_DV_BT_CEA_3840X2160P25 { \ .type = V4L2_DV_BT_656_1120, \ - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ } #define V4L2_DV_BT_CEA_3840X2160P30 { \ .type = V4L2_DV_BT_656_1120, \ - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ V4L2_DV_BT_STD_CEA861, \ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ @@ -206,14 +209,16 @@ #define V4L2_DV_BT_CEA_3840X2160P50 { \ .type = V4L2_DV_BT_656_1120, \ - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ 
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ } #define V4L2_DV_BT_CEA_3840X2160P60 { \ .type = V4L2_DV_BT_656_1120, \ - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ V4L2_DV_BT_STD_CEA861, \ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ @@ -221,7 +226,8 @@ #define V4L2_DV_BT_CEA_4096X2160P24 { \ .type = V4L2_DV_BT_656_1120, \ - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \ V4L2_DV_BT_STD_CEA861, \ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ @@ -229,14 +235,16 @@ #define V4L2_DV_BT_CEA_4096X2160P25 { \ .type = V4L2_DV_BT_656_1120, \ - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ } #define V4L2_DV_BT_CEA_4096X2160P30 { \ .type = V4L2_DV_BT_656_1120, \ - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ V4L2_DV_BT_STD_CEA861, \ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ @@ -244,14 +252,16 @@ #define V4L2_DV_BT_CEA_4096X2160P50 { \ .type = V4L2_DV_BT_656_1120, \ - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ } #define V4L2_DV_BT_CEA_4096X2160P60 { \ .type = V4L2_DV_BT_656_1120, \ - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ V4L2_DV_BT_STD_CEA861, \ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ -- cgit v1.2.3-71-gd317 From b93876845c5e30a92964eeb088d9d2e024118022 Mon Sep 17 00:00:00 2001 From: Ricardo Ribalda Date: Mon, 25 Apr 2016 06:04:45 -0300 Subject: [media] media: vb2: Fix regression on poll() for RW mode When using a device is read/write mode, vb2 does not handle properly the first select/poll operation. The reason for this, is that when this code has been refactored, some of the operations have changed their order, and now fileio emulator is not started. The reintroduced check to the core is enabled by a quirk flag, that avoids this check by other subsystems like DVB. 
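A minimal usage sketch of the vb1-compatible semantics this quirk preserves (not taken from the patch; /dev/video0 is assumed to be a vb2-backed V4L2 capture device): after VIDIOC_REQBUFS but before any VIDIOC_QBUF, poll() on the capture queue is expected to flag POLLERR rather than block or report data.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	/* assumption: /dev/video0 is a vb2-backed capture device */
	int fd = open("/dev/video0", O_RDWR);
	struct v4l2_requestbuffers req = {
		.count  = 4,
		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0 || ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return 1;

	/* no VIDIOC_QBUF yet: with the quirk set by the V4L2 layer, poll()
	 * reports POLLERR instead of pretending data may become ready */
	if (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLERR))
		printf("POLLERR before QBUF, as expected for V4L2 capture\n");

	return 0;
}

Subsystems that use the vb2 core directly and never set the quirk (such as DVB) keep the plain core behaviour and are unaffected by this check.
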
Fixes: 49d8ab9feaf2 ("media] media: videobuf2: Separate vb2_poll()") Reported-by: Dimitrios Katsaros Cc: Junghak Sung Cc: # for v4.5 and up Signed-off-by: Ricardo Ribalda Delgado Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/v4l2-core/videobuf2-core.c | 10 ++++++++++ drivers/media/v4l2-core/videobuf2-v4l2.c | 14 ++++++-------- include/media/videobuf2-core.h | 4 ++++ 3 files changed, 20 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 2169544883e8..9fbcb67a9ee6 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -2297,6 +2297,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file, if (!vb2_is_streaming(q) || q->error) return POLLERR; + /* + * If this quirk is set and QBUF hasn't been called yet then + * return POLLERR as well. This only affects capture queues, output + * queues will always initialize waiting_for_buffers to false. + * This quirk is set by V4L2 for backwards compatibility reasons. + */ + if (q->quirk_poll_must_check_waiting_for_buffers && + q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM))) + return POLLERR; + /* * For output streams you can call write() as long as there are fewer * buffers queued than there are buffers available. diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c index 8da7470ca364..7f366f1b0377 100644 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c @@ -771,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q) q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == V4L2_BUF_FLAG_TIMESTAMP_COPY; + /* + * For compatibility with vb1: if QBUF hasn't been called yet, then + * return POLLERR as well. This only affects capture queues, output + * queues will always initialize waiting_for_buffers to false. + */ + q->quirk_poll_must_check_waiting_for_buffers = true; return vb2_core_queue_init(q); } @@ -824,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) poll_wait(file, &fh->wait, wait); } - /* - * For compatibility with vb1: if QBUF hasn't been called yet, then - * return POLLERR as well. This only affects capture queues, output - * queues will always initialize waiting_for_buffers to false. - */ - if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM))) - return POLLERR; - return res | vb2_core_poll(q, file, wait); } EXPORT_SYMBOL_GPL(vb2_poll); diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 5342ff4d748f..88e3ab496e8f 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -404,6 +404,9 @@ struct vb2_buf_ops { * @fileio_read_once: report EOF after reading the first buffer * @fileio_write_immediately: queue buffer after each write() call * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver + * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF + * has not been called. This is a vb1 idiom that has been adopted + * also by vb2. * @lock: pointer to a mutex that protects the vb2_queue struct. The * driver can set this to a mutex to let the v4l2 core serialize * the queuing ioctls. 
If the driver wants to handle locking @@ -467,6 +470,7 @@ struct vb2_queue { unsigned fileio_read_once:1; unsigned fileio_write_immediately:1; unsigned allow_zero_bytesused:1; + unsigned quirk_poll_must_check_waiting_for_buffers:1; struct mutex *lock; void *owner; -- cgit v1.2.3-71-gd317 From 6c1ea260f89709e0021d2c59f8fd2a104b5b1123 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 11 Apr 2016 19:34:49 +0200 Subject: libceph: make authorizer destruction independent of ceph_auth_client Starting the kernel client with cephx disabled and then enabling cephx and restarting userspace daemons can result in a crash: [262671.478162] BUG: unable to handle kernel paging request at ffffebe000000000 [262671.531460] IP: [] kfree+0x5a/0x130 [262671.584334] PGD 0 [262671.635847] Oops: 0000 [#1] SMP [262672.055841] CPU: 22 PID: 2961272 Comm: kworker/22:2 Not tainted 4.2.0-34-generic #39~14.04.1-Ubuntu [262672.162338] Hardware name: Dell Inc. PowerEdge R720/068CDY, BIOS 2.4.3 07/09/2014 [262672.268937] Workqueue: ceph-msgr con_work [libceph] [262672.322290] task: ffff88081c2d0dc0 ti: ffff880149ae8000 task.ti: ffff880149ae8000 [262672.428330] RIP: 0010:[] [] kfree+0x5a/0x130 [262672.535880] RSP: 0018:ffff880149aeba58 EFLAGS: 00010286 [262672.589486] RAX: 000001e000000000 RBX: 0000000000000012 RCX: ffff8807e7461018 [262672.695980] RDX: 000077ff80000000 RSI: ffff88081af2be04 RDI: 0000000000000012 [262672.803668] RBP: ffff880149aeba78 R08: 0000000000000000 R09: 0000000000000000 [262672.912299] R10: ffffebe000000000 R11: ffff880819a60e78 R12: ffff8800aec8df40 [262673.021769] R13: ffffffffc035f70f R14: ffff8807e5b138e0 R15: ffff880da9785840 [262673.131722] FS: 0000000000000000(0000) GS:ffff88081fac0000(0000) knlGS:0000000000000000 [262673.245377] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [262673.303281] CR2: ffffebe000000000 CR3: 0000000001c0d000 CR4: 00000000001406e0 [262673.417556] Stack: [262673.472943] ffff880149aeba88 ffff88081af2be04 ffff8800aec8df40 ffff88081af2be04 [262673.583767] ffff880149aeba98 ffffffffc035f70f ffff880149aebac8 ffff8800aec8df00 [262673.694546] ffff880149aebac8 ffffffffc035c89e ffff8807e5b138e0 ffff8805b047f800 [262673.805230] Call Trace: [262673.859116] [] ceph_x_destroy_authorizer+0x1f/0x50 [libceph] [262673.968705] [] ceph_auth_destroy_authorizer+0x3e/0x60 [libceph] [262674.078852] [] put_osd+0x45/0x80 [libceph] [262674.134249] [] remove_osd+0xae/0x140 [libceph] [262674.189124] [] __reset_osd+0x103/0x150 [libceph] [262674.243749] [] kick_requests+0x223/0x460 [libceph] [262674.297485] [] ceph_osdc_handle_map+0x282/0x5e0 [libceph] [262674.350813] [] dispatch+0x4e/0x720 [libceph] [262674.403312] [] try_read+0x3d1/0x1090 [libceph] [262674.454712] [] ? dequeue_entity+0x152/0x690 [262674.505096] [] con_work+0xcb/0x1300 [libceph] [262674.555104] [] process_one_work+0x14e/0x3d0 [262674.604072] [] worker_thread+0x11a/0x470 [262674.652187] [] ? rescuer_thread+0x310/0x310 [262674.699022] [] kthread+0xd2/0xf0 [262674.744494] [] ? kthread_create_on_node+0x1c0/0x1c0 [262674.789543] [] ret_from_fork+0x3f/0x70 [262674.834094] [] ? kthread_create_on_node+0x1c0/0x1c0 What happens is the following: (1) new MON session is established (2) old "none" ac is destroyed (3) new "cephx" ac is constructed ... (4) old OSD session (w/ "none" authorizer) is put ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer) osd->o_auth.authorizer in the "none" case is just a bare pointer into ac, which contains a single static copy for all services. 
By the time we get to (4), "none" ac, freed in (2), is long gone. On top of that, a new vtable installed in (3) points us at ceph_x_destroy_authorizer(), so we end up trying to destroy a "none" authorizer with a "cephx" destructor operating on invalid memory! To fix this, decouple authorizer destruction from ac and do away with a single static "none" authorizer by making a copy for each OSD or MDS session. Authorizers themselves are independent of ac and so there is no reason for destroy_authorizer() to be an ac op. Make it an op on the authorizer itself by turning ceph_authorizer into a real struct. Fixes: http://tracker.ceph.com/issues/15447 Reported-by: Alan Zhang Signed-off-by: Ilya Dryomov Reviewed-by: Sage Weil --- fs/ceph/mds_client.c | 6 ++-- include/linux/ceph/auth.h | 10 +++--- include/linux/ceph/osd_client.h | 1 - net/ceph/auth.c | 8 ++--- net/ceph/auth_none.c | 71 ++++++++++++++++++++++------------------- net/ceph/auth_none.h | 3 +- net/ceph/auth_x.c | 21 ++++++------ net/ceph/auth_x.h | 1 + net/ceph/osd_client.c | 6 ++-- 9 files changed, 62 insertions(+), 65 deletions(-) (limited to 'include') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 541ead4d8965..85b8517f17a0 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -386,9 +386,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s) atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); if (atomic_dec_and_test(&s->s_ref)) { if (s->s_auth.authorizer) - ceph_auth_destroy_authorizer( - s->s_mdsc->fsc->client->monc.auth, - s->s_auth.authorizer); + ceph_auth_destroy_authorizer(s->s_auth.authorizer); kfree(s); } } @@ -3900,7 +3898,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, struct ceph_auth_handshake *auth = &s->s_auth; if (force_new && auth->authorizer) { - ceph_auth_destroy_authorizer(ac, auth->authorizer); + ceph_auth_destroy_authorizer(auth->authorizer); auth->authorizer = NULL; } if (!auth->authorizer) { diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index 260d78b587c4..1563265d2097 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -12,9 +12,12 @@ */ struct ceph_auth_client; -struct ceph_authorizer; struct ceph_msg; +struct ceph_authorizer { + void (*destroy)(struct ceph_authorizer *); +}; + struct ceph_auth_handshake { struct ceph_authorizer *authorizer; void *authorizer_buf; @@ -62,8 +65,6 @@ struct ceph_auth_client_ops { struct ceph_auth_handshake *auth); int (*verify_authorizer_reply)(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len); - void (*destroy_authorizer)(struct ceph_auth_client *ac, - struct ceph_authorizer *a); void (*invalidate_authorizer)(struct ceph_auth_client *ac, int peer_type); @@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth); -extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, - struct ceph_authorizer *a); +void ceph_auth_destroy_authorizer(struct ceph_authorizer *a); extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *a); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 4343df806710..cbf460927c42 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -16,7 +16,6 @@ struct ceph_msg; struct ceph_snap_context; struct ceph_osd_request; struct ceph_osd_client; -struct ceph_authorizer; /* 
* completion callback for async writepages diff --git a/net/ceph/auth.c b/net/ceph/auth.c index 6b923bcaa2a4..2bc5965fdd1e 100644 --- a/net/ceph/auth.c +++ b/net/ceph/auth.c @@ -293,13 +293,9 @@ int ceph_auth_create_authorizer(struct ceph_auth_client *ac, } EXPORT_SYMBOL(ceph_auth_create_authorizer); -void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, - struct ceph_authorizer *a) +void ceph_auth_destroy_authorizer(struct ceph_authorizer *a) { - mutex_lock(&ac->mutex); - if (ac->ops && ac->ops->destroy_authorizer) - ac->ops->destroy_authorizer(ac, a); - mutex_unlock(&ac->mutex); + a->destroy(a); } EXPORT_SYMBOL(ceph_auth_destroy_authorizer); diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c index 8c93fa8d81bc..5f836f02ae36 100644 --- a/net/ceph/auth_none.c +++ b/net/ceph/auth_none.c @@ -16,7 +16,6 @@ static void reset(struct ceph_auth_client *ac) struct ceph_auth_none_info *xi = ac->private; xi->starting = true; - xi->built_authorizer = false; } static void destroy(struct ceph_auth_client *ac) @@ -39,6 +38,27 @@ static int should_authenticate(struct ceph_auth_client *ac) return xi->starting; } +static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac, + struct ceph_none_authorizer *au) +{ + void *p = au->buf; + void *const end = p + sizeof(au->buf); + int ret; + + ceph_encode_8_safe(&p, end, 1, e_range); + ret = ceph_entity_name_encode(ac->name, &p, end); + if (ret < 0) + return ret; + + ceph_encode_64_safe(&p, end, ac->global_id, e_range); + au->buf_len = p - (void *)au->buf; + dout("%s built authorizer len %d\n", __func__, au->buf_len); + return 0; + +e_range: + return -ERANGE; +} + static int build_request(struct ceph_auth_client *ac, void *buf, void *end) { return 0; @@ -57,32 +77,32 @@ static int handle_reply(struct ceph_auth_client *ac, int result, return result; } +static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a) +{ + kfree(a); +} + /* - * build an 'authorizer' with our entity_name and global_id. we can - * reuse a single static copy since it is identical for all services - * we connect to. + * build an 'authorizer' with our entity_name and global_id. it is + * identical for all services we connect to. 
*/ static int ceph_auth_none_create_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { - struct ceph_auth_none_info *ai = ac->private; - struct ceph_none_authorizer *au = &ai->au; - void *p, *end; + struct ceph_none_authorizer *au; int ret; - if (!ai->built_authorizer) { - p = au->buf; - end = p + sizeof(au->buf); - ceph_encode_8(&p, 1); - ret = ceph_entity_name_encode(ac->name, &p, end - 8); - if (ret < 0) - goto bad; - ceph_decode_need(&p, end, sizeof(u64), bad2); - ceph_encode_64(&p, ac->global_id); - au->buf_len = p - (void *)au->buf; - ai->built_authorizer = true; - dout("built authorizer len %d\n", au->buf_len); + au = kmalloc(sizeof(*au), GFP_NOFS); + if (!au) + return -ENOMEM; + + au->base.destroy = ceph_auth_none_destroy_authorizer; + + ret = ceph_auth_none_build_authorizer(ac, au); + if (ret) { + kfree(au); + return ret; } auth->authorizer = (struct ceph_authorizer *) au; @@ -92,17 +112,6 @@ static int ceph_auth_none_create_authorizer( auth->authorizer_reply_buf_len = sizeof (au->reply_buf); return 0; - -bad2: - ret = -ERANGE; -bad: - return ret; -} - -static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac, - struct ceph_authorizer *a) -{ - /* nothing to do */ } static const struct ceph_auth_client_ops ceph_auth_none_ops = { @@ -114,7 +123,6 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = { .build_request = build_request, .handle_reply = handle_reply, .create_authorizer = ceph_auth_none_create_authorizer, - .destroy_authorizer = ceph_auth_none_destroy_authorizer, }; int ceph_auth_none_init(struct ceph_auth_client *ac) @@ -127,7 +135,6 @@ int ceph_auth_none_init(struct ceph_auth_client *ac) return -ENOMEM; xi->starting = true; - xi->built_authorizer = false; ac->protocol = CEPH_AUTH_NONE; ac->private = xi; diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h index 059a3ce4b53f..62021535ae4a 100644 --- a/net/ceph/auth_none.h +++ b/net/ceph/auth_none.h @@ -12,6 +12,7 @@ */ struct ceph_none_authorizer { + struct ceph_authorizer base; char buf[128]; int buf_len; char reply_buf[0]; @@ -19,8 +20,6 @@ struct ceph_none_authorizer { struct ceph_auth_none_info { bool starting; - bool built_authorizer; - struct ceph_none_authorizer au; /* we only need one; it's static */ }; int ceph_auth_none_init(struct ceph_auth_client *ac); diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 9e43a315e662..a0905f04bd13 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -565,6 +565,14 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, return -EAGAIN; } +static void ceph_x_destroy_authorizer(struct ceph_authorizer *a) +{ + struct ceph_x_authorizer *au = (void *)a; + + ceph_x_authorizer_cleanup(au); + kfree(au); +} + static int ceph_x_create_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) @@ -581,6 +589,8 @@ static int ceph_x_create_authorizer( if (!au) return -ENOMEM; + au->base.destroy = ceph_x_destroy_authorizer; + ret = ceph_x_build_authorizer(ac, th, au); if (ret) { kfree(au); @@ -643,16 +653,6 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, return ret; } -static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac, - struct ceph_authorizer *a) -{ - struct ceph_x_authorizer *au = (void *)a; - - ceph_x_authorizer_cleanup(au); - kfree(au); -} - - static void ceph_x_reset(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; @@ -770,7 +770,6 @@ static const struct ceph_auth_client_ops ceph_x_ops = { 
.create_authorizer = ceph_x_create_authorizer, .update_authorizer = ceph_x_update_authorizer, .verify_authorizer_reply = ceph_x_verify_authorizer_reply, - .destroy_authorizer = ceph_x_destroy_authorizer, .invalidate_authorizer = ceph_x_invalidate_authorizer, .reset = ceph_x_reset, .destroy = ceph_x_destroy, diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h index 40b1a3cf7397..21a5af904bae 100644 --- a/net/ceph/auth_x.h +++ b/net/ceph/auth_x.h @@ -26,6 +26,7 @@ struct ceph_x_ticket_handler { struct ceph_x_authorizer { + struct ceph_authorizer base; struct ceph_crypto_key session_key; struct ceph_buffer *buf; unsigned int service; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 32355d9d0103..40a53a70efdf 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1087,10 +1087,8 @@ static void put_osd(struct ceph_osd *osd) dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), atomic_read(&osd->o_ref) - 1); if (atomic_dec_and_test(&osd->o_ref)) { - struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; - if (osd->o_auth.authorizer) - ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); + ceph_auth_destroy_authorizer(osd->o_auth.authorizer); kfree(osd); } } @@ -2984,7 +2982,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, struct ceph_auth_handshake *auth = &o->o_auth; if (force_new && auth->authorizer) { - ceph_auth_destroy_authorizer(ac, auth->authorizer); + ceph_auth_destroy_authorizer(auth->authorizer); auth->authorizer = NULL; } if (!auth->authorizer) { -- cgit v1.2.3-71-gd317 From 841645b5f2dfceac69b78fcd0c9050868d41ea61 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 25 Apr 2016 15:33:55 -0400 Subject: ipv6: Revert optional address flusing on ifdown. This reverts the following three commits: 70af921db6f8835f4b11c65731116560adb00c14 799977d9aafbf0ca0b9c39b04cbfb16db71302c9 f1705ec197e705b79ea40fe7a2cc5acfa1d3bfac The feature was ill conceived, has terrible semantics, and has added nothing but regressions to the already fragile ipv6 stack. Fixes: f1705ec197e7 ("net: ipv6: Make address flushing on ifdown optional") Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 9 -- include/linux/ipv6.h | 1 - include/uapi/linux/ipv6.h | 1 - net/ipv6/addrconf.c | 162 +++------------------------------ 4 files changed, 12 insertions(+), 161 deletions(-) (limited to 'include') diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index b183e2b606c8..e0ac25210f7c 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1568,15 +1568,6 @@ temp_prefered_lft - INTEGER Preferred lifetime (in seconds) for temporary addresses. Default: 86400 (1 day) -keep_addr_on_down - INTEGER - Keep all IPv6 addresses on an interface down event. If set static - global addresses with no expiration time are not flushed. 
- >0 : enabled - 0 : system default - <0 : disabled - - Default: 0 (addresses are removed) - max_desync_factor - INTEGER Maximum value for DESYNC_FACTOR, which is a random value that ensures that clients don't synchronize with each diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 7edc14fb66b6..4b2267e1b7c3 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -62,7 +62,6 @@ struct ipv6_devconf { struct in6_addr secret; } stable_secret; __s32 use_oif_addrs_only; - __s32 keep_addr_on_down; void *sysctl; }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 395876060f50..ec117b65d5a5 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -176,7 +176,6 @@ enum { DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN, DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, DEVCONF_DROP_UNSOLICITED_NA, - DEVCONF_KEEP_ADDR_ON_DOWN, DEVCONF_MAX }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 23cec53b568a..d77ba395d593 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -216,7 +216,6 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { }, .use_oif_addrs_only = 0, .ignore_routes_with_linkdown = 0, - .keep_addr_on_down = 0, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -261,7 +260,6 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { }, .use_oif_addrs_only = 0, .ignore_routes_with_linkdown = 0, - .keep_addr_on_down = 0, }; /* Check if a valid qdisc is available */ @@ -3176,81 +3174,6 @@ static void addrconf_gre_config(struct net_device *dev) } #endif -#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) -/* If the host route is cached on the addr struct make sure it is associated - * with the proper table. e.g., enslavement can change and if so the cached - * host route needs to move to the new table. - */ -static void l3mdev_check_host_rt(struct inet6_dev *idev, - struct inet6_ifaddr *ifp) -{ - if (ifp->rt) { - u32 tb_id = l3mdev_fib_table(idev->dev) ? 
: RT6_TABLE_LOCAL; - - if (tb_id != ifp->rt->rt6i_table->tb6_id) { - ip6_del_rt(ifp->rt); - ifp->rt = NULL; - } - } -} -#else -static void l3mdev_check_host_rt(struct inet6_dev *idev, - struct inet6_ifaddr *ifp) -{ -} -#endif - -static int fixup_permanent_addr(struct inet6_dev *idev, - struct inet6_ifaddr *ifp) -{ - l3mdev_check_host_rt(idev, ifp); - - if (!ifp->rt) { - struct rt6_info *rt; - - rt = addrconf_dst_alloc(idev, &ifp->addr, false); - if (unlikely(IS_ERR(rt))) - return PTR_ERR(rt); - - ifp->rt = rt; - } - - if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) { - addrconf_prefix_route(&ifp->addr, ifp->prefix_len, - idev->dev, 0, 0); - } - - addrconf_dad_start(ifp); - - return 0; -} - -static void addrconf_permanent_addr(struct net_device *dev) -{ - struct inet6_ifaddr *ifp, *tmp; - struct inet6_dev *idev; - - idev = __in6_dev_get(dev); - if (!idev) - return; - - write_lock_bh(&idev->lock); - - list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) { - if ((ifp->flags & IFA_F_PERMANENT) && - fixup_permanent_addr(idev, ifp) < 0) { - write_unlock_bh(&idev->lock); - ipv6_del_addr(ifp); - write_lock_bh(&idev->lock); - - net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n", - idev->dev->name, &ifp->addr); - } - } - - write_unlock_bh(&idev->lock); -} - static int addrconf_notify(struct notifier_block *this, unsigned long event, void *ptr) { @@ -3337,9 +3260,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, run_pending = 1; } - /* restore routes for permanent addresses */ - addrconf_permanent_addr(dev); - switch (dev->type) { #if IS_ENABLED(CONFIG_IPV6_SIT) case ARPHRD_SIT: @@ -3448,20 +3368,11 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event) ipv6_mc_unmap(idev); } -static bool addr_is_local(const struct in6_addr *addr) -{ - return ipv6_addr_type(addr) & - (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); -} - static int addrconf_ifdown(struct net_device *dev, int how) { struct net *net = dev_net(dev); struct inet6_dev *idev; - struct inet6_ifaddr *ifa, *tmp; - struct list_head del_list; - int _keep_addr; - bool keep_addr; + struct inet6_ifaddr *ifa; int state, i; ASSERT_RTNL(); @@ -3488,16 +3399,6 @@ static int addrconf_ifdown(struct net_device *dev, int how) } - /* aggregate the system setting and interface setting */ - _keep_addr = net->ipv6.devconf_all->keep_addr_on_down; - if (!_keep_addr) - _keep_addr = idev->cnf.keep_addr_on_down; - - /* combine the user config with event to determine if permanent - * addresses are to be removed from address hash table - */ - keep_addr = !(how || _keep_addr <= 0); - /* Step 2: clear hash table */ for (i = 0; i < IN6_ADDR_HSIZE; i++) { struct hlist_head *h = &inet6_addr_lst[i]; @@ -3506,16 +3407,9 @@ static int addrconf_ifdown(struct net_device *dev, int how) restart: hlist_for_each_entry_rcu(ifa, h, addr_lst) { if (ifa->idev == idev) { + hlist_del_init_rcu(&ifa->addr_lst); addrconf_del_dad_work(ifa); - /* combined flag + permanent flag decide if - * address is retained on a down event - */ - if (!keep_addr || - !(ifa->flags & IFA_F_PERMANENT) || - addr_is_local(&ifa->addr)) { - hlist_del_init_rcu(&ifa->addr_lst); - goto restart; - } + goto restart; } } spin_unlock_bh(&addrconf_hash_lock); @@ -3549,54 +3443,31 @@ restart: write_lock_bh(&idev->lock); } - /* re-combine the user config with event to determine if permanent - * addresses are to be removed from the interface list - */ - keep_addr = (!how && _keep_addr > 0); - - INIT_LIST_HEAD(&del_list); - 
list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { + while (!list_empty(&idev->addr_list)) { + ifa = list_first_entry(&idev->addr_list, + struct inet6_ifaddr, if_list); addrconf_del_dad_work(ifa); - write_unlock_bh(&idev->lock); - spin_lock_bh(&ifa->lock); - - if (keep_addr && (ifa->flags & IFA_F_PERMANENT) && - !addr_is_local(&ifa->addr)) { - /* set state to skip the notifier below */ - state = INET6_IFADDR_STATE_DEAD; - ifa->state = 0; - if (!(ifa->flags & IFA_F_NODAD)) - ifa->flags |= IFA_F_TENTATIVE; - } else { - state = ifa->state; - ifa->state = INET6_IFADDR_STATE_DEAD; + list_del(&ifa->if_list); - list_del(&ifa->if_list); - list_add(&ifa->if_list, &del_list); - } + write_unlock_bh(&idev->lock); + spin_lock_bh(&ifa->lock); + state = ifa->state; + ifa->state = INET6_IFADDR_STATE_DEAD; spin_unlock_bh(&ifa->lock); if (state != INET6_IFADDR_STATE_DEAD) { __ipv6_ifa_notify(RTM_DELADDR, ifa); inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); } + in6_ifa_put(ifa); write_lock_bh(&idev->lock); } write_unlock_bh(&idev->lock); - /* now clean up addresses to be removed */ - while (!list_empty(&del_list)) { - ifa = list_first_entry(&del_list, - struct inet6_ifaddr, if_list); - list_del(&ifa->if_list); - - in6_ifa_put(ifa); - } - /* Step 5: Discard anycast and multicast list */ if (how) { ipv6_ac_destroy_dev(idev); @@ -4861,7 +4732,6 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only; array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast; array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na; - array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down; } static inline size_t inet6_ifla6_size(void) @@ -5949,14 +5819,6 @@ static struct addrconf_sysctl_table .mode = 0644, .proc_handler = proc_dointvec, }, - { - .procname = "keep_addr_on_down", - .data = &ipv6_devconf.keep_addr_on_down, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - - }, { /* sentinel */ } -- cgit v1.2.3-71-gd317 From 5cf1cacb49aee39c3e02ae87068fc3c6430659b0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 21 Apr 2016 19:06:48 -0400 Subject: cgroup, cpuset: replace cpuset_post_attach_flush() with cgroup_subsys->post_attach callback Since e93ad19d0564 ("cpuset: make mm migration asynchronous"), cpuset kicks off asynchronous NUMA node migration if necessary during task migration and flushes it from cpuset_post_attach_flush() which is called at the end of __cgroup_procs_write(). This is to avoid performing migration with cgroup_threadgroup_rwsem write-locked which can lead to deadlock through dependency on kworker creation. memcg has a similar issue with charge moving, so let's convert it to an official callback rather than the current one-off cpuset specific function. This patch adds cgroup_subsys->post_attach callback and makes cpuset register cpuset_post_attach_flush() as its ->post_attach. The conversion is mostly one-to-one except that the new callback is called under cgroup_mutex. This is to guarantee that no other migration operations are started before ->post_attach callbacks are finished. cgroup_mutex is one of the outermost mutex in the system and has never been and shouldn't be a problem. We can add specialized synchronization around __cgroup_procs_write() but I don't think there's any noticeable benefit. 
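A sketch of how a controller hooks the new callback, modelled on the cpuset conversion in this patch; example_cgrp_subsys, example_attach and example_flush_wq are hypothetical names used only for illustration, and the css_alloc/css_free boilerplate of a real controller is omitted.

#include <linux/cgroup.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_flush_wq;	/* hypothetical */

static void example_attach(struct cgroup_taskset *tset)
{
	/* kick off deferred per-task work here (e.g. queue_work() on
	 * example_flush_wq) instead of doing it inline, so nothing depends
	 * on kworker creation while cgroup_threadgroup_rwsem is write-locked */
}

static void example_post_attach(void)
{
	/* runs after cgroup_threadgroup_rwsem has been dropped but still
	 * under cgroup_mutex, so waiting for the deferred work is safe */
	flush_workqueue(example_flush_wq);
}

struct cgroup_subsys example_cgrp_subsys = {
	.attach		= example_attach,
	.post_attach	= example_post_attach,
	/* css_alloc/css_free and the rest of a real controller omitted */
};
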
Signed-off-by: Tejun Heo Cc: Li Zefan Cc: Johannes Weiner Cc: Michal Hocko Cc: # 4.4+ prerequisite for the next patch --- include/linux/cgroup-defs.h | 1 + include/linux/cpuset.h | 6 ------ kernel/cgroup.c | 7 +++++-- kernel/cpuset.c | 4 ++-- 4 files changed, 8 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 3e39ae5bc799..5b17de62c962 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -444,6 +444,7 @@ struct cgroup_subsys { int (*can_attach)(struct cgroup_taskset *tset); void (*cancel_attach)(struct cgroup_taskset *tset); void (*attach)(struct cgroup_taskset *tset); + void (*post_attach)(void); int (*can_fork)(struct task_struct *task); void (*cancel_fork)(struct task_struct *task); void (*fork)(struct task_struct *task); diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index fea160ee5803..85a868ccb493 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask) task_unlock(current); } -extern void cpuset_post_attach_flush(void); - #else /* !CONFIG_CPUSETS */ static inline bool cpusets_enabled(void) { return false; } @@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq) return false; } -static inline void cpuset_post_attach_flush(void) -{ -} - #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 671dc05c0b0f..909a7d31ffd3 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2825,9 +2825,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off, bool threadgroup) { struct task_struct *tsk; + struct cgroup_subsys *ss; struct cgroup *cgrp; pid_t pid; - int ret; + int ssid, ret; if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) return -EINVAL; @@ -2875,8 +2876,10 @@ out_unlock_rcu: rcu_read_unlock(); out_unlock_threadgroup: percpu_up_write(&cgroup_threadgroup_rwsem); + for_each_subsys(ss, ssid) + if (ss->post_attach) + ss->post_attach(); cgroup_kn_unlock(of->kn); - cpuset_post_attach_flush(); return ret ?: nbytes; } diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 00ab5c2b7c5b..1902956baba1 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -58,7 +58,6 @@ #include #include #include -#include #include #include @@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, } } -void cpuset_post_attach_flush(void) +static void cpuset_post_attach(void) { flush_workqueue(cpuset_migrate_mm_wq); } @@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = { .can_attach = cpuset_can_attach, .cancel_attach = cpuset_cancel_attach, .attach = cpuset_attach, + .post_attach = cpuset_post_attach, .bind = cpuset_bind, .legacy_cftypes = files, .early_init = true, -- cgit v1.2.3-71-gd317 From bb03ed216370cb021f377f923471e56d1de3ff5d Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 21 Apr 2016 16:39:17 +0200 Subject: ALSA: hda - Update BCLK also at hotplug for i915 HSW/BDW The recent bug report suggests that BCLK setup for i915 HSW/BDW needs to be updated at each HDMI hotplug, not only at initialization and resume. That is, we need to update HSW_EM4 and HSW_EM5 registers at ELD notification, too. Otherwise the HDMI audio may be out of sync and played in a wrong pitch. However, the HDA codec driver has no access to the controller registers, and currently the code managing these registers is in hda_intel.c, i.e. 
local to the controller driver. For allowing the explicit BCLK update from the codec driver, as in this patch, the former haswell_set_bclk() in hda_intel.c is moved to hdac_i915.c and exposed as snd_hdac_i915_set_bclk(). This is called from both the HDA controller driver and intel_pin_eld_notify() in HDMI codec driver. Along with this change, snd_hdac_get_display_clk() gets dropped as it's no longer used. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91410 Cc: # v4.5+ Signed-off-by: Takashi Iwai --- include/sound/hda_i915.h | 5 ++-- sound/hda/hdac_i915.c | 62 ++++++++++++++++++++++++++++++++++++++-------- sound/pci/hda/hda_intel.c | 56 +++-------------------------------------- sound/pci/hda/patch_hdmi.c | 1 + 4 files changed, 58 insertions(+), 66 deletions(-) (limited to 'include') diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h index fa341fcb5829..f5842bcd9c94 100644 --- a/include/sound/hda_i915.h +++ b/include/sound/hda_i915.h @@ -9,7 +9,7 @@ #ifdef CONFIG_SND_HDA_I915 int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); int snd_hdac_display_power(struct hdac_bus *bus, bool enable); -int snd_hdac_get_display_clk(struct hdac_bus *bus); +void snd_hdac_i915_set_bclk(struct hdac_bus *bus); int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, bool *audio_enabled, char *buffer, int max_bytes); @@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable) { return 0; } -static inline int snd_hdac_get_display_clk(struct hdac_bus *bus) +static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus) { - return 0; } static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate) diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 54babe1c0b16..607bbeaebddf 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -20,6 +20,7 @@ #include #include #include +#include static struct i915_audio_component *hdac_acomp; @@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable) } EXPORT_SYMBOL_GPL(snd_hdac_display_power); +#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ + ((pci)->device == 0x0c0c) || \ + ((pci)->device == 0x0d0c) || \ + ((pci)->device == 0x160c)) + /** - * snd_hdac_get_display_clk - Get CDCLK in kHz + * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW * @bus: HDA core bus * - * This function is supposed to be used only by a HD-audio controller - * driver that needs the interaction with i915 graphics. + * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK + * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value) + * are used to convert CDClk (Core Display Clock) to 24MHz BCLK: + * BCLK = CDCLK * M / N + * The values will be lost when the display power well is disabled and need to + * be restored to avoid abnormal playback speed. * - * This function queries CDCLK value in kHz from the graphics driver and - * returns the value. A negative code is returned in error. + * Call this function at initializing and changing power well, as well as + * at ELD notifier for the hotplug. 
*/ -int snd_hdac_get_display_clk(struct hdac_bus *bus) +void snd_hdac_i915_set_bclk(struct hdac_bus *bus) { struct i915_audio_component *acomp = bus->audio_component; + struct pci_dev *pci = to_pci_dev(bus->dev); + int cdclk_freq; + unsigned int bclk_m, bclk_n; + + if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq) + return; /* only for i915 binding */ + if (!CONTROLLER_IN_GPU(pci)) + return; /* only HSW/BDW */ + + cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev); + switch (cdclk_freq) { + case 337500: + bclk_m = 16; + bclk_n = 225; + break; + + case 450000: + default: /* default CDCLK 450MHz */ + bclk_m = 4; + bclk_n = 75; + break; + + case 540000: + bclk_m = 4; + bclk_n = 90; + break; + + case 675000: + bclk_m = 8; + bclk_n = 225; + break; + } - if (!acomp || !acomp->ops) - return -ENODEV; - - return acomp->ops->get_cdclk_freq(acomp->dev); + snd_hdac_chip_writew(bus, HSW_EM4, bclk_m); + snd_hdac_chip_writew(bus, HSW_EM5, bclk_n); } -EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk); +EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk); /* There is a fixed mapping between audio pin node and display port * on current Intel platforms: diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 637b8a0e2a91..9a0d1445ca5c 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp) #define azx_del_card_list(chip) /* NOP */ #endif /* CONFIG_PM */ -/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK - * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value) - * are used to convert CDClk (Core Display Clock) to 24MHz BCLK: - * BCLK = CDCLK * M / N - * The values will be lost when the display power well is disabled and need to - * be restored to avoid abnormal playback speed. 
- */ -static void haswell_set_bclk(struct hda_intel *hda) -{ - struct azx *chip = &hda->chip; - int cdclk_freq; - unsigned int bclk_m, bclk_n; - - if (!hda->need_i915_power) - return; - - cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip)); - switch (cdclk_freq) { - case 337500: - bclk_m = 16; - bclk_n = 225; - break; - - case 450000: - default: /* default CDCLK 450MHz */ - bclk_m = 4; - bclk_n = 75; - break; - - case 540000: - bclk_m = 4; - bclk_n = 90; - break; - - case 675000: - bclk_m = 8; - bclk_n = 225; - break; - } - - azx_writew(chip, HSW_EM4, bclk_m); - azx_writew(chip, HSW_EM5, bclk_n); -} - #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO) /* * power management @@ -958,7 +914,7 @@ static int azx_resume(struct device *dev) if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL && hda->need_i915_power) { snd_hdac_display_power(azx_bus(chip), true); - haswell_set_bclk(hda); + snd_hdac_i915_set_bclk(azx_bus(chip)); } if (chip->msi) if (pci_enable_msi(pci) < 0) @@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev) bus = azx_bus(chip); if (hda->need_i915_power) { snd_hdac_display_power(bus, true); - haswell_set_bclk(hda); + snd_hdac_i915_set_bclk(bus); } else { /* toggle codec wakeup bit for STATESTS read */ snd_hdac_set_codec_wakeup(bus, true); @@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip) /* initialize chip */ azx_init_pci(chip); - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { - struct hda_intel *hda; - - hda = container_of(chip, struct hda_intel, chip); - haswell_set_bclk(hda); - } + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) + snd_hdac_i915_set_bclk(bus); hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 40933aa33afe..1483f85999ec 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2232,6 +2232,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port) if (atomic_read(&(codec)->core.in_pm)) return; + snd_hdac_i915_set_bclk(&codec->bus->core); check_presence_and_report(codec, pin_nid); } -- cgit v1.2.3-71-gd317 From 6a923934c33c750a595868af6bef5f1a1fa90054 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 26 Apr 2016 11:47:41 -0400 Subject: Revert "ipv6: Revert optional address flusing on ifdown." This reverts commit 841645b5f2dfceac69b78fcd0c9050868d41ea61. Ok, this puts the feature back. I've decided to apply David A.'s bug fix and run with that rather than make everyone wait another whole release for this feature. Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 9 ++ include/linux/ipv6.h | 1 + include/uapi/linux/ipv6.h | 1 + net/ipv6/addrconf.c | 162 ++++++++++++++++++++++++++++++--- 4 files changed, 161 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index e0ac25210f7c..b183e2b606c8 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1568,6 +1568,15 @@ temp_prefered_lft - INTEGER Preferred lifetime (in seconds) for temporary addresses. Default: 86400 (1 day) +keep_addr_on_down - INTEGER + Keep all IPv6 addresses on an interface down event. If set static + global addresses with no expiration time are not flushed. 
+ >0 : enabled + 0 : system default + <0 : disabled + + Default: 0 (addresses are removed) + max_desync_factor - INTEGER Maximum value for DESYNC_FACTOR, which is a random value that ensures that clients don't synchronize with each diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 4b2267e1b7c3..7edc14fb66b6 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -62,6 +62,7 @@ struct ipv6_devconf { struct in6_addr secret; } stable_secret; __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; void *sysctl; }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index ec117b65d5a5..395876060f50 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -176,6 +176,7 @@ enum { DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN, DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, DEVCONF_DROP_UNSOLICITED_NA, + DEVCONF_KEEP_ADDR_ON_DOWN, DEVCONF_MAX }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d77ba395d593..23cec53b568a 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -216,6 +216,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { }, .use_oif_addrs_only = 0, .ignore_routes_with_linkdown = 0, + .keep_addr_on_down = 0, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -260,6 +261,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { }, .use_oif_addrs_only = 0, .ignore_routes_with_linkdown = 0, + .keep_addr_on_down = 0, }; /* Check if a valid qdisc is available */ @@ -3174,6 +3176,81 @@ static void addrconf_gre_config(struct net_device *dev) } #endif +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) +/* If the host route is cached on the addr struct make sure it is associated + * with the proper table. e.g., enslavement can change and if so the cached + * host route needs to move to the new table. + */ +static void l3mdev_check_host_rt(struct inet6_dev *idev, + struct inet6_ifaddr *ifp) +{ + if (ifp->rt) { + u32 tb_id = l3mdev_fib_table(idev->dev) ? 
: RT6_TABLE_LOCAL; + + if (tb_id != ifp->rt->rt6i_table->tb6_id) { + ip6_del_rt(ifp->rt); + ifp->rt = NULL; + } + } +} +#else +static void l3mdev_check_host_rt(struct inet6_dev *idev, + struct inet6_ifaddr *ifp) +{ +} +#endif + +static int fixup_permanent_addr(struct inet6_dev *idev, + struct inet6_ifaddr *ifp) +{ + l3mdev_check_host_rt(idev, ifp); + + if (!ifp->rt) { + struct rt6_info *rt; + + rt = addrconf_dst_alloc(idev, &ifp->addr, false); + if (unlikely(IS_ERR(rt))) + return PTR_ERR(rt); + + ifp->rt = rt; + } + + if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) { + addrconf_prefix_route(&ifp->addr, ifp->prefix_len, + idev->dev, 0, 0); + } + + addrconf_dad_start(ifp); + + return 0; +} + +static void addrconf_permanent_addr(struct net_device *dev) +{ + struct inet6_ifaddr *ifp, *tmp; + struct inet6_dev *idev; + + idev = __in6_dev_get(dev); + if (!idev) + return; + + write_lock_bh(&idev->lock); + + list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) { + if ((ifp->flags & IFA_F_PERMANENT) && + fixup_permanent_addr(idev, ifp) < 0) { + write_unlock_bh(&idev->lock); + ipv6_del_addr(ifp); + write_lock_bh(&idev->lock); + + net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n", + idev->dev->name, &ifp->addr); + } + } + + write_unlock_bh(&idev->lock); +} + static int addrconf_notify(struct notifier_block *this, unsigned long event, void *ptr) { @@ -3260,6 +3337,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, run_pending = 1; } + /* restore routes for permanent addresses */ + addrconf_permanent_addr(dev); + switch (dev->type) { #if IS_ENABLED(CONFIG_IPV6_SIT) case ARPHRD_SIT: @@ -3368,11 +3448,20 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event) ipv6_mc_unmap(idev); } +static bool addr_is_local(const struct in6_addr *addr) +{ + return ipv6_addr_type(addr) & + (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); +} + static int addrconf_ifdown(struct net_device *dev, int how) { struct net *net = dev_net(dev); struct inet6_dev *idev; - struct inet6_ifaddr *ifa; + struct inet6_ifaddr *ifa, *tmp; + struct list_head del_list; + int _keep_addr; + bool keep_addr; int state, i; ASSERT_RTNL(); @@ -3399,6 +3488,16 @@ static int addrconf_ifdown(struct net_device *dev, int how) } + /* aggregate the system setting and interface setting */ + _keep_addr = net->ipv6.devconf_all->keep_addr_on_down; + if (!_keep_addr) + _keep_addr = idev->cnf.keep_addr_on_down; + + /* combine the user config with event to determine if permanent + * addresses are to be removed from address hash table + */ + keep_addr = !(how || _keep_addr <= 0); + /* Step 2: clear hash table */ for (i = 0; i < IN6_ADDR_HSIZE; i++) { struct hlist_head *h = &inet6_addr_lst[i]; @@ -3407,9 +3506,16 @@ static int addrconf_ifdown(struct net_device *dev, int how) restart: hlist_for_each_entry_rcu(ifa, h, addr_lst) { if (ifa->idev == idev) { - hlist_del_init_rcu(&ifa->addr_lst); addrconf_del_dad_work(ifa); - goto restart; + /* combined flag + permanent flag decide if + * address is retained on a down event + */ + if (!keep_addr || + !(ifa->flags & IFA_F_PERMANENT) || + addr_is_local(&ifa->addr)) { + hlist_del_init_rcu(&ifa->addr_lst); + goto restart; + } } } spin_unlock_bh(&addrconf_hash_lock); @@ -3443,31 +3549,54 @@ restart: write_lock_bh(&idev->lock); } - while (!list_empty(&idev->addr_list)) { - ifa = list_first_entry(&idev->addr_list, - struct inet6_ifaddr, if_list); - addrconf_del_dad_work(ifa); + /* re-combine the user config with event to determine if permanent 
+ * addresses are to be removed from the interface list + */ + keep_addr = (!how && _keep_addr > 0); - list_del(&ifa->if_list); + INIT_LIST_HEAD(&del_list); + list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { + addrconf_del_dad_work(ifa); write_unlock_bh(&idev->lock); - spin_lock_bh(&ifa->lock); - state = ifa->state; - ifa->state = INET6_IFADDR_STATE_DEAD; + + if (keep_addr && (ifa->flags & IFA_F_PERMANENT) && + !addr_is_local(&ifa->addr)) { + /* set state to skip the notifier below */ + state = INET6_IFADDR_STATE_DEAD; + ifa->state = 0; + if (!(ifa->flags & IFA_F_NODAD)) + ifa->flags |= IFA_F_TENTATIVE; + } else { + state = ifa->state; + ifa->state = INET6_IFADDR_STATE_DEAD; + + list_del(&ifa->if_list); + list_add(&ifa->if_list, &del_list); + } + spin_unlock_bh(&ifa->lock); if (state != INET6_IFADDR_STATE_DEAD) { __ipv6_ifa_notify(RTM_DELADDR, ifa); inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); } - in6_ifa_put(ifa); write_lock_bh(&idev->lock); } write_unlock_bh(&idev->lock); + /* now clean up addresses to be removed */ + while (!list_empty(&del_list)) { + ifa = list_first_entry(&del_list, + struct inet6_ifaddr, if_list); + list_del(&ifa->if_list); + + in6_ifa_put(ifa); + } + /* Step 5: Discard anycast and multicast list */ if (how) { ipv6_ac_destroy_dev(idev); @@ -4732,6 +4861,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only; array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast; array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na; + array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down; } static inline size_t inet6_ifla6_size(void) @@ -5819,6 +5949,14 @@ static struct addrconf_sysctl_table .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "keep_addr_on_down", + .data = &ipv6_devconf.keep_addr_on_down, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + + }, { /* sentinel */ } -- cgit v1.2.3-71-gd317 From 8ead9dd54716d1e05e129959f702fcc1786f82b4 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 25 Apr 2016 20:04:08 -0700 Subject: devpts: more pty driver interface cleanups This is more prep-work for the upcoming pty changes. Still just code cleanup with no actual semantic changes. This removes a bunch pointless complexity by just having the slave pty side remember the dentry associated with the devpts slave rather than the inode. That allows us to remove all the "look up the dentry" code for when we want to remove it again. Together with moving the tty pointer from "inode->i_private" to "dentry->d_fsdata" and getting rid of pointless inode locking, this removes about 30 lines of code. Not only is the end result smaller, it's simpler and easier to understand. The old code, for example, depended on the d_find_alias() to not just find the dentry, but also to check that it is still hashed, which in turn validated the tty pointer in the inode. That is a _very_ roundabout way to say "invalidate the cached tty pointer when the dentry is removed". The new code just does dentry->d_fsdata = NULL; in devpts_pty_kill() instead, invalidating the tty pointer rather more directly and obviously. Don't do something complex and subtle when the obvious straightforward approach will do. The rest of the patch (ie apart from code deletion and the above tty pointer clearing) is just switching the calling convention to pass the dentry or file pointer around instead of the inode. 
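For illustration only (hypothetical types, not the devpts code): the shape of the simplification is caching the private pointer on the object itself and invalidating it by clearing that slot, instead of re-deriving it through an alias lookup.

        #include <assert.h>
        #include <stdio.h>

        /* hypothetical stand-in for struct dentry with a d_fsdata-style slot */
        struct node {
                void *fsdata;
        };

        /* devpts_pty_new() analogue: remember the driver's private pointer */
        static void node_attach(struct node *n, void *priv)
        {
                n->fsdata = priv;
        }

        /* devpts_get_priv() analogue: no alias lookup, just read the slot back */
        static void *node_get_priv(struct node *n)
        {
                return n->fsdata;
        }

        /* devpts_pty_kill() analogue: clearing the slot is the invalidation */
        static void node_kill(struct node *n)
        {
                n->fsdata = NULL;
        }

        int main(void)
        {
                struct node slave = { 0 };
                int tty_priv = 42;

                node_attach(&slave, &tty_priv);
                assert(node_get_priv(&slave) == &tty_priv);

                node_kill(&slave);
                assert(node_get_priv(&slave) == NULL);  /* stale pointer is gone */

                puts("private pointer invalidated by clearing, no alias lookup needed");
                return 0;
        }

The real patch does the same with dentry->d_fsdata: devpts_pty_new() stores priv there, devpts_get_priv() simply reads it back, and devpts_pty_kill() clears it before dropping the dentry.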
Cc: Eric Biederman Cc: Peter Anvin Cc: Andy Lutomirski Cc: Al Viro Cc: Peter Hurley Cc: Serge Hallyn Cc: Willy Tarreau Cc: Aurelien Jarno Cc: Alan Cox Cc: Jann Horn Cc: Greg KH Cc: Jiri Slaby Cc: Florian Weimer Signed-off-by: Linus Torvalds --- drivers/tty/pty.c | 18 +++++++--------- drivers/tty/tty_io.c | 6 +++--- fs/devpts/inode.c | 53 +++++++++++----------------------------------- include/linux/devpts_fs.h | 6 +++--- include/linux/tty_driver.h | 4 ++-- 5 files changed, 28 insertions(+), 59 deletions(-) (limited to 'include') diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 0058d9fbf931..cf0dc51a2690 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -626,7 +626,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty, */ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver, - struct inode *ptm_inode, int idx) + struct file *file, int idx) { /* Master must be open via /dev/ptmx */ return ERR_PTR(-EIO); @@ -642,12 +642,12 @@ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver, */ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver, - struct inode *pts_inode, int idx) + struct file *file, int idx) { struct tty_struct *tty; mutex_lock(&devpts_mutex); - tty = devpts_get_priv(pts_inode); + tty = devpts_get_priv(file->f_path.dentry); mutex_unlock(&devpts_mutex); /* Master must be open before slave */ if (!tty) @@ -722,7 +722,7 @@ static int ptmx_open(struct inode *inode, struct file *filp) { struct pts_fs_info *fsi; struct tty_struct *tty; - struct inode *slave_inode; + struct dentry *dentry; int retval; int index; @@ -769,14 +769,12 @@ static int ptmx_open(struct inode *inode, struct file *filp) tty_add_file(tty, filp); - slave_inode = devpts_pty_new(fsi, - MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index, - tty->link); - if (IS_ERR(slave_inode)) { - retval = PTR_ERR(slave_inode); + dentry = devpts_pty_new(fsi, index, tty->link); + if (IS_ERR(dentry)) { + retval = PTR_ERR(dentry); goto err_release; } - tty->link->driver_data = slave_inode; + tty->link->driver_data = dentry; retval = ptm_driver->ops->open(tty, filp); if (retval) diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 9b04d72e752e..24d5491ef0da 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1367,12 +1367,12 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p) * Locking: tty_mutex must be held. If the tty is found, bump the tty kref. */ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, - struct inode *inode, int idx) + struct file *file, int idx) { struct tty_struct *tty; if (driver->ops->lookup) - tty = driver->ops->lookup(driver, inode, idx); + tty = driver->ops->lookup(driver, file, idx); else tty = driver->ttys[idx]; @@ -2040,7 +2040,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode, } /* check whether we're reopening an existing tty */ - tty = tty_driver_lookup_tty(driver, inode, index); + tty = tty_driver_lookup_tty(driver, filp, index); if (IS_ERR(tty)) { mutex_unlock(&tty_mutex); goto out; diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 0af8e7d70d27..0b2954d7172d 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -604,8 +604,7 @@ void devpts_put_ref(struct pts_fs_info *fsi) * * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill. 
*/ -struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index, - void *priv) +struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv) { struct dentry *dentry; struct super_block *sb; @@ -629,25 +628,21 @@ struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index, inode->i_uid = opts->setuid ? opts->uid : current_fsuid(); inode->i_gid = opts->setgid ? opts->gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; - init_special_inode(inode, S_IFCHR|opts->mode, device); - inode->i_private = priv; + init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index)); sprintf(s, "%d", index); - inode_lock(d_inode(root)); - dentry = d_alloc_name(root, s); if (dentry) { + dentry->d_fsdata = priv; d_add(dentry, inode); fsnotify_create(d_inode(root), dentry); } else { iput(inode); - inode = ERR_PTR(-ENOMEM); + dentry = ERR_PTR(-ENOMEM); } - inode_unlock(d_inode(root)); - - return inode; + return dentry; } /** @@ -656,24 +651,10 @@ struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index, * * Returns whatever was passed as priv in devpts_pty_new for a given inode. */ -void *devpts_get_priv(struct inode *pts_inode) +void *devpts_get_priv(struct dentry *dentry) { - struct dentry *dentry; - void *priv = NULL; - - BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); - - /* Ensure dentry has not been deleted by devpts_pty_kill() */ - dentry = d_find_alias(pts_inode); - if (!dentry) - return NULL; - - if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) - priv = pts_inode->i_private; - - dput(dentry); - - return priv; + WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC); + return dentry->d_fsdata; } /** @@ -682,24 +663,14 @@ void *devpts_get_priv(struct inode *pts_inode) * * This is an inverse operation of devpts_pty_new. */ -void devpts_pty_kill(struct inode *inode) +void devpts_pty_kill(struct dentry *dentry) { - struct super_block *sb = pts_sb_from_inode(inode); - struct dentry *root = sb->s_root; - struct dentry *dentry; + WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC); - BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); - - inode_lock(d_inode(root)); - - dentry = d_find_alias(inode); - - drop_nlink(inode); + dentry->d_fsdata = NULL; + drop_nlink(dentry->d_inode); d_delete(dentry); dput(dentry); /* d_alloc_name() in devpts_pty_new() */ - dput(dentry); /* d_find_alias above */ - - inode_unlock(d_inode(root)); } static int __init init_devpts_fs(void) diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 358a4db72a27..5871f292b596 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h @@ -27,11 +27,11 @@ int devpts_new_index(struct pts_fs_info *); void devpts_kill_index(struct pts_fs_info *, int); /* mknod in devpts */ -struct inode *devpts_pty_new(struct pts_fs_info *, dev_t, int, void *); +struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *); /* get private structure */ -void *devpts_get_priv(struct inode *pts_inode); +void *devpts_get_priv(struct dentry *); /* unlink */ -void devpts_pty_kill(struct inode *inode); +void devpts_pty_kill(struct dentry *); #endif diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 161052477f77..b742b5e47cc2 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -7,7 +7,7 @@ * defined; unless noted otherwise, they are optional, and can be * filled in with a null pointer. 
* - * struct tty_struct * (*lookup)(struct tty_driver *self, int idx) + * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx) * * Return the tty device corresponding to idx, NULL if there is not * one currently in use and an ERR_PTR value on error. Called under @@ -250,7 +250,7 @@ struct serial_icounter_struct; struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *driver, - struct inode *inode, int idx); + struct file *filp, int idx); int (*install)(struct tty_driver *driver, struct tty_struct *tty); void (*remove)(struct tty_driver *driver, struct tty_struct *tty); int (*open)(struct tty_struct * tty, struct file * filp); -- cgit v1.2.3-71-gd317 From 986ef95ecdd3eb6fa29433e68faa94c7624083be Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 31 Mar 2016 19:03:25 +0300 Subject: IB/mlx5: Expose correct max_sge_rd limit mlx5 devices (Connect-IB, ConnectX-4, ConnectX-4-LX) has a limitation where rdma read work queue entries cannot exceed 512 bytes. A rdma_read wqe needs to fit in 512 bytes: - wqe control segment (16 bytes) - rdma segment (16 bytes) - scatter elements (16 bytes each) So max_sge_rd should be: (512 - 16 - 16) / 16 = 30. Cc: linux-stable@vger.kernel.org Reported-by: Christoph Hellwig Tested-by: Christoph Hellwig Signed-off-by: Sagi Grimberg Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 2 +- include/linux/mlx5/device.h | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 5acf346e048e..049754f6e638 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -530,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, sizeof(struct mlx5_wqe_ctrl_seg)) / sizeof(struct mlx5_wqe_data_seg); props->max_sge = min(max_rq_sg, max_sq_sg); - props->max_sge_rd = props->max_sge; + props->max_sge_rd = MLX5_MAX_SGE_RD; props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8156e3c9239c..b3575f392492 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -392,6 +392,17 @@ enum { MLX5_CAP_OFF_CMDIF_CSUM = 46, }; +enum { + /* + * Max wqe size for rdma read is 512 bytes, so this + * limits our max_sge_rd as the wqe needs to fit: + * - ctrl segment (16 bytes) + * - rdma segment (16 bytes) + * - scatter elements (16 bytes each) + */ + MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 +}; + struct mlx5_inbox_hdr { __be16 opcode; u8 rsvd[4]; -- cgit v1.2.3-71-gd317 From e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Sun, 10 Apr 2016 19:13:13 -0600 Subject: IB/security: Restrict use of the write() interface The drivers/infiniband stack uses write() as a replacement for bi-directional ioctl(). This is not safe. There are ways to trigger write calls that result in the return structure that is normally written to user space being shunted off to user specified kernel memory instead. For the immediate repair, detect and deny suspicious accesses to the write API. For long term, update the user space libraries and the kernel API to something that doesn't present the same security vulnerabilities (likely a structured ioctl() interface). 
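A minimal sketch of that guard, with hypothetical names rather than the RDMA uAPI: record who opened the file and refuse the write before any command header is parsed if the current caller does not match. This models only the credential comparison, not the address-limit check.

        #include <errno.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/types.h>
        #include <unistd.h>

        /* hypothetical stand-in for the credentials captured at open() time */
        struct fake_file {
                uid_t opener_uid;
        };

        /* rough analogue of ib_safe_file_access(): the caller must match the opener */
        static int safe_file_access(const struct fake_file *filp)
        {
                return filp->opener_uid == getuid();
        }

        static ssize_t cmd_write(struct fake_file *filp, const char *buf, size_t len)
        {
                if (!safe_file_access(filp))
                        return -EACCES;         /* refuse before parsing any command */
                if (len < 4)
                        return -EINVAL;
                printf("processing %zu-byte command\n", len);
                return (ssize_t)len;
        }

        int main(void)
        {
                struct fake_file f = { .opener_uid = getuid() };
                char cmd[8];

                memset(cmd, 0, sizeof(cmd));
                printf("write returned %zd\n", cmd_write(&f, cmd, sizeof(cmd)));

                f.opener_uid = (uid_t)-1;       /* simulate a different opener */
                printf("write returned %zd\n", cmd_write(&f, cmd, sizeof(cmd)));
                return 0;
        }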
The impacted uAPI interfaces are generally only available if hardware from drivers/infiniband is installed in the system. Reported-by: Jann Horn Signed-off-by: Linus Torvalds Signed-off-by: Jason Gunthorpe [ Expanded check to all known write() entry points ] Cc: stable@vger.kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/core/ucm.c | 4 ++++ drivers/infiniband/core/ucma.c | 3 +++ drivers/infiniband/core/uverbs_main.c | 5 +++++ drivers/infiniband/hw/qib/qib_file_ops.c | 5 +++++ drivers/staging/rdma/hfi1/TODO | 2 +- drivers/staging/rdma/hfi1/file_ops.c | 6 ++++++ include/rdma/ib.h | 16 ++++++++++++++++ 7 files changed, 40 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 4a9aa0433b07..7713ef089c3c 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -48,6 +48,7 @@ #include +#include #include #include #include @@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, struct ib_ucm_cmd_hdr hdr; ssize_t result; + if (WARN_ON_ONCE(!ib_safe_file_access(filp))) + return -EACCES; + if (len < sizeof(hdr)) return -EINVAL; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index dd3bcceadfde..c0f3826abb30 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf, struct rdma_ucm_cmd_hdr hdr; ssize_t ret; + if (WARN_ON_ONCE(!ib_safe_file_access(filp))) + return -EACCES; + if (len < sizeof(hdr)) return -EINVAL; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 28ba2cc81535..31f422a70623 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -48,6 +48,8 @@ #include +#include + #include "uverbs.h" MODULE_AUTHOR("Roland Dreier"); @@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, int srcu_key; ssize_t ret; + if (WARN_ON_ONCE(!ib_safe_file_access(filp))) + return -EACCES; + if (count < sizeof hdr) return -EINVAL; diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index e449e394963f..24f4a782e0f4 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -45,6 +45,8 @@ #include #include +#include + #include "qib.h" #include "qib_common.h" #include "qib_user_sdma.h" @@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data, ssize_t ret = 0; void *dest; + if (WARN_ON_ONCE(!ib_safe_file_access(fp))) + return -EACCES; + if (count < sizeof(cmd.type)) { ret = -EINVAL; goto bail; diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO index 05de0dad8762..4c6f1d7d2eaf 100644 --- a/drivers/staging/rdma/hfi1/TODO +++ b/drivers/staging/rdma/hfi1/TODO @@ -3,4 +3,4 @@ July, 2015 - Remove unneeded file entries in sysfs - Remove software processing of IB protocol and place in library for use by qib, ipath (if still present), hfi1, and eventually soft-roce - +- Replace incorrect uAPI diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 541529589736..c1c5bf82addb 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -49,6 +49,8 @@ #include #include +#include + #include "hfi.h" #include "pio.h" #include "device.h" @@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file 
*fp, const char __user *data, int uctxt_required = 1; int must_be_root = 0; + /* FIXME: This interface cannot continue out of staging */ + if (WARN_ON_ONCE(!ib_safe_file_access(fp))) + return -EACCES; + if (count < sizeof(cmd)) { ret = -EINVAL; goto bail; diff --git a/include/rdma/ib.h b/include/rdma/ib.h index cf8f9e700e48..a6b93706b0fc 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -34,6 +34,7 @@ #define _RDMA_IB_H #include +#include struct ib_addr { union { @@ -86,4 +87,19 @@ struct sockaddr_ib { __u64 sib_scope_id; }; +/* + * The IB interfaces that use write() as bi-directional ioctl() are + * fundamentally unsafe, since there are lots of ways to trigger "write()" + * calls from various contexts with elevated privileges. That includes the + * traditional suid executable error message writes, but also various kernel + * interfaces that can write to file descriptors. + * + * This function provides protection for the legacy API by restricting the + * calling context. + */ +static inline bool ib_safe_file_access(struct file *filp) +{ + return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS); +} + #endif /* _RDMA_IB_H */ -- cgit v1.2.3-71-gd317 From 66ee95d16a7f1b7b4f1dd74a2d81c6e19dc29a14 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Thu, 28 Apr 2016 16:18:24 -0700 Subject: mm: exclude HugeTLB pages from THP page_mapped() logic HugeTLB pages cannot be split, so we use the compound_mapcount to track rmaps. Currently page_mapped() will check the compound_mapcount, but will also go through the constituent pages of a THP compound page and query the individual _mapcount's too. Unfortunately, page_mapped() does not distinguish between HugeTLB and THP compound pages and assumes that a compound page always needs to have HPAGE_PMD_NR pages querying. For most cases when dealing with HugeTLB this is just inefficient, but for scenarios where the HugeTLB page size is less than the pmd block size (e.g. when using contiguous bit on ARM) this can lead to crashes. This patch adjusts the page_mapped function such that we skip the unnecessary THP reference checks for HugeTLB pages. Fixes: e1534ae95004 ("mm: differentiate page_mapped() from page_mapcount() for compound pages") Signed-off-by: Steve Capper Acked-by: Kirill A. Shutemov Cc: Will Deacon Cc: Catalin Marinas Cc: Michal Hocko Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index a55e5be0894f..79b6c18d0a38 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1031,6 +1031,8 @@ static inline bool page_mapped(struct page *page) page = compound_head(page); if (atomic_read(compound_mapcount_ptr(page)) >= 0) return true; + if (PageHuge(page)) + return false; for (i = 0; i < hpage_nr_pages(page); i++) { if (atomic_read(&page[i]._mapcount) >= 0) return true; -- cgit v1.2.3-71-gd317 From aa88b68c3b1dce8bc3fd54c8a7372a777ff265cd Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 28 Apr 2016 16:18:27 -0700 Subject: thp: keep huge zero page pinned until tlb flush Andrea has found[1] a race condition on MMU-gather based TLB flush vs split_huge_page() or shrinker which frees huge zero under us (patch 1/2 and 2/2 respectively). With new THP refcounting, we don't need patch 1/2: mmu_gather keeps the page pinned until flush is complete and the pin prevents the page from being split under us. We still need patch 2/2. 
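As a toy model of the pin-until-flush idea (hypothetical names, not the mm code): the reference a mapping holds on a shared page is no longer dropped at zap time but queued on a batch, so the page stays pinned until the batched flush runs.

        #include <assert.h>
        #include <stdio.h>

        /* hypothetical shared object protected by a reference count */
        struct shared_page {
                int refcount;
        };

        /* one base reference plus one held by the mapping we are about to zap */
        static struct shared_page huge_zero = { .refcount = 2 };

        /* batch of deferred releases, loosely analogous to an mmu_gather */
        #define BATCH_MAX 8
        static struct shared_page *batch[BATCH_MAX];
        static int batch_len;

        static void put_page(struct shared_page *p)
        {
                p->refcount--;
        }

        /* zap path: the mapping's reference is NOT dropped here any more */
        static void zap_mapping(struct shared_page *p)
        {
                /* previously: put_page(p) right here, racy vs. the flush */
                if (batch_len < BATCH_MAX)
                        batch[batch_len++] = p;
        }

        /* flush path: only here are the deferred references dropped */
        static void flush_batch(void)
        {
                while (batch_len > 0)
                        put_page(batch[--batch_len]);
        }

        int main(void)
        {
                zap_mapping(&huge_zero);
                /* between zap and flush the page is still pinned */
                assert(huge_zero.refcount == 2);
                flush_batch();
                assert(huge_zero.refcount == 1);
                puts("page stayed pinned until the batched flush completed");
                return 0;
        }

In the patch this corresponds to zap_huge_pmd() handing the huge zero page to tlb_remove_page() instead of calling put_huge_zero_page() directly, with release_pages() dropping the reference once the flush reaches it.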
This is simplified version of Andrea's patch. We don't need fancy encoding. [1] http://lkml.kernel.org/r/1447938052-22165-1-git-send-email-aarcange@redhat.com Signed-off-by: Kirill A. Shutemov Reported-by: Andrea Arcangeli Reviewed-by: Andrea Arcangeli Cc: "Aneesh Kumar K.V" Cc: Mel Gorman Cc: Hugh Dickins Cc: Johannes Weiner Cc: Dave Hansen Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 5 +++++ mm/huge_memory.c | 6 +++--- mm/swap.c | 5 +++++ 3 files changed, 13 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 7008623e24b1..d7b9e5346fba 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -152,6 +152,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) } struct page *get_huge_zero_page(void); +void put_huge_zero_page(void); #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) @@ -208,6 +209,10 @@ static inline bool is_huge_zero_page(struct page *page) return false; } +static inline void put_huge_zero_page(void) +{ + BUILD_BUG(); +} static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 86f9f8b82f8e..5346de05f471 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -232,7 +232,7 @@ retry: return READ_ONCE(huge_zero_page); } -static void put_huge_zero_page(void) +void put_huge_zero_page(void) { /* * Counter should never go to zero here. Only shrinker can put @@ -1684,12 +1684,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (vma_is_dax(vma)) { spin_unlock(ptl); if (is_huge_zero_pmd(orig_pmd)) - put_huge_zero_page(); + tlb_remove_page(tlb, pmd_page(orig_pmd)); } else if (is_huge_zero_pmd(orig_pmd)) { pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); atomic_long_dec(&tlb->mm->nr_ptes); spin_unlock(ptl); - put_huge_zero_page(); + tlb_remove_page(tlb, pmd_page(orig_pmd)); } else { struct page *page = pmd_page(orig_pmd); page_remove_rmap(page, true); diff --git a/mm/swap.c b/mm/swap.c index a0bc206b4ac6..03aacbcb013f 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -728,6 +728,11 @@ void release_pages(struct page **pages, int nr, bool cold) zone = NULL; } + if (is_huge_zero_page(page)) { + put_huge_zero_page(); + continue; + } + page = compound_head(page); if (!put_page_testzero(page)) continue; -- cgit v1.2.3-71-gd317 From 28093f9f34cedeaea0f481c58446d9dac6dd620f Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Thu, 28 Apr 2016 16:18:35 -0700 Subject: numa: fix /proc//numa_maps for THP In gather_pte_stats() a THP pmd is cast into a pte, which is wrong because the layouts may differ depending on the architecture. On s390 this will lead to inaccurate numa_maps accounting in /proc because of misguided pte_present() and pte_dirty() checks on the fake pte. On other architectures pte_present() and pte_dirty() may work by chance, but there may be an issue with direct-access (dax) mappings w/o underlying struct pages when HAVE_PTE_SPECIAL is set and THP is available. In vm_normal_page() the fake pte will be checked with pte_special() and because there is no "special" bit in a pmd, this will always return false and the VM_PFNMAP | VM_MIXEDMAP checking will be skipped. On dax mappings w/o struct pages, an invalid struct page pointer would then be returned that can crash the kernel. 
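A toy model of why such a cast is unsafe in general (invented bit layouts, not the real page-table formats): when two descriptor types keep their flag bits in different positions, reinterpreting one as the other silently tests the wrong bit, which is what dedicated *_pmd helpers avoid.

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        /* toy descriptors: the "present" bit sits in different positions */
        typedef struct { uint64_t val; } toy_pte_t;     /* present = bit 0 */
        typedef struct { uint64_t val; } toy_pmd_t;     /* present = bit 7 */

        static int toy_pte_present(toy_pte_t pte) { return (pte.val & (1ULL << 0)) != 0; }
        static int toy_pmd_present(toy_pmd_t pmd) { return (pmd.val & (1ULL << 7)) != 0; }

        /* the buggy pattern: treat a pmd as if it were a pte */
        static int present_via_cast(toy_pmd_t pmd)
        {
                toy_pte_t fake;

                memcpy(&fake, &pmd, sizeof(fake));      /* "pte_t huge_pte = *(pte_t *)pmd" */
                return toy_pte_present(fake);
        }

        int main(void)
        {
                toy_pmd_t pmd = { .val = 1ULL << 7 };   /* present as a pmd */

                printf("pmd_present()    = %d\n", toy_pmd_present(pmd));  /* 1 */
                printf("present via cast = %d\n", present_via_cast(pmd)); /* 0: wrong */
                return 0;
        }

The real layouts differ per architecture, which is why the fix described next adds pmd-specific helpers rather than relying on the cast.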
This patch fixes the numa_maps THP handling by introducing new "_pmd" variants of the can_gather_numa_stats() and vm_normal_page() functions. Signed-off-by: Gerald Schaefer Cc: Naoya Horiguchi Cc: "Kirill A . Shutemov" Cc: Konstantin Khlebnikov Cc: Michal Hocko Cc: Vlastimil Babka Cc: Jerome Marchand Cc: Johannes Weiner Cc: Dave Hansen Cc: Mel Gorman Cc: Dan Williams Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Michael Holzheu Cc: [4.3+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 33 ++++++++++++++++++++++++++++++--- include/linux/mm.h | 2 ++ mm/memory.c | 40 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 229cb546bee0..541583510cfb 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1518,6 +1518,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, return page; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static struct page *can_gather_numa_stats_pmd(pmd_t pmd, + struct vm_area_struct *vma, + unsigned long addr) +{ + struct page *page; + int nid; + + if (!pmd_present(pmd)) + return NULL; + + page = vm_normal_page_pmd(vma, addr, pmd); + if (!page) + return NULL; + + if (PageReserved(page)) + return NULL; + + nid = page_to_nid(page); + if (!node_isset(nid, node_states[N_MEMORY])) + return NULL; + + return page; +} +#endif + static int gather_pte_stats(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { @@ -1527,14 +1553,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, pte_t *orig_pte; pte_t *pte; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { - pte_t huge_pte = *(pte_t *)pmd; struct page *page; - page = can_gather_numa_stats(huge_pte, vma, addr); + page = can_gather_numa_stats_pmd(*pmd, vma, addr); if (page) - gather_stats(page, md, pte_dirty(huge_pte), + gather_stats(page, md, pmd_dirty(*pmd), HPAGE_PMD_SIZE/PAGE_SIZE); spin_unlock(ptl); return 0; @@ -1542,6 +1568,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, if (pmd_trans_unstable(pmd)) return 0; +#endif orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); do { struct page *page = can_gather_numa_stats(*pte, vma, addr); diff --git a/include/linux/mm.h b/include/linux/mm.h index 79b6c18d0a38..864d7221de84 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1140,6 +1140,8 @@ struct zap_details { struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t pmd); int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size); diff --git a/mm/memory.c b/mm/memory.c index 93897f23cc11..305537fc8640 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -789,6 +789,46 @@ out: return pfn_to_page(pfn); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t pmd) +{ + unsigned long pfn = pmd_pfn(pmd); + + /* + * There is no pmd_special() but there may be special pmds, e.g. + * in a direct-access (dax) mapping, so let's just replicate the + * !HAVE_PTE_SPECIAL case from vm_normal_page() here. 
+ */ + if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { + if (vma->vm_flags & VM_MIXEDMAP) { + if (!pfn_valid(pfn)) + return NULL; + goto out; + } else { + unsigned long off; + off = (addr - vma->vm_start) >> PAGE_SHIFT; + if (pfn == vma->vm_pgoff + off) + return NULL; + if (!is_cow_mapping(vma->vm_flags)) + return NULL; + } + } + + if (is_zero_pfn(pfn)) + return NULL; + if (unlikely(pfn > highest_memmap_pfn)) + return NULL; + + /* + * NOTE! We still have PageReserved() pages in the page tables. + * eg. VDSO mappings can cause them to exist. + */ +out: + return pfn_to_page(pfn); +} +#endif + /* * copy one vm_area from one task to the other. Assumes the page tables * already present in the new task to be cleared in the whole range -- cgit v1.2.3-71-gd317