From 5249001d69a223811ad654884c6115d55158fd51 Mon Sep 17 00:00:00 2001
From: Vlad Buslov
Date: Wed, 1 Sep 2021 19:05:02 +0300
Subject: net/mlx5: Bridge, mark reg_c1 when pushing VLAN

On ingress VLAN push also assign the value 0x7FE to the reg_c1 tunnel
id+opts bits (tunnel id 0, which is not a valid tunnel id, and option
value 0x7FE, which was reserved by one of the previous patches in the
series). In a following patch this register value is matched on egress
miss to restore the packet to its original state by removing the VLAN
before passing it to the software data path.

Signed-off-by: Vlad Buslov
Reviewed-by: Paul Blakey
Signed-off-by: Saeed Mahameed
---
 include/linux/mlx5/eswitch.h | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 4ab5c1fc1270..97afcea39a7b 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -130,11 +130,20 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
 #define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
 #define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
 #define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
 /* 0x7FF is a reserved mapping */
 #define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
 #define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
				       ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
 #define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
+#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
+					   ESW_TUN_OPTS_BITS) | \
+					  ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
+	GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
+		ESW_TUN_OPTS_OFFSET + 1)
 
 u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
--
cgit v1.2.3-71-gd317
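As a cross-check of the bit arithmetic above, here is a minimal userspace C sketch that re-derives the new values. The ESW_TUN_* widths and offsets used below (11 option bits above the 8 zone-id bits, 12 tunnel-id bits, 1 reserved bit) are defined elsewhere in eswitch.h and are assumptions here, not part of this diff.

/*
 * Userspace sketch, not kernel code: reproduces the reg_c1 macros above to
 * show what the new defines evaluate to. The *_BITS/*_OFFSET values below are
 * assumptions based on the rest of eswitch.h, which is not part of this diff.
 */
#include <stdio.h>

#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

#define ESW_RESERVED_BITS 1		/* assumed */
#define ESW_TUN_ID_BITS 12		/* assumed */
#define ESW_TUN_OPTS_BITS 11		/* assumed: 0x7FF is the all-ones opts value */
#define ESW_TUN_OPTS_OFFSET 8		/* assumed: opts sit above the 8 zone-id bits */

#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)	/* 0x7FF */
#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1) /* 0x7FE */
#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
					   ESW_TUN_OPTS_BITS) | \
					  ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
	GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET + 1)

int main(void)
{
	/* tunnel id 0 combined with option 0x7FE, as described in the commit message */
	printf("push-vlan value: 0x%x\n", ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);
	/* mask used when matching the mark (starts one bit above the opts offset) */
	printf("push-vlan mark:  0x%08x\n", ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);
	return 0;
}

With those assumed widths it prints 0x7fe for the packed push-VLAN value and 0x0007fe00 for the mark mask that the follow-up patch matches on egress miss.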
From 3663ad34bc707fc85492f4d83a313f5df84718d4 Mon Sep 17 00:00:00 2001
From: Shay Drory
Date: Thu, 19 Aug 2021 16:18:57 +0300
Subject: net/mlx5: Shift control IRQ to the last index

Control IRQ is currently the first IRQ vector. This complicates the
handling of completion IRQs, since they all have to be offset by one.
In the next patch there are scenarios where completion and control EQs
will share the same IRQ, for example on functions with a single IRQ. To
ease such scenarios, shift the control IRQ to the end of the IRQ array.

Signed-off-by: Shay Drory
Signed-off-by: Saeed Mahameed
---
 drivers/infiniband/hw/mlx5/odp.c                   |  1 +
 drivers/net/ethernet/mellanox/mlx5/core/eq.c       |  9 +++++----
 drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h |  2 --
 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c  | 10 +++++-----
 include/linux/mlx5/driver.h                        |  2 ++
 5 files changed, 13 insertions(+), 11 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d0d98e584ebc..81147d774dd2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1559,6 +1559,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 
 	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = MLX5_IB_NUM_PF_EQE,
 	};
 	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 605c8ecc3610..792e0d6aa861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -632,6 +632,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	mlx5_eq_notifier_register(dev, &table->cq_err_nb);
 
 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = MLX5_NUM_CMD_EQE,
 		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
 	};
@@ -644,6 +645,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
 
 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = MLX5_NUM_ASYNC_EQE,
 	};
 
@@ -653,6 +655,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 		goto err2;
 
 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = /* TODO: sriov max_vf + */ 1,
 		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
 	};
@@ -806,8 +809,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 	ncomp_eqs = table->num_comp_eqs;
 	nent = MLX5_COMP_EQ_SIZE;
 	for (i = 0; i < ncomp_eqs; i++) {
-		int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
 		struct mlx5_eq_param param = {};
+		int vecidx = i;
 
 		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 		if (!eq) {
@@ -953,9 +956,7 @@ static int set_rmap(struct mlx5_core_dev *mdev)
 		goto err_out;
 	}
 
-	vecidx = MLX5_IRQ_VEC_COMP_BASE;
-	for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
-	     vecidx++) {
+	for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
 		err = irq_cpu_rmap_add(eq_table->rmap,
				       pci_irq_vector(mdev->pdev, vecidx));
 		if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index abd024173c42..8116815663a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -8,8 +8,6 @@
 
 #define MLX5_COMP_EQS_PER_SF 8
 
-#define MLX5_IRQ_EQ_CTRL (0)
-
 struct mlx5_irq;
 
 int mlx5_irq_table_init(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 763c83a02380..a66144b54fc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -194,15 +194,14 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 	snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
 }
 
-static void irq_set_name(char *name, int vecidx)
+static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 {
-	if (vecidx == 0) {
+	if (vecidx == pool->xa_num_irqs.max) {
 		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
 		return;
 	}
 
-	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
-		 vecidx - MLX5_IRQ_VEC_COMP_BASE);
+	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
 }
 
 static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
@@ -217,7 +216,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 		return ERR_PTR(-ENOMEM);
 	irq->irqn = pci_irq_vector(dev->pdev, i);
 	if (!pool->name[0])
-		irq_set_name(name, i);
+		irq_set_name(pool, name, i);
 	else
 		irq_sf_set_name(pool, name, i);
 	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
@@ -440,6 +439,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 	}
 pf_irq:
 	pool = irq_table->pf_pool;
+	vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
 	irq = irq_pool_request_vector(pool, vecidx, affinity);
 out:
 	if (IS_ERR(irq))
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e23417424373..0ca719c00824 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -59,6 +59,8 @@
 
 #define MLX5_ADEV_NAME "mlx5_core"
 
+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
 enum {
 	MLX5_BOARD_ID_LEN = 64,
 };
--
cgit v1.2.3-71-gd317
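A minimal userspace sketch of the index remapping this patch introduces, assuming a hypothetical PF pool of eight vectors: callers pass the MLX5_IRQ_EQ_CTRL sentinel and the IRQ layer resolves it to the pool's last vector, so completion vectors can stay 0-based.

/*
 * Userspace sketch, not kernel code: illustrates the vector-index remapping
 * added to mlx5_irq_request() above. Pool bounds are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define MLX5_IRQ_EQ_CTRL UINT8_MAX	/* sentinel, as defined in include/linux/mlx5/driver.h */

struct irq_pool {
	unsigned int min;	/* first vector index owned by the pool */
	unsigned int max;	/* last vector index owned by the pool */
};

/* Control EQs ask for the sentinel and land on the pool's last vector;
 * completion EQs keep their 0-based index unchanged.
 */
static unsigned int resolve_vecidx(const struct irq_pool *pool, unsigned int vecidx)
{
	return vecidx == MLX5_IRQ_EQ_CTRL ? pool->max : vecidx;
}

int main(void)
{
	struct irq_pool pf_pool = { .min = 0, .max = 7 };	/* hypothetical 8-vector PF */

	printf("control EQ  -> vector %u\n", resolve_vecidx(&pf_pool, MLX5_IRQ_EQ_CTRL));
	printf("completion0 -> vector %u\n", resolve_vecidx(&pf_pool, 0));
	printf("completion1 -> vector %u\n", resolve_vecidx(&pf_pool, 1));
	return 0;
}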
From f891b7cdbdcda116fd26bbd706f91bd58567aa17 Mon Sep 17 00:00:00 2001
From: Shay Drory
Date: Sun, 1 Aug 2021 12:08:49 +0300
Subject: net/mlx5: Enable single IRQ for PCI Function

Prior to this patch the driver required two IRQs to function properly:
one for control and at least one for IO. This requirement can be
relaxed to one now that the driver supports sharing of IRQs, so control
and IO EQs can share the same IRQ. This is needed to support a large
number of VFs.

Signed-off-by: Shay Drory
Reviewed-by: Moshe Shemesh
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 26 +++++++++++++++++------
 include/linux/mlx5/eq.h                           |  1 -
 2 files changed, 19 insertions(+), 8 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index a66144b54fc8..830444f927d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -196,6 +196,12 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 
 static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 {
+	if (!pool->xa_num_irqs.max) {
+		/* in case we only have a single irq for the device */
+		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx);
+		return;
+	}
+
 	if (vecidx == pool->xa_num_irqs.max) {
 		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
 		return;
@@ -204,6 +210,11 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
 }
 
+static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
+{
+	return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
+}
+
 static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 {
 	struct mlx5_core_dev *dev = pool->dev;
@@ -215,7 +226,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 	if (!irq)
 		return ERR_PTR(-ENOMEM);
 	irq->irqn = pci_irq_vector(dev->pdev, i);
-	if (!pool->name[0])
+	if (!irq_pool_is_sf_pool(pool))
 		irq_set_name(pool, name, i);
 	else
 		irq_sf_set_name(pool, name, i);
@@ -385,6 +396,9 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
 	if (IS_ERR(irq) || !affinity)
 		goto unlock;
 	cpumask_copy(irq->mask, affinity);
+	if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
+	    cpumask_empty(irq->mask))
+		cpumask_set_cpu(0, irq->mask);
 	irq_set_affinity_hint(irq->irqn, irq->mask);
 unlock:
 	mutex_unlock(&pool->lock);
@@ -577,6 +591,8 @@ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
 
 int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
 {
+	if (!table->pf_pool->xa_num_irqs.max)
+		return 1;
 	return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
 }
 
@@ -592,19 +608,15 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
 	if (mlx5_core_is_sf(dev))
 		return 0;
 
-	pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
-		 MLX5_IRQ_VEC_COMP_BASE;
+	pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
 	pf_vec = min_t(int, pf_vec, num_eqs);
-	if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE)
-		return -ENOMEM;
 
 	total_vec = pf_vec;
 	if (mlx5_sf_max_functions(dev))
 		total_vec += MLX5_IRQ_CTRL_SF_MAX +
			     MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
 
-	total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
-					  total_vec, PCI_IRQ_MSIX);
+	total_vec = pci_alloc_irq_vectors(dev->pdev, 1, total_vec, PCI_IRQ_MSIX);
 	if (total_vec < 0)
 		return total_vec;
 	pf_vec = min(pf_vec, total_vec);
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
index cea6ecb4b73e..ea3ff5a8ced3 100644
--- a/include/linux/mlx5/eq.h
+++ b/include/linux/mlx5/eq.h
@@ -4,7 +4,6 @@
 #ifndef MLX5_CORE_EQ_H
 #define MLX5_CORE_EQ_H
 
-#define MLX5_IRQ_VEC_COMP_BASE 1
 #define MLX5_NUM_CMD_EQE (32)
 #define MLX5_NUM_ASYNC_EQE (0x1000)
 #define MLX5_NUM_SPARE_EQE (0x80)
--
cgit v1.2.3-71-gd317
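A minimal userspace sketch of the single-IRQ fallback from the last patch, using illustrative pool bounds: when the PF pool holds exactly one vector (xa_num_irqs.max == 0) it is shared by control and completion EQs, reported as one completion vector, and named "mlx5_combined%d" instead of "mlx5_async%d"/"mlx5_comp%d".

/*
 * Userspace sketch, not kernel code: mirrors the naming and completion-vector
 * accounting after this patch. Pool values are illustrative only.
 */
#include <stdio.h>

#define MLX5_MAX_IRQ_NAME 32

struct irq_pool {
	unsigned int min;	/* first vector index in the pool */
	unsigned int max;	/* last vector index in the pool */
};

/* With a single vector (max == 0) the one IRQ is "combined"; otherwise the
 * last vector is the control ("async") IRQ and the rest are completion IRQs.
 */
static void set_name(const struct irq_pool *pool, char *name, unsigned int vecidx)
{
	if (!pool->max) {
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%u", vecidx);
		return;
	}
	if (vecidx == pool->max) {
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%u", vecidx);
		return;
	}
	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%u", vecidx);
}

/* Mirrors mlx5_irq_table_get_num_comp(): a single-vector pool still reports
 * one usable completion vector.
 */
static unsigned int num_comp(const struct irq_pool *pool)
{
	return pool->max ? pool->max - pool->min : 1;
}

int main(void)
{
	struct irq_pool single = { .min = 0, .max = 0 };	/* one MSI-X vector */
	struct irq_pool normal = { .min = 0, .max = 7 };	/* eight MSI-X vectors */
	char name[MLX5_MAX_IRQ_NAME];

	set_name(&single, name, 0);
	printf("single-vector function: %s, %u completion vector(s)\n", name, num_comp(&single));
	set_name(&normal, name, normal.max);
	printf("8-vector function:      %s, %u completion vector(s)\n", name, num_comp(&normal));
	return 0;
}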