From ccb016735ab552893c77a5deeeef4d795c18448e Mon Sep 17 00:00:00 2001
From: Alaa Hleihel <ahleihel@redhat.com>
Date: Sun, 10 May 2020 15:04:02 -0400
Subject: [PATCH 085/312] [netdrv] net/mlx5: Refactor ingress acl configuration

Message-id: <20200510150452.10307-38-ahleihel@redhat.com>
Patchwork-id: 306661
Patchwork-instance: patchwork
O-Subject: [RHEL8.3 BZ 1789380 v2 37/87] net/mlx5: Refactor ingress acl configuration
Bugzilla: 1789380
RH-Acked-by: Kamal Heib <kheib@redhat.com>
RH-Acked-by: Jarod Wilson <jarod@redhat.com>
RH-Acked-by: Tony Camuso <tcamuso@redhat.com>
RH-Acked-by: Jonathan Toppins <jtoppins@redhat.com>

Bugzilla: http://bugzilla.redhat.com/1789380
Upstream: v5.5-rc1

commit 10652f39943ec19d32a6fa44a8523b0d40abcbcf
Author: Parav Pandit <parav@mellanox.com>
Date: Mon Oct 28 23:35:26 2019 +0000

net/mlx5: Refactor ingress acl configuration

Drop, untagged, spoof check and untagged spoof check flow groups are
limited to legacy mode only.

Therefore, the following refactoring is done to
(a) improve code readability
(b) have better code split between legacy and offloads mode

1. Move legacy flow groups under legacy structure
2. Add validity check for group deletion
3. Restrict scope of esw_vport_disable_ingress_acl to legacy mode
4. Rename esw_vport_enable_ingress_acl() to
esw_vport_create_ingress_acl_table() and limit its scope to
table creation
5. Introduce legacy flow groups creation helper
esw_legacy_create_ingress_acl_groups() and keep its scope to legacy mode
6. Reduce offloads ingress groups from 4 to just 1 metadata group
per vport
7. Removed redundant IS_ERR_OR_NULL as entries are marked NULL on free.
8. Shorten error message to remove redundant 'E-switch'

Signed-off-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>

Signed-off-by: Alaa Hleihel <ahleihel@redhat.com>
Signed-off-by: Frantisek Hrbata <fhrbata@redhat.com>
---
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 228 ++++++++++++---------
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 17 +-
.../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 67 +++++-
3 files changed, 201 insertions(+), 111 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 97af7d793435..1937198405e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1065,57 +1065,21 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}

-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int
+esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *acl;
struct mlx5_flow_group *g;
void *match_criteria;
u32 *flow_group_in;
- /* The ingress acl table contains 4 groups
- * (2 active rules at the same time -
- * 1 allow rule from one of the first 3 groups.
- * 1 drop rule from the last group):
- * 1)Allow untagged traffic with smac=original mac.
- * 2)Allow untagged traffic.
- * 3)Allow traffic with smac=original mac.
- * 4)Drop all other traffic.
- */
- int table_size = 4;
- int err = 0;
-
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- return -EOPNOTSUPP;
-
- if (!IS_ERR_OR_NULL(vport->ingress.acl))
- return 0;
-
- esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
- vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- mlx5_eswitch_vport_num_to_index(esw, vport->vport));
- if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
- return -EOPNOTSUPP;
- }
+ int err;

flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;

- acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR(acl)) {
- err = PTR_ERR(acl);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
- vport->vport, err);
- goto out;
- }
- vport->ingress.acl = acl;
-
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1125,14 +1089,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto spoof_err;
}
- vport->ingress.allow_untagged_spoofchk_grp = g;
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = g;

memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1140,14 +1104,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto untagged_err;
}
- vport->ingress.allow_untagged_only_grp = g;
+ vport->ingress.legacy.allow_untagged_only_grp = g;

memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1156,80 +1120,134 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto allow_spoof_err;
}
- vport->ingress.allow_spoofchk_only_grp = g;
+ vport->ingress.legacy.allow_spoofchk_only_grp = g;

memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

- g = mlx5_create_flow_group(acl, flow_group_in);
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+ esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
vport->vport, err);
- goto out;
+ goto drop_err;
}
- vport->ingress.drop_grp = g;
+ vport->ingress.legacy.drop_grp = g;
+ kvfree(flow_group_in);
+ return 0;

-out:
- if (err) {
- if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_spoofchk_only_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_untagged_only_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
- mlx5_destroy_flow_group(
- vport->ingress.allow_untagged_spoofchk_grp);
- if (!IS_ERR_OR_NULL(vport->ingress.acl))
- mlx5_destroy_flow_table(vport->ingress.acl);
+drop_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
}
-
+allow_spoof_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+untagged_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+spoof_err:
kvfree(flow_group_in);
return err;
}

+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport, int table_size)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ int vport_index;
+ int err;
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+ vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ vport_index);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
+ vport->vport);
+ return -EOPNOTSUPP;
+ }
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
+ vport->vport, err);
+ return err;
+ }
+ vport->ingress.acl = acl;
+ return 0;
+}
+
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
+{
+ if (!vport->ingress.acl)
+ return;
+
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+}
+
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_rule)) {
+ if (vport->ingress.legacy.drop_rule) {
mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
vport->ingress.legacy.drop_rule = NULL;
}

- if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+ if (vport->ingress.allow_rule) {
mlx5_del_flow_rules(vport->ingress.allow_rule);
vport->ingress.allow_rule = NULL;
}
}

-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- if (IS_ERR_OR_NULL(vport->ingress.acl))
+ if (!vport->ingress.acl)
return;

esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

esw_vport_cleanup_ingress_rules(esw, vport);
- mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
- mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
- mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
- mlx5_destroy_flow_group(vport->ingress.drop_grp);
- mlx5_destroy_flow_table(vport->ingress.acl);
- vport->ingress.acl = NULL;
- vport->ingress.drop_grp = NULL;
- vport->ingress.allow_spoofchk_only_grp = NULL;
- vport->ingress.allow_untagged_only_grp = NULL;
- vport->ingress.allow_untagged_spoofchk_grp = NULL;
+ if (vport->ingress.legacy.allow_spoofchk_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+ if (vport->ingress.legacy.drop_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
+ vport->ingress.legacy.drop_grp = NULL;
+ }
+ esw_vport_destroy_ingress_acl_table(vport);
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
@@ -1244,19 +1262,36 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
int err = 0;
u8 *smac_v;

+ /* The ingress acl table contains 4 groups
+ * (2 active rules at the same time -
+ * 1 allow rule from one of the first 3 groups.
+ * 1 drop rule from the last group):
+ * 1)Allow untagged traffic with smac=original mac.
+ * 2)Allow untagged traffic.
+ * 3)Allow traffic with smac=original mac.
+ * 4)Drop all other traffic.
+ */
+ int table_size = 4;
+
esw_vport_cleanup_ingress_rules(esw, vport);

if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
return 0;
}

- err = esw_vport_enable_ingress_acl(esw, vport);
- if (err) {
- mlx5_core_warn(esw->dev,
- "failed to enable ingress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
+ if (!vport->ingress.acl) {
+ err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport[%d] enable ingress acl err (%d)\n",
+ err, vport->vport);
+ return err;
+ }
+
+ err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
+ if (err)
+ goto out;
}

esw_debug(esw->dev,
@@ -1317,10 +1352,11 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
vport->ingress.legacy.drop_rule = NULL;
goto out;
}
+ kvfree(spec);
+ return 0;

out:
- if (err)
- esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
kvfree(spec);
return err;
}
@@ -1700,7 +1736,7 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
return 0;

egress_err:
- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
vport->egress.legacy.drop_counter = NULL;

@@ -1730,7 +1766,7 @@ static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
vport->egress.legacy.drop_counter = NULL;

- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_disable_legacy_ingress_acl(esw, vport);
mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
vport->ingress.legacy.drop_counter = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 777224ed18bc..963d0df0d66b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -65,25 +65,30 @@

struct vport_ingress {
struct mlx5_flow_table *acl;
+#ifdef __GENKSYMS__
struct mlx5_flow_group *allow_untagged_spoofchk_grp;
struct mlx5_flow_group *allow_spoofchk_only_grp;
struct mlx5_flow_group *allow_untagged_only_grp;
struct mlx5_flow_group *drop_grp;
-#ifdef __GENKSYMS__
struct mlx5_modify_hdr *modify_metadata;
struct mlx5_flow_handle *modify_metadata_rule;
#endif
- struct mlx5_flow_handle *allow_rule;
+ struct mlx5_flow_handle *allow_rule;
#ifdef __GENKSYMS__
struct mlx5_flow_handle *drop_rule;
struct mlx5_fc *drop_counter;
#endif
#ifndef __GENKSYMS__
struct {
+ struct mlx5_flow_group *allow_spoofchk_only_grp;
+ struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+ struct mlx5_flow_group *allow_untagged_only_grp;
+ struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *drop_rule;
struct mlx5_fc *drop_counter;
} legacy;
struct {
+ struct mlx5_flow_group *metadata_grp;
struct mlx5_modify_hdr *modify_metadata;
struct mlx5_flow_handle *modify_metadata_rule;
} offloads;
@@ -272,16 +277,16 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ int table_size);
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
u32 rate_mbps);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 767993b10110..7fe085fa3d29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1860,6 +1860,44 @@ static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
}
}

+static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int ret = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
+ esw_warn(esw->dev,
+ "Failed to create vport[%d] ingress metdata group, err(%d)\n",
+ vport->vport, ret);
+ goto grp_err;
+ }
+ vport->ingress.offloads.metadata_grp = g;
+grp_err:
+ kvfree(flow_group_in);
+ return ret;
+}
+
+static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
+{
+ if (vport->ingress.offloads.metadata_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
+ vport->ingress.offloads.metadata_grp = NULL;
+ }
+}
+
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
@@ -1870,8 +1908,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
return 0;

esw_vport_cleanup_ingress_rules(esw, vport);
-
- err = esw_vport_enable_ingress_acl(esw, vport);
+ err = esw_vport_create_ingress_acl_table(esw, vport, 1);
if (err) {
esw_warn(esw->dev,
"failed to enable ingress acl (%d) on vport[%d]\n",
@@ -1879,25 +1916,34 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
return err;
}

+ err = esw_vport_create_ingress_acl_group(esw, vport);
+ if (err)
+ goto group_err;
+
esw_debug(esw->dev,
"vport[%d] configure ingress rules\n", vport->vport);

if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
if (err)
- goto out;
+ goto metadata_err;
}

if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
err = esw_vport_ingress_prio_tag_config(esw, vport);
if (err)
- goto out;
+ goto prio_tag_err;
}
+ return 0;

-out:
- if (err)
- esw_vport_disable_ingress_acl(esw, vport);
+prio_tag_err:
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+metadata_err:
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_group(vport);
+group_err:
+ esw_vport_destroy_ingress_acl_table(vport);
return err;
}

@@ -1978,7 +2024,8 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
err = esw_vport_egress_config(esw, vport);
if (err) {
esw_vport_del_ingress_acl_modify_metadata(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_table(vport);
}
}
return err;
@@ -1990,7 +2037,9 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
{
esw_vport_disable_egress_acl(esw, vport);
esw_vport_del_ingress_acl_modify_metadata(esw, vport);
- esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ esw_vport_destroy_ingress_acl_group(vport);
+ esw_vport_destroy_ingress_acl_table(vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
--
2.13.6