From ce3e28c0d41fdac1ea0dc6ab747811303f791a16 Mon Sep 17 00:00:00 2001
From: Alaa Hleihel
Date: Tue, 12 May 2020 10:54:06 -0400
Subject: [PATCH 146/312] [netdrv] net/mlx5e: Add mlx5e_flower_parse_meta support

Message-id: <20200512105530.4207-41-ahleihel@redhat.com>
Patchwork-id: 306913
Patchwork-instance: patchwork
O-Subject: [RHEL8.3 BZ 1789382 040/124] net/mlx5e: Add mlx5e_flower_parse_meta support
Bugzilla: 1789382
RH-Acked-by: Tony Camuso
RH-Acked-by: Kamal Heib
RH-Acked-by: Jarod Wilson

Bugzilla: http://bugzilla.redhat.com/1789382
Upstream: v5.6-rc1

commit 6d65bc64e232896251daba7c43933f0f35859bc3
Author: wenxu
Date:   Tue Jan 7 17:16:06 2020 +0800

    net/mlx5e: Add mlx5e_flower_parse_meta support

    In flowtable offload, all the devices in a flowtable share the same
    flow_block, so an offload rule is installed on every one of them.
    This is not correct.

    With only two devices in the flowtable there is no problem: a rule
    whose ingress and egress are on the same device is rejected by the
    driver. With more than two devices, however, wrong rules end up
    installed on hardware.

    For example, with three devices in an offload flowtable (dev_a,
    dev_b, dev_c) and a rule with ingress on dev_a and egress on dev_b:
    the rule is installed on dev_a; the install on dev_b fails because
    ingress and egress are on the same device; and the rule is also
    installed on dev_c, which is not correct.

    Flowtable offload avoids this case by restricting the ingress device
    with FLOW_DISSECTOR_KEY_META, so the mlx5e driver should also
    support parsing FLOW_DISSECTOR_KEY_META.

    Signed-off-by: wenxu
    Acked-by: Roi Dayan
    Signed-off-by: Saeed Mahameed

Signed-off-by: Alaa Hleihel
Signed-off-by: Frantisek Hrbata
---
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 39 +++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 916a49b916c9..5f56830ab709 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1797,6 +1797,40 @@ static void *get_match_headers_value(u32 flags,
 			     outer_headers);
 }
 
+static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
+				   struct flow_cls_offload *f)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+	struct netlink_ext_ack *extack = f->common.extack;
+	struct net_device *ingress_dev;
+	struct flow_match_meta match;
+
+	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+		return 0;
+
+	flow_rule_match_meta(rule, &match);
+	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
+		return -EINVAL;
+	}
+
+	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
+					 match.key->ingress_ifindex);
+	if (!ingress_dev) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't find the ingress port to match on");
+		return -EINVAL;
+	}
+
+	if (ingress_dev != filter_dev) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't match on the ingress filter port");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int __parse_cls_flower(struct mlx5e_priv *priv,
 			      struct mlx5_flow_spec *spec,
 			      struct flow_cls_offload *f,
@@ -1817,6 +1851,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	u16 addr_type = 0;
 	u8 ip_proto = 0;
 	u8 *match_level;
+	int err;
 
 	match_level = outer_match_level;
 
@@ -1860,6 +1895,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			 spec);
 	}
 
+	err = mlx5e_flower_parse_meta(filter_dev, f);
+	if (err)
+		return err;
+
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 		struct flow_match_basic match;
 
-- 
2.13.6
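
For illustration only, below is a minimal user-space sketch of the check that
mlx5e_flower_parse_meta() adds: accept a rule that carries no META key, reject
a non-exact ingress ifindex mask, and reject a rule whose ingress port is not
the device the filter is being offloaded to. The types and helpers here
(meta_match, parse_meta) are made-up stand-ins, not kernel API; the real code
uses flow_rule_match_meta(), __dev_get_by_index() and NL_SET_ERR_MSG_MOD(),
and this sketch condenses the net_device lookup into a plain ifindex compare.

/* Hypothetical stand-alone sketch; not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct meta_match {            /* stands in for struct flow_match_meta */
	uint32_t key_ifindex;  /* ingress ifindex requested by the rule */
	uint32_t mask_ifindex; /* mask; 0 here means "META key absent"  */
};

/* Accept the rule only if it matches exactly on the offloading device. */
static int parse_meta(uint32_t filter_dev_ifindex, const struct meta_match *m)
{
	if (m->mask_ifindex == 0)            /* no META key: nothing to check */
		return 0;

	if (m->mask_ifindex != 0xFFFFFFFF) { /* only exact ifindex match supported */
		fprintf(stderr, "Unsupported ingress ifindex mask\n");
		return -EINVAL;
	}

	if (m->key_ifindex != filter_dev_ifindex) { /* rule targets another ingress port */
		fprintf(stderr, "Can't match on the ingress filter port\n");
		return -EINVAL;
	}

	return 0;
}

int main(void)
{
	/* dev_a = ifindex 3, dev_c = ifindex 5 (made-up values) */
	struct meta_match rule = { .key_ifindex = 3, .mask_ifindex = 0xFFFFFFFF };

	printf("install on dev_a: %d\n", parse_meta(3, &rule)); /* 0: accepted    */
	printf("install on dev_c: %d\n", parse_meta(5, &rule)); /* -22: rejected  */
	return 0;
}

With dev_a's ifindex as the key, offload is accepted on dev_a and refused on
dev_c, which is how the flowtable's META restriction prevents the wrong-rule
installation described in the commit message.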