From ca8424eebb93cf0c2b311f0f12ea911361b4833f Mon Sep 17 00:00:00 2001
From: Alaa Hleihel
Date: Tue, 12 May 2020 10:54:15 -0400
Subject: [PATCH 152/312] [netdrv] net/mlx5e: TX, Error completion is for last
 WQE in batch

Message-id: <20200512105530.4207-50-ahleihel@redhat.com>
Patchwork-id: 306921
Patchwork-instance: patchwork
O-Subject: [RHEL8.3 BZ 1789382 049/124] net/mlx5e: TX, Error completion is for last WQE in batch
Bugzilla: 1789382
RH-Acked-by: Tony Camuso
RH-Acked-by: Kamal Heib
RH-Acked-by: Jarod Wilson

Bugzilla: http://bugzilla.redhat.com/1789382
Upstream: v5.6-rc1

commit b57e66ad42d051ed31319c28ed1b62b191299a29
Author: Tariq Toukan
Date:   Thu Jan 9 15:53:37 2020 +0200

    net/mlx5e: TX, Error completion is for last WQE in batch

    For a cyclic work queue, when not requesting a completion per WQE,
    a single CQE might indicate the completion of several WQEs.
    However, in case some WQE in the batch causes an error, then an error
    completion is issued, breaking the batch, and pointing to the offending
    WQE in the wqe_counter field. Hence, WQE-specific error CQE handling
    (like printing, breaking, etc...) should be performed only for the last
    WQE in batch.

    Fixes: 130c7b46c93d ("net/mlx5e: TX, Dump WQs wqe descriptors on CQE with error events")
    Fixes: fd9b4be8002c ("net/mlx5e: RX, Support multiple outstanding UMR posts")
    Signed-off-by: Tariq Toukan
    Reviewed-by: Aya Levin
    Signed-off-by: Saeed Mahameed

Signed-off-by: Alaa Hleihel
Signed-off-by: Frantisek Hrbata
---
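Backport note (not part of the upstream commit): the rule described above --
retire the whole batch first, then perform error handling only once the
consumer counter has reached the WQE that the CQE's wqe_counter points at --
can be illustrated with a minimal, self-contained C sketch. All names below
(fake_cqe, handle_cqe, FAKE_CQE_REQ_ERR) are hypothetical stand-ins, not the
mlx5 driver's types or API, and each WQE occupies a single queue slot here
for simplicity.

/*
 * Sketch: one CQE completes every WQE up to and including wqe_counter.
 * Error-specific handling runs only after the batch loop, i.e. for the
 * last WQE in the batch -- the same ordering the patch enforces in
 * mlx5e_poll_tx_cq() and mlx5e_poll_ico_cq().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_CQE_REQ_ERR 13	/* stand-in error opcode, value is arbitrary */

struct fake_cqe {
	uint16_t wqe_counter;	/* index of the last WQE this CQE completes */
	uint8_t  opcode;
};

static void handle_cqe(const struct fake_cqe *cqe, uint16_t *sqcc)
{
	bool last_wqe;

	/* Retire every outstanding WQE covered by this completion. */
	do {
		last_wqe = (*sqcc == cqe->wqe_counter);
		printf("freeing WQE %u%s\n", (unsigned)*sqcc,
		       last_wqe ? " (last in batch)" : "");
		(*sqcc)++;
	} while (!last_wqe);

	/*
	 * Only now does it make sense to dump the offending descriptor or
	 * kick recovery: the CQE's error information belongs to the last
	 * WQE of the batch, which the loop above has just reached.
	 */
	if (cqe->opcode == FAKE_CQE_REQ_ERR)
		printf("error completion for WQE %u, scheduling recovery\n",
		       (unsigned)cqe->wqe_counter);
}

int main(void)
{
	uint16_t sqcc = 0;	/* SQ consumer counter */
	struct fake_cqe err = { .wqe_counter = 3, .opcode = FAKE_CQE_REQ_ERR };

	handle_cqe(&err, &sqcc);	/* retires WQEs 0..3, reports error once */
	return 0;
}

Running it prints one "freeing" line per WQE 0..3 and a single error line
for WQE 3. Before this fix, the driver performed the equivalent of the error
branch ahead of the batch loop, against a consumer index that had not yet
reached the offending WQE.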
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 16 ++++++------
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 33 +++++++++++--------------
 2 files changed, 23 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 64d6ecbece80..f0170fb2edbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -614,13 +614,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
-			netdev_WARN_ONCE(cq->channel->netdev,
-					 "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
-			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-				queue_work(cq->channel->priv->wq, &sq->recover_work);
-			break;
-		}
 		do {
 			struct mlx5e_sq_wqe_info *wi;
 			u16 ci;
@@ -630,6 +623,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
 			wi = &sq->db.ico_wqe[ci];
 
+			if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+				netdev_WARN_ONCE(cq->channel->netdev,
+						 "Bad OP in ICOSQ CQE: 0x%x\n",
+						 get_cqe_opcode(cqe));
+				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+					queue_work(cq->channel->priv->wq, &sq->recover_work);
+				break;
+			}
+
 			if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
 				sqcc += MLX5E_UMR_WQEBBS;
 				wi->umr.rq->mpwqe.umr_completed++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 3ce27194ee7e..3bfeb7c06b25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -452,34 +452,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
 	i = 0;
 	do {
+		struct mlx5e_tx_wqe_info *wi;
 		u16 wqe_counter;
 		bool last_wqe;
+		u16 ci;
 
 		mlx5_cqwq_pop(&cq->wq);
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
-			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
-					      &sq->state)) {
-				struct mlx5e_tx_wqe_info *wi;
-				u16 ci;
-
-				ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
-				wi = &sq->db.wqe_info[ci];
-				mlx5e_dump_error_cqe(sq,
-						     (struct mlx5_err_cqe *)cqe);
-				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
-				queue_work(cq->channel->priv->wq,
-					   &sq->recover_work);
-			}
-			stats->cqe_err++;
-		}
-
 		do {
-			struct mlx5e_tx_wqe_info *wi;
 			struct sk_buff *skb;
-			u16 ci;
 			int j;
 
 			last_wqe = (sqcc == wqe_counter);
@@ -517,6 +500,18 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 			napi_consume_skb(skb, napi_budget);
 		} while (!last_wqe);
 
+		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
+			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
+					      &sq->state)) {
+				mlx5e_dump_error_cqe(sq,
+						     (struct mlx5_err_cqe *)cqe);
+				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+				queue_work(cq->channel->priv->wq,
+					   &sq->recover_work);
+			}
+			stats->cqe_err++;
+		}
+
 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
 	stats->cqes += i;
-- 
2.13.6