From 746679f876254905a83b0c7ad192d4cc9c149315 Mon Sep 17 00:00:00 2001
From: Jonathan Toppins <jtoppins@redhat.com>
Date: Wed, 2 Oct 2019 18:22:57 -0400
Subject: [PATCH 42/96] [netdrv] bnxt_en: Refactor TPA logic

Message-id: <b6a806b7d4b8e826e928a731708ab23ed16a4e2d.1570027456.git.jtoppins@redhat.com>
Patchwork-id: 276460
O-Subject: [RHEL-8.2 PATCH 35/78] bnxt_en: Refactor TPA logic.
Bugzilla: 1724766
RH-Acked-by: John Linville <linville@redhat.com>
RH-Acked-by: Jarod Wilson <jarod@redhat.com>

Refactor the TPA logic slightly, so that the code can be more easily
extended to support TPA on the new 57500 chips. In particular, the
logic to get the next aggregation completion is refactored into a
new function bnxt_get_agg() so that this operation is made more
generalized. This operation will be different on the new chip in TPA
mode. The logic to recycle the aggregation buffers has a new start
index parameter added for the same purpose.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 4a228a3a5e58e5c05c6ffb5b430e5cb936865a8b)
Bugzilla: 1724766
Build Info: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=23809532
Tested: build, boot, basic ping
Signed-off-by: Jonathan Toppins <jtoppins@redhat.com>
Signed-off-by: Bruno Meneguele <bmeneg@redhat.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 117 ++++++++++++++++++------------
 1 file changed, 69 insertions(+), 48 deletions(-)

Index: src/drivers/net/ethernet/broadcom/bnxt/bnxt.c
===================================================================
--- src.orig/drivers/net/ethernet/broadcom/bnxt/bnxt.c	2020-02-06 16:23:15.092518825 +0100
+++ src/drivers/net/ethernet/broadcom/bnxt/bnxt.c	2020-02-06 16:23:15.989510591 +0100
@@ -830,8 +830,20 @@
	return 0;
}

-static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
-				   u32 agg_bufs)
+static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
+				       struct bnxt_cp_ring_info *cpr,
+				       u16 cp_cons, u16 curr)
+{
+	struct rx_agg_cmp *agg;
+
+	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
+	agg = (struct rx_agg_cmp *)
+		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+	return agg;
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
+				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
@@ -847,8 +859,7 @@
		struct rx_bd *prod_bd;
		struct page *page;

-		agg = (struct rx_agg_cmp *)
-			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+		agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

@@ -876,7 +887,6 @@

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
-		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
@@ -959,8 +969,8 @@

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
-				     struct sk_buff *skb, u16 cp_cons,
-				     u32 agg_bufs)
+				     struct sk_buff *skb, u16 idx,
+				     u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
@@ -975,8 +985,7 @@
		struct page *page;
		dma_addr_t mapping;

-		agg = (struct rx_agg_cmp *)
-			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+		agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
@@ -1010,7 +1019,7 @@
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
-			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
+			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return NULL;
		}

@@ -1023,7 +1032,6 @@
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
-		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
@@ -1083,9 +1091,7 @@
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

-		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
-			    RX_TPA_END_CMP_AGG_BUFS) >>
-			    RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
@@ -1197,11 +1203,10 @@
	cons_rx_buf->data = NULL;
}

-static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
-			   u32 agg_bufs)
+static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
-		bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
@@ -1373,9 +1378,7 @@
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
-	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
-		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
-		       RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+	payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
@@ -1405,11 +1408,11 @@
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data_ptr, agg_bufs;
-	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
+	u16 idx = 0;
	void *data;

	if (unlikely(bnapi->in_reset)) {
@@ -1427,19 +1430,19 @@
	len = tpa_info->len;
	mapping = tpa_info->mapping;

-	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
-		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+	agg_bufs = TPA_END_AGG_BUFS(tpa_end);

	if (agg_bufs) {
+		idx = RING_CMP(*raw_cons);
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*event |= BNXT_AGG_EVENT;
-		cp_cons = NEXT_CMP(cp_cons);
+		idx = NEXT_CMP(idx);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
-		bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+		bnxt_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
@@ -1449,7 +1452,7 @@
	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
-			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
	} else {
@@ -1458,7 +1461,7 @@

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
-			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}

@@ -1473,7 +1476,7 @@

		if (!skb) {
			kfree(data);
-			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
@@ -1481,7 +1484,7 @@
	}

	if (agg_bufs) {
-		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
@@ -1625,7 +1628,8 @@

		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
-			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
+					       false);

		rc = -EIO;
		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
@@ -1648,7 +1652,8 @@
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			if (agg_bufs)
-				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+						       agg_bufs, false);
			rc = -ENOMEM;
			goto next_rx;
		}
@@ -1668,7 +1673,7 @@
	}

	if (agg_bufs) {
-		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
@@ -2486,6 +2491,33 @@
	return 0;
}

+static void bnxt_free_tpa_info(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+		kfree(rxr->rx_tpa);
+		rxr->rx_tpa = NULL;
+	}
+}
+
+static int bnxt_alloc_tpa_info(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+		rxr->rx_tpa = kcalloc(MAX_TPA, sizeof(struct bnxt_tpa_info),
+				      GFP_KERNEL);
+		if (!rxr->rx_tpa)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
static void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;
@@ -2493,6 +2525,7 @@
	if (!bp->rx_ring)
		return;

+	bnxt_free_tpa_info(bp);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;
@@ -2506,9 +2539,6 @@
		page_pool_destroy(rxr->page_pool);
		rxr->page_pool = NULL;

-		kfree(rxr->rx_tpa);
-		rxr->rx_tpa = NULL;
-
		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

@@ -2542,7 +2572,7 @@

static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
-	int i, rc, agg_rings = 0, tpa_rings = 0;
+	int i, rc = 0, agg_rings = 0;

	if (!bp->rx_ring)
		return -ENOMEM;
@@ -2550,9 +2580,6 @@
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		agg_rings = 1;

-	if (bp->flags & BNXT_FLAG_TPA)
-		tpa_rings = 1;
-
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;
@@ -2594,17 +2621,11 @@
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;
-
-			if (tpa_rings) {
-				rxr->rx_tpa = kcalloc(MAX_TPA,
-						      sizeof(struct bnxt_tpa_info),
-						      GFP_KERNEL);
-				if (!rxr->rx_tpa)
-					return -ENOMEM;
-			}
		}
	}
-	return 0;
+	if (bp->flags & BNXT_FLAG_TPA)
+		rc = bnxt_alloc_tpa_info(bp);
+	return rc;
}

static void bnxt_free_tx_rings(struct bnxt *bp)