forked from amzn/amzn-drivers
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path0004-net-ena-fix-out-of-order-completion.patch
150 lines (130 loc) · 5.13 KB
/
0004-net-ena-fix-out-of-order-completion.patch
From 53253faf9e1064d1571eed8d95d3be1cc47cbff9 Mon Sep 17 00:00:00 2001
From: Rafal Kozik <[email protected]>
Date: Wed, 21 Nov 2018 09:21:14 +0100
Subject: [PATCH 04/27] net/ena: fix out of order completion
[ upstream commit 79405ee175857cfdbb508f9d55e2a51d95483be6 ]
rx_buffer_info should be refilled not linearly, but out of order.
IDs should be taken from the empty_rx_reqs array.
rx_refill_buffer is introduced as temporary storage for the
bulk of mbufs taken from the pool.
In case of error, unused mbufs are put back to the pool.
Fixes: c2034976673d ("net/ena: add Rx out of order completion")
Change-Id: I68dece48189852fd0b068174adde03602ca1ec94
Signed-off-by: Rafal Kozik <[email protected]>
Acked-by: Michal Krawczyk <[email protected]>
---
drivers/net/ena/ena_ethdev.c | 40 +++++++++++++++++++++++++-----------
drivers/net/ena/ena_ethdev.h | 1 +
2 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 9e462099f3..87c95b2e7f 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -760,6 +760,10 @@ static void ena_rx_queue_release(void *queue)
rte_free(ring->rx_buffer_info);
ring->rx_buffer_info = NULL;
+ if (ring->rx_refill_buffer)
+ rte_free(ring->rx_refill_buffer);
+ ring->rx_refill_buffer = NULL;
+
if (ring->empty_rx_reqs)
rte_free(ring->empty_rx_reqs);
ring->empty_rx_reqs = NULL;
@@ -1302,6 +1306,17 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
+ rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
+ sizeof(struct rte_mbuf *) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+
+ if (!rxq->rx_refill_buffer) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for rx refill buffer\n");
+ rte_free(rxq->rx_buffer_info);
+ rxq->rx_buffer_info = NULL;
+ return -ENOMEM;
+ }
+
rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
sizeof(uint16_t) * nb_desc,
RTE_CACHE_LINE_SIZE);
@@ -1309,6 +1324,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
rte_free(rxq->rx_buffer_info);
rxq->rx_buffer_info = NULL;
+ rte_free(rxq->rx_refill_buffer);
+ rxq->rx_refill_buffer = NULL;
return -ENOMEM;
}
@@ -1330,7 +1347,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
uint16_t ring_mask = ring_size - 1;
uint16_t next_to_use = rxq->next_to_use;
uint16_t in_use, req_id;
- struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
+ struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
if (unlikely(!count))
return 0;
@@ -1338,13 +1355,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
in_use = rxq->next_to_use - rxq->next_to_clean;
ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
- count = RTE_MIN(count,
- (uint16_t)(ring_size - (next_to_use & ring_mask)));
-
/* get resources for incoming packets */
- rc = rte_mempool_get_bulk(rxq->mb_pool,
- (void **)(&mbufs[next_to_use & ring_mask]),
- count);
+ rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
if (unlikely(rc < 0)) {
rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
PMD_RX_LOG(DEBUG, "there are no enough free buffers");
@@ -1353,15 +1365,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
for (i = 0; i < count; i++) {
uint16_t next_to_use_masked = next_to_use & ring_mask;
- struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
+ struct rte_mbuf *mbuf = mbufs[i];
struct ena_com_buf ebuf;
- rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+ if (likely((i + 4) < count))
+ rte_prefetch0(mbufs[i + 4]);
req_id = rxq->empty_rx_reqs[next_to_use_masked];
rc = validate_rx_req_id(rxq, req_id);
if (unlikely(rc < 0))
break;
+ rxq->rx_buffer_info[req_id] = mbuf;
/* prepare physical address for DMA transaction */
ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
@@ -1370,17 +1384,19 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
&ebuf, req_id);
if (unlikely(rc)) {
- rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
- count - i);
RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
+ rxq->rx_buffer_info[req_id] = NULL;
break;
}
next_to_use++;
}
- if (unlikely(i < count))
+ if (unlikely(i < count)) {
RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
"buffers (from %d)\n", rxq->id, i, count);
+ rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
+ count - i);
+ }
/* When we submitted free recources to device... */
if (likely(i > 0)) {
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 2dc8129e0e..322e90ace8 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -87,6 +87,7 @@ struct ena_ring {
struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
};
+ struct rte_mbuf **rx_refill_buffer;
unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
struct ena_com_io_cq *ena_com_io_cq;
--
2.20.1