// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "ena_eth_com.h"

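/* Return the next RX completion descriptor the device has posted, or NULL
 * if none is ready. Ownership is determined by comparing the descriptor's
 * phase bit with the phase the driver expects on the current pass over
 * the ring; dma_rmb() below orders the payload reads after that check.
 */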
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) &
		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

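/* Regular (host memory) placement: the next free SQ descriptor is simply
 * the slot at the masked tail of the descriptor ring.
 */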
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

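/* Copy one completed descriptor list entry (a "line") from the bounce
 * buffer into the device memory of an LLQ, honoring the tx-burst budget
 * when the device advertises one, then advance the SQ tail and flip the
 * phase bit on wrap-around.
 */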
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			   io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

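/* LLQ only: copy the packet header into the current bounce buffer, right
 * after the slots reserved for descriptors. Fails if the header does not
 * fit in a descriptor list entry or no bounce buffer is available.
 */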
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

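/* LLQ placement: hand out the next descriptor slot inside the current
 * bounce buffer and update the per-line bookkeeping.
 */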
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

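/* Flush the current bounce buffer to the device if any descriptors were
 * written into it, switch to a fresh zeroed bounce buffer, and reset the
 * per-line packet control state. No-op for host memory placement.
 */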
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

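/* LLQ tail update: once the current descriptor line is full, write it to
 * the device, take a new zeroed bounce buffer and recompute how many
 * descriptors fit in a line according to the stride control.
 */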
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

static struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		 idx * io_cq->cdesc_entry_size_in_bytes);
}

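/* Count the completion descriptors that make up the next RX packet,
 * starting at the CQ head. Returns the packet's total cdesc count and its
 * first index once a cdesc with the LAST bit is found; returns 0 and
 * accumulates the partial count while the packet is still in flight.
 */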
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) &
			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			   io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

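/* Fill a TX meta descriptor (MSS, L3/L4 header lengths and offsets, phase)
 * in the next SQ slot and advance the tail.
 */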
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return 0;
}

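/* Translate the status word of an RX completion descriptor into the RX
 * context: L3/L4 protocol indices, checksum error/checked bits, hash and
 * IPv4 fragment flag.
 */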
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		   ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		   ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

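/* Prepare a TX packet for the device: check queue space and header size,
 * write the push header (LLQ mode), emit a meta descriptor if needed, then
 * fill one TX descriptor per buffer with address, length, phase and
 * offload flags. On success, *nb_hw_desc holds the number of hardware
 * descriptors that were consumed.
 */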
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs + 1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Header size is too large %d max header: %d\n",
			   header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Push header wasn't provided in LLQ mode\n");
		return -EINVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
					   "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

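/* Fetch one received packet: gather all of its completion descriptors,
 * record each buffer's length and req_id in ena_rx_ctx (validating req_id
 * against the queue depth), advance the RX SQ's next_to_comp and extract
 * the offload flags from the last cdesc. ena_rx_ctx->descs is set to 0
 * when no complete packet is available yet.
 */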
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		   nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
			   ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return -EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		   io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

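/* Post a single RX buffer to the device: fill the next SQ descriptor with
 * the buffer's address, length and req_id, mark it FIRST | LAST with a
 * completion request, and advance the tail.
 */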
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
		   __func__, io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

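/* Report whether the CQ has no completion descriptor pending: the next
 * cdesc's phase bit does not match the phase the driver expects.
 */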
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}