iPXE: efx_hunt.c
/**************************************************************************
 *
 * Driver datapath for Solarflare network cards
 *
 * Written by Shradha Shah, maintained by <pre-boot-drivers@xilinx.com>
 *
 * Copyright 2012-2019 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 *
 ***************************************************************************/
20
21#include <stdint.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <unistd.h>
26#include <errno.h>
27#include <assert.h>
28#include <byteswap.h>
29#include <ipxe/io.h>
30#include <ipxe/pci.h>
31#include <ipxe/malloc.h>
32#include <ipxe/iobuf.h>
33#include <ipxe/netdevice.h>
34#include "efx_hunt.h"
35#include "efx_bitfield.h"
36#include "ef10_regs.h"
37
38FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
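
/* This file implements the EF10 (Huntington) datapath: transmit and
 * receive descriptor rings plus an event queue, all allocated as MCDI
 * "special buffers" and accessed through the efx_bitfield.h helpers.
 */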

void efx_hunt_free_special_buffer(void *buf, int bytes)
{
        free_phys(buf, bytes);
}

static void *efx_hunt_alloc_special_buffer(int bytes,
                                           struct efx_special_buffer *entry)
{
        void *buffer;
        dma_addr_t dma_addr;

        /* Allocate the buffer, aligned on a buffer address boundary.  This
         * buffer will be passed into an MC_CMD_INIT_*Q command to set up the
         * appropriate type of queue via MCDI.
         */
        buffer = malloc_phys(bytes, EFX_BUF_ALIGN);
        if (!buffer)
                return NULL;

        entry->dma_addr = dma_addr = virt_to_bus(buffer);
        assert((dma_addr & (EFX_BUF_ALIGN - 1)) == 0);

        /* Buffer table entries aren't allocated, so set id to zero */
        entry->id = 0;
        DBGP("Allocated 0x%x bytes at %p\n", bytes, buffer);

        return buffer;
}

/*******************************************************************************
 *
 *
 * TX
 *
 *
 ******************************************************************************/
static void
efx_hunt_build_tx_desc(efx_tx_desc_t *txd, struct io_buffer *iob)
{
        dma_addr_t dma_addr;

        dma_addr = virt_to_bus(iob->data);

        EFX_POPULATE_QWORD_4(*txd,
                             ESF_DZ_TX_KER_TYPE, 0,
                             ESF_DZ_TX_KER_CONT, 0,
                             ESF_DZ_TX_KER_BYTE_CNT, iob_len(iob),
                             ESF_DZ_TX_KER_BUF_ADDR, dma_addr);
}
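
/* Ring the TX doorbell: writing the masked producer index to
 * ER_DZ_TX_DESC_UPD_DWORD tells the NIC that new descriptors are ready.
 */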

static void
efx_hunt_notify_tx_desc(struct efx_nic *efx)
{
        struct efx_tx_queue *txq = &efx->txq;
        int ptr = txq->write_ptr & EFX_TXD_MASK;
        efx_dword_t reg;

        EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, ptr);
        efx_writel_page(efx, &reg, 0, ER_DZ_TX_DESC_UPD_DWORD);
}
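
/* write_ptr and read_ptr are free-running counters; masking with
 * EFX_TXD_MASK turns them into ring indices.  One slot is always left
 * empty so that write_ptr == read_ptr means "empty" rather than "full".
 */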

int
efx_hunt_transmit(struct net_device *netdev, struct io_buffer *iob)
{
        struct efx_nic *efx = netdev->priv;
        struct efx_tx_queue *txq = &efx->txq;
        int fill_level, space;
        efx_tx_desc_t *txd;
        int buf_id;

        fill_level = txq->write_ptr - txq->read_ptr;
        space = EFX_TXD_SIZE - fill_level - 1;
        if (space < 1)
                return -ENOBUFS;

        /* Save the iobuffer for later completion */
        buf_id = txq->write_ptr & EFX_TXD_MASK;
        assert(txq->buf[buf_id] == NULL);
        txq->buf[buf_id] = iob;

        DBGCIO(efx, "tx_buf[%d] for iob %p data %p len %zd\n",
               buf_id, iob, iob->data, iob_len(iob));

        /* Form the descriptor, and push it to hardware */
        txd = txq->ring + buf_id;
        efx_hunt_build_tx_desc(txd, iob);
        ++txq->write_ptr;
        efx_hunt_notify_tx_desc(efx);

        return 0;
}

static void
efx_hunt_transmit_done(struct efx_nic *efx, int id)
{
        struct efx_tx_queue *txq = &efx->txq;
        unsigned int read_ptr, stop;

        /* Complete all buffers from read_ptr up to and including id */
        read_ptr = txq->read_ptr & EFX_TXD_MASK;
        stop = (id + 1) & EFX_TXD_MASK;

        while (read_ptr != stop) {
                struct io_buffer *iob = txq->buf[read_ptr];

                assert(iob);
                /* Complete the tx buffer */
                if (iob)
                        netdev_tx_complete(efx->netdev, iob);
                DBGCIO(efx, "tx_buf[%d] for iob %p done\n", read_ptr, iob);
                txq->buf[read_ptr] = NULL;

                ++txq->read_ptr;
                read_ptr = txq->read_ptr & EFX_TXD_MASK;
        }
}
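
/* The *_init functions below allocate each ring as a special buffer and
 * return its DMA address for the subsequent MC_CMD_INIT_*Q MCDI call
 * (see efx_hunt_alloc_special_buffer() above).
 */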

int efx_hunt_tx_init(struct net_device *netdev, dma_addr_t *dma_addr)
{
        struct efx_nic *efx = netdev->priv;
        struct efx_tx_queue *txq = &efx->txq;
        size_t bytes;

        /* Allocate hardware transmit queue */
        bytes = sizeof(efx_tx_desc_t) * EFX_TXD_SIZE;
        txq->ring = efx_hunt_alloc_special_buffer(bytes, &txq->entry);
        if (!txq->ring)
                return -ENOMEM;

        txq->read_ptr = txq->write_ptr = 0;
        *dma_addr = txq->entry.dma_addr;
        return 0;
}

/*******************************************************************************
 *
 *
 * RX
 *
 *
 ******************************************************************************/
static void
efx_hunt_build_rx_desc(efx_rx_desc_t *rxd, struct io_buffer *iob)
{
        dma_addr_t dma_addr = virt_to_bus(iob->data);

        EFX_POPULATE_QWORD_2(*rxd,
                             ESF_DZ_RX_KER_BYTE_CNT, EFX_RX_BUF_SIZE,
                             ESF_DZ_RX_KER_BUF_ADDR, dma_addr);
}

static void
efx_hunt_notify_rx_desc(struct efx_nic *efx)
{
        struct efx_rx_queue *rxq = &efx->rxq;
        int ptr = rxq->write_ptr & EFX_RXD_MASK;
        efx_dword_t reg;

        EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, ptr);
        efx_writel_page(efx, &reg, 0, ER_DZ_RX_DESC_UPD);
}
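
/* The software buffer array holds EFX_NUM_RX_DESC entries while the
 * hardware ring holds EFX_RXD_SIZE descriptors, so write_ptr is masked
 * separately for each (buf_id vs desc_id below).
 */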

static void
efx_hunt_rxq_fill(struct efx_nic *efx)
{
        struct efx_rx_queue *rxq = &efx->rxq;
        int fill_level = rxq->write_ptr - rxq->read_ptr;
        int space = EFX_NUM_RX_DESC - fill_level - 1;
        int pushed = 0;

        while (space) {
                int buf_id = rxq->write_ptr & (EFX_NUM_RX_DESC - 1);
                int desc_id = rxq->write_ptr & EFX_RXD_MASK;
                struct io_buffer *iob;
                efx_rx_desc_t *rxd;

                assert(rxq->buf[buf_id] == NULL);
                iob = alloc_iob(EFX_RX_BUF_SIZE);
                if (!iob)
                        break;

                DBGCP(efx, "pushing rx_buf[%d] iob %p data %p\n",
                      buf_id, iob, iob->data);

                rxq->buf[buf_id] = iob;
                rxd = rxq->ring + desc_id;
                efx_hunt_build_rx_desc(rxd, iob);
                ++rxq->write_ptr;
                ++pushed;
                --space;
        }

        /* Push the ptr to hardware */
        if (pushed > 0) {
                efx_hunt_notify_rx_desc(efx);

                DBGCP(efx, "pushed %d rx buffers to fill level %d\n",
                      pushed, rxq->write_ptr - rxq->read_ptr);
        }
}
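
/* Pop the buffer at read_ptr, sanity-check it against the descriptor
 * index reported in the RX event, and hand the packet to the stack
 * (or discard it when drop is set).
 */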

static void
efx_hunt_receive(struct efx_nic *efx, unsigned int id, int len, int drop)
{
        struct efx_rx_queue *rxq = &efx->rxq;
        unsigned int read_ptr = rxq->read_ptr & EFX_RXD_MASK;
        unsigned int buf_ptr = rxq->read_ptr & EFX_NUM_RX_DESC_MASK;
        struct io_buffer *iob;

        /* On Huntington, id is the lower 4 bits of the descriptor index
         * plus one, hence the AND with 15.
         */
        assert((id & 15) == ((read_ptr + (len != 0)) & 15));

        /* Pop this rx buffer out of the software ring */
        iob = rxq->buf[buf_ptr];
        rxq->buf[buf_ptr] = NULL;

        DBGCIO(efx, "popping rx_buf[%d] iob %p data %p with %d bytes %s %x\n",
               read_ptr, iob, iob->data, len, drop ? "bad" : "ok", drop);

        /* Pass the packet up if required */
        if (drop)
                netdev_rx_err(efx->netdev, iob, EBADMSG);
        else {
                iob_put(iob, len);
                iob_pull(iob, efx->rx_prefix_size);
                netdev_rx(efx->netdev, iob);
        }

        ++rxq->read_ptr;
}

int efx_hunt_rx_init(struct net_device *netdev, dma_addr_t *dma_addr)
{
        struct efx_nic *efx = netdev->priv;
        struct efx_rx_queue *rxq = &efx->rxq;
        size_t bytes;

        /* Allocate hardware receive queue */
        bytes = sizeof(efx_rx_desc_t) * EFX_RXD_SIZE;
        rxq->ring = efx_hunt_alloc_special_buffer(bytes, &rxq->entry);
        if (rxq->ring == NULL)
                return -ENOMEM;

        rxq->read_ptr = rxq->write_ptr = 0;
        *dma_addr = rxq->entry.dma_addr;
        return 0;
}

/*******************************************************************************
 *
 *
 * Event queues and interrupts
 *
 *
 ******************************************************************************/
int efx_hunt_ev_init(struct net_device *netdev, dma_addr_t *dma_addr)
{
        struct efx_nic *efx = netdev->priv;
        struct efx_ev_queue *evq = &efx->evq;
        size_t bytes;

        /* Allocate the hardware event queue */
        bytes = sizeof(efx_event_t) * EFX_EVQ_SIZE;
        evq->ring = efx_hunt_alloc_special_buffer(bytes, &evq->entry);
        if (evq->ring == NULL)
                return -ENOMEM;

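        /* An all-ones pattern marks an event-queue entry as empty; see
         * efx_hunt_event_present() below.
         */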
        memset(evq->ring, 0xff, bytes);
        evq->read_ptr = 0;
        *dma_addr = evq->entry.dma_addr;
        return 0;
}

static void
efx_hunt_clear_interrupts(struct efx_nic *efx)
{
        efx_dword_t reg;

        /* Read the ISR to clear any pending interrupts */
        efx_readl(efx, &reg, ER_DZ_BIU_INT_ISR);
}

/**
 * See if an event is present
 *
 * @v event        EFX event structure
 * @ret True       An event is pending
 * @ret False      No event is pending
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int
efx_hunt_event_present(efx_event_t *event)
{
        return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
                  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}
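
/* Update the hardware event-queue read pointer.  With workaround 35388
 * the pointer must be written as two halves through the indirect
 * ER_DD_EVQ_INDIRECT register; otherwise a single EVQ_RPTR write is
 * enough.
 */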

static void
efx_hunt_evq_read_ack(struct efx_nic *efx)
{
        struct efx_ev_queue *evq = &efx->evq;
        efx_dword_t reg;

        if (efx->workaround_35388) {
                EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
                                     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
                                     ERF_DD_EVQ_IND_RPTR,
                                     evq->read_ptr >> ERF_DD_EVQ_IND_RPTR_WIDTH);
                efx_writel_page(efx, &reg, 0, ER_DD_EVQ_INDIRECT);
                EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
                                     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
                                     ERF_DD_EVQ_IND_RPTR, evq->read_ptr &
                                     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
                efx_writel_page(efx, &reg, 0, ER_DD_EVQ_INDIRECT);
        } else {
                EFX_POPULATE_DWORD_1(reg, ERF_DZ_EVQ_RPTR, evq->read_ptr);
                efx_writel_table(efx, &reg, 0, ER_DZ_EVQ_RPTR);
        }
}

static unsigned int
efx_hunt_handle_event(struct efx_nic *efx, efx_event_t *evt)
{
        struct efx_rx_queue *rxq = &efx->rxq;
        int ev_code, desc_ptr, len;
        int next_ptr_lbits, packet_drop;
        int rx_cont;

        /* Decode event */
        ev_code = EFX_QWORD_FIELD(*evt, ESF_DZ_EV_CODE);

        switch (ev_code) {
        case ESE_DZ_EV_CODE_TX_EV:
                desc_ptr = EFX_QWORD_FIELD(*evt, ESF_DZ_TX_DESCR_INDX);
                efx_hunt_transmit_done(efx, desc_ptr);
                break;

        case ESE_DZ_EV_CODE_RX_EV:
                len = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_BYTES);
                next_ptr_lbits = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_DSC_PTR_LBITS);
                rx_cont = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_CONT);

                /* We don't expect to receive scattered packets, so drop the
                 * packet if RX_CONT is set on the current or previous event, or
                 * if len is zero.
                 */
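                /* The encoding (bit 0: zero length, bit 1: RX_CONT,
                 * bit 2: previous RX_CONT) appears as the "%x" value in
                 * efx_hunt_receive()'s debug output.
                 */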
                packet_drop = (len == 0) | (rx_cont << 1) |
                              (rxq->rx_cont_prev << 2);
                efx_hunt_receive(efx, next_ptr_lbits, len, packet_drop);
                rxq->rx_cont_prev = rx_cont;
                return 1;

        default:
                DBGCP(efx, "Unknown event type %d\n", ev_code);
                break;
        }
        return 0;
}
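
/* Poll for completed events.  Only received packets count against the
 * budget (efx_hunt_handle_event() returns 1 for RX events and 0
 * otherwise), bounding the netdev_rx() work done per poll.
 */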

void efx_hunt_poll(struct net_device *netdev)
{
        struct efx_nic *efx = netdev->priv;
        struct efx_ev_queue *evq = &efx->evq;
        efx_event_t *evt;
        int budget = 10;

        /* Read the event queue by directly looking for events
         * (we don't even bother to read the eventq write ptr)
         */
        evt = evq->ring + evq->read_ptr;
        while (efx_hunt_event_present(evt) && (budget > 0)) {
                DBGCP(efx, "Event at index 0x%x address %p is "
                      EFX_QWORD_FMT "\n", evq->read_ptr,
                      evt, EFX_QWORD_VAL(*evt));

                budget -= efx_hunt_handle_event(efx, evt);

                /* Clear the event */
                EFX_SET_QWORD(*evt);

                /* Move to the next event.  We don't ack the event
                 * queue until the end
                 */
                evq->read_ptr = ((evq->read_ptr + 1) & EFX_EVQ_MASK);
                evt = evq->ring + evq->read_ptr;
        }

        /* Push more rx buffers if needed */
        efx_hunt_rxq_fill(efx);

        /* Clear any pending interrupts */
        efx_hunt_clear_interrupts(efx);

        /* Ack the event queue if interrupts are enabled */
        if (efx->int_en)
                efx_hunt_evq_read_ack(efx);
}

void efx_hunt_irq(struct net_device *netdev, int enable)
{
        struct efx_nic *efx = netdev->priv;

        efx->int_en = enable;

        /* If interrupts are enabled, prime the event queue.  Otherwise ack
         * any pending interrupts.
         */
        if (enable)
                efx_hunt_evq_read_ack(efx);
        else if (efx->netdev->state & NETDEV_OPEN)
                efx_hunt_clear_interrupts(efx);
}

/*******************************************************************************
 *
 *
 * Initialization and Close
 *
 *
 ******************************************************************************/
int efx_hunt_open(struct net_device *netdev)
{
        struct efx_nic *efx = netdev->priv;
        efx_dword_t cmd;

        /* Set interrupt moderation to 0 */
        EFX_POPULATE_DWORD_2(cmd,
                             ERF_DZ_TC_TIMER_MODE, 0,
                             ERF_DZ_TC_TIMER_VAL, 0);
        efx_writel_page(efx, &cmd, 0, ER_DZ_EVQ_TMR);

        /* Ack the eventq */
        if (efx->int_en)
                efx_hunt_evq_read_ack(efx);

        /* Push receive buffers */
        efx_hunt_rxq_fill(efx);

        return 0;
}

void efx_hunt_close(struct net_device *netdev)
{
        struct efx_nic *efx = netdev->priv;
        struct efx_rx_queue *rxq = &efx->rxq;
        struct efx_tx_queue *txq = &efx->txq;
        int i;

        /* Complete outstanding descriptors */
        for (i = 0; i < EFX_NUM_RX_DESC; i++) {
                if (rxq->buf[i]) {
                        free_iob(rxq->buf[i]);
                        rxq->buf[i] = NULL;
                }
        }

        for (i = 0; i < EFX_TXD_SIZE; i++) {
                if (txq->buf[i]) {
                        netdev_tx_complete(efx->netdev, txq->buf[i]);
                        txq->buf[i] = NULL;
                }
        }

        /* Clear interrupts */
        efx_hunt_clear_interrupts(efx);
}