iPXE
efx_hunt.c
Go to the documentation of this file.
1 /**************************************************************************
2  *
3  * Driver datapath for Solarflare network cards
4  *
5  * Written by Shradha Shah, maintained by <pre-boot-drivers@xilinx.com>
6  *
7  * Copyright 2012-2019 Solarflare Communications Inc.
8  * Copyright 2019-2020 Xilinx Inc.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License as
12  * published by the Free Software Foundation; either version 2 of the
13  * License, or any later version.
14  *
15  * You can also choose to distribute this program under the terms of
16  * the Unmodified Binary Distribution Licence (as given in the file
17  * COPYING.UBDL), provided that you have satisfied its requirements.
18  *
19  ***************************************************************************/
20 
21 #include <stdint.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <unistd.h>
25 #include <errno.h>
26 #include <assert.h>
27 #include <byteswap.h>
28 #include <ipxe/io.h>
29 #include <ipxe/pci.h>
30 #include <ipxe/malloc.h>
31 #include <ipxe/iobuf.h>
32 #include <ipxe/netdevice.h>
33 #include "efx_hunt.h"
34 #include "efx_bitfield.h"
35 #include "ef10_regs.h"
36 
37 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
38 
/* Release a queue buffer previously obtained from
 * efx_hunt_alloc_special_buffer().
 */
void efx_hunt_free_special_buffer(void *buf, int bytes)
{
	free_phys(buf, bytes);
}
43 
45  struct efx_special_buffer *entry)
46 {
47  void *buffer;
48  dma_addr_t dma_addr;
49 
50  /* Allocate the buffer, aligned on a buffer address boundary. This
51  * buffer will be passed into an MC_CMD_INIT_*Q command to setup the
52  * appropriate type of queue via MCDI.
53  */
55  if (!buffer)
56  return NULL;
57 
58  entry->dma_addr = dma_addr = virt_to_bus(buffer);
59  assert((dma_addr & (EFX_BUF_ALIGN - 1)) == 0);
60 
61  /* Buffer table entries aren't allocated, so set id to zero */
62  entry->id = 0;
63  DBGP("Allocated 0x%x bytes at %p\n", bytes, buffer);
64 
65  return buffer;
66 }
67 
68 /*******************************************************************************
69  *
70  *
71  * TX
72  *
73  *
74  ******************************************************************************/
75 static void
77 {
78  dma_addr_t dma_addr;
79 
80  dma_addr = virt_to_bus(iob->data);
81 
83  ESF_DZ_TX_KER_TYPE, 0,
84  ESF_DZ_TX_KER_CONT, 0,
85  ESF_DZ_TX_KER_BYTE_CNT, iob_len(iob),
86  ESF_DZ_TX_KER_BUF_ADDR, dma_addr);
87 }
88 
89 static void
91 {
92  struct efx_tx_queue *txq = &efx->txq;
93  int ptr = txq->write_ptr & EFX_TXD_MASK;
95 
96  EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, ptr);
98 }
99 
100 int
102 {
103  struct efx_nic *efx = netdev->priv;
104  struct efx_tx_queue *txq = &efx->txq;
105  int fill_level, space;
107  int buf_id;
108 
109  fill_level = txq->write_ptr - txq->read_ptr;
110  space = EFX_TXD_SIZE - fill_level - 1;
111  if (space < 1)
112  return -ENOBUFS;
113 
114  /* Save the iobuffer for later completion */
115  buf_id = txq->write_ptr & EFX_TXD_MASK;
116  assert(txq->buf[buf_id] == NULL);
117  txq->buf[buf_id] = iob;
118 
119  DBGCIO(efx, "tx_buf[%d] for iob %p data %p len %zd\n",
120  buf_id, iob, iob->data, iob_len(iob));
121 
122  /* Form the descriptor, and push it to hardware */
123  txd = txq->ring + buf_id;
125  ++txq->write_ptr;
127 
128  return 0;
129 }
130 
131 static void
132 efx_hunt_transmit_done(struct efx_nic *efx, int id)
133 {
134  struct efx_tx_queue *txq = &efx->txq;
135  unsigned int read_ptr, stop;
136 
137  /* Complete all buffers from read_ptr up to and including id */
138  read_ptr = txq->read_ptr & EFX_TXD_MASK;
139  stop = (id + 1) & EFX_TXD_MASK;
140 
141  while (read_ptr != stop) {
142  struct io_buffer *iob = txq->buf[read_ptr];
143 
144  assert(iob);
145  /* Complete the tx buffer */
146  if (iob)
147  netdev_tx_complete(efx->netdev, iob);
148  DBGCIO(efx, "tx_buf[%d] for iob %p done\n", read_ptr, iob);
149  txq->buf[read_ptr] = NULL;
150 
151  ++txq->read_ptr;
152  read_ptr = txq->read_ptr & EFX_TXD_MASK;
153  }
154 }
155 
157 {
158  struct efx_nic *efx = netdev->priv;
159  struct efx_tx_queue *txq = &efx->txq;
160  size_t bytes;
161 
162  /* Allocate hardware transmit queue */
163  bytes = sizeof(efx_tx_desc_t) * EFX_TXD_SIZE;
165  if (!txq->ring)
166  return -ENOMEM;
167 
168  txq->read_ptr = txq->write_ptr = 0;
169  *dma_addr = txq->entry.dma_addr;
170  return 0;
171 }
172 
173 /*******************************************************************************
174  *
175  *
176  * RX
177  *
178  *
179  ******************************************************************************/
180 static void
182 {
183  dma_addr_t dma_addr = virt_to_bus(iob->data);
184 
186  ESF_DZ_RX_KER_BYTE_CNT, EFX_RX_BUF_SIZE,
187  ESF_DZ_RX_KER_BUF_ADDR, dma_addr);
188 }
189 
190 static void
192 {
193  struct efx_rx_queue *rxq = &efx->rxq;
194  int ptr = rxq->write_ptr & EFX_RXD_MASK;
196 
197  EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, ptr);
199 }
200 
201 static void
203 {
204  struct efx_rx_queue *rxq = &efx->rxq;
205  int fill_level = rxq->write_ptr - rxq->read_ptr;
206  int space = EFX_NUM_RX_DESC - fill_level - 1;
207  int pushed = 0;
208 
209  while (space) {
210  int buf_id = rxq->write_ptr & (EFX_NUM_RX_DESC - 1);
211  int desc_id = rxq->write_ptr & EFX_RXD_MASK;
212  struct io_buffer *iob;
214 
215  assert(rxq->buf[buf_id] == NULL);
216  iob = alloc_iob(EFX_RX_BUF_SIZE);
217  if (!iob)
218  break;
219 
220  DBGCP(efx, "pushing rx_buf[%d] iob %p data %p\n",
221  buf_id, iob, iob->data);
222 
223  rxq->buf[buf_id] = iob;
224  rxd = rxq->ring + desc_id;
226  ++rxq->write_ptr;
227  ++pushed;
228  --space;
229  }
230 
231  /* Push the ptr to hardware */
232  if (pushed > 0) {
234 
235  DBGCP(efx, "pushed %d rx buffers to fill level %d\n",
236  pushed, rxq->write_ptr - rxq->read_ptr);
237  }
238 }
239 
240 static void
241 efx_hunt_receive(struct efx_nic *efx, unsigned int id, int len, int drop)
242 {
243  struct efx_rx_queue *rxq = &efx->rxq;
244  unsigned int read_ptr = rxq->read_ptr & EFX_RXD_MASK;
245  unsigned int buf_ptr = rxq->read_ptr & EFX_NUM_RX_DESC_MASK;
246  struct io_buffer *iob;
247 
248  /* id is the lower 4 bits of the desc index + 1 in huntington*/
249  /* hence anding with 15 */
250  assert((id & 15) == ((read_ptr + (len != 0)) & 15));
251 
252  /* Pop this rx buffer out of the software ring */
253  iob = rxq->buf[buf_ptr];
254  rxq->buf[buf_ptr] = NULL;
255 
256  DBGCIO(efx, "popping rx_buf[%d] iob %p data %p with %d bytes %s %x\n",
257  read_ptr, iob, iob->data, len, drop ? "bad" : "ok", drop);
258 
259  /* Pass the packet up if required */
260  if (drop)
261  netdev_rx_err(efx->netdev, iob, EBADMSG);
262  else {
263  iob_put(iob, len);
264  iob_pull(iob, efx->rx_prefix_size);
265  netdev_rx(efx->netdev, iob);
266  }
267 
268  ++rxq->read_ptr;
269 }
270 
272 {
273  struct efx_nic *efx = netdev->priv;
274  struct efx_rx_queue *rxq = &efx->rxq;
275  size_t bytes;
276 
277  /* Allocate hardware receive queue */
278  bytes = sizeof(efx_rx_desc_t) * EFX_RXD_SIZE;
280  if (rxq->ring == NULL)
281  return -ENOMEM;
282 
283  rxq->read_ptr = rxq->write_ptr = 0;
284  *dma_addr = rxq->entry.dma_addr;
285  return 0;
286 }
287 
288 /*******************************************************************************
289  *
290  *
291  * Event queues and interrupts
292  *
293  *
294  ******************************************************************************/
296 {
297  struct efx_nic *efx = netdev->priv;
298  struct efx_ev_queue *evq = &efx->evq;
299  size_t bytes;
300 
301  /* Allocate the hardware event queue */
302  bytes = sizeof(efx_event_t) * EFX_EVQ_SIZE;
304  if (evq->ring == NULL)
305  return -ENOMEM;
306 
307  memset(evq->ring, 0xff, bytes);
308  evq->read_ptr = 0;
309  *dma_addr = evq->entry.dma_addr;
310  return 0;
311 }
312 
313 static void
315 {
317  /* read the ISR */
319 }
320 
321 /**
322  * See if an event is present
323  *
324  * @v event EFX event structure
325  * @ret True An event is pending
326  * @ret False No event is pending
327  *
328  * We check both the high and low dword of the event for all ones. We
329  * wrote all ones when we cleared the event, and no valid event can
330  * have all ones in either its high or low dwords. This approach is
331  * robust against reordering.
332  *
333  * Note that using a single 64-bit comparison is incorrect; even
334  * though the CPU read will be atomic, the DMA write may not be.
335  */
336 static inline int
338 {
339  return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
340  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
341 }
342 
343 static void
345 {
346  struct efx_ev_queue *evq = &efx->evq;
348 
349  if (efx->workaround_35388) {
350  EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
352  ERF_DD_EVQ_IND_RPTR,
355  EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
357  ERF_DD_EVQ_IND_RPTR, evq->read_ptr &
358  ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
360  } else {
361  EFX_POPULATE_DWORD_1(reg, ERF_DZ_EVQ_RPTR, evq->read_ptr);
363  }
364 }
365 
366 static unsigned int
368 {
369  struct efx_rx_queue *rxq = &efx->rxq;
370  int ev_code, desc_ptr, len;
371  int next_ptr_lbits, packet_drop;
372  int rx_cont;
373 
374  /* Decode event */
375  ev_code = EFX_QWORD_FIELD(*evt, ESF_DZ_EV_CODE);
376 
377  switch (ev_code) {
379  desc_ptr = EFX_QWORD_FIELD(*evt, ESF_DZ_TX_DESCR_INDX);
380  efx_hunt_transmit_done(efx, desc_ptr);
381  break;
382 
384  len = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_BYTES);
385  next_ptr_lbits = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_DSC_PTR_LBITS);
386  rx_cont = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_CONT);
387 
388  /* We don't expect to receive scattered packets, so drop the
389  * packet if RX_CONT is set on the current or previous event, or
390  * if len is zero.
391  */
392  packet_drop = (len == 0) | (rx_cont << 1) |
393  (rxq->rx_cont_prev << 2);
394  efx_hunt_receive(efx, next_ptr_lbits, len, packet_drop);
395  rxq->rx_cont_prev = rx_cont;
396  return 1;
397 
398  default:
399  DBGCP(efx, "Unknown event type %d\n", ev_code);
400  break;
401  }
402  return 0;
403 }
404 
406 {
407  struct efx_nic *efx = netdev->priv;
408  struct efx_ev_queue *evq = &efx->evq;
409  efx_event_t *evt;
410  int budget = 10;
411 
412  /* Read the event queue by directly looking for events
413  * (we don't even bother to read the eventq write ptr)
414  */
415  evt = evq->ring + evq->read_ptr;
416  while (efx_hunt_event_present(evt) && (budget > 0)) {
417  DBGCP(efx, "Event at index 0x%x address %p is "
418  EFX_QWORD_FMT "\n", evq->read_ptr,
419  evt, EFX_QWORD_VAL(*evt));
420 
421  budget -= efx_hunt_handle_event(efx, evt);
422 
423  /* Clear the event */
424  EFX_SET_QWORD(*evt);
425 
426  /* Move to the next event. We don't ack the event
427  * queue until the end
428  */
429  evq->read_ptr = ((evq->read_ptr + 1) & EFX_EVQ_MASK);
430  evt = evq->ring + evq->read_ptr;
431  }
432 
433  /* Push more rx buffers if needed */
434  efx_hunt_rxq_fill(efx);
435 
436  /* Clear any pending interrupts */
438 
439  /* Ack the event queue if interrupts are enabled */
440  if (efx->int_en)
442 }
443 
444 void efx_hunt_irq(struct net_device *netdev, int enable)
445 {
446  struct efx_nic *efx = netdev->priv;
447 
448  efx->int_en = enable;
449 
450  /* If interrupts are enabled, prime the event queue. Otherwise ack any
451  * pending interrupts
452  */
453  if (enable)
455  else if (efx->netdev->state & NETDEV_OPEN)
457 }
458 
459 /*******************************************************************************
460  *
461  *
462  * Initialization and Close
463  *
464  *
465  ******************************************************************************/
467 {
468  struct efx_nic *efx = netdev->priv;
470 
471  /* Set interrupt moderation to 0*/
473  ERF_DZ_TC_TIMER_MODE, 0,
474  ERF_DZ_TC_TIMER_VAL, 0);
475  efx_writel_page(efx, &cmd, 0, ER_DZ_EVQ_TMR);
476 
477  /* Ack the eventq */
478  if (efx->int_en)
480 
481  /* Push receive buffers */
482  efx_hunt_rxq_fill(efx);
483 
484  return 0;
485 }
486 
488 {
489  struct efx_nic *efx = netdev->priv;
490  struct efx_rx_queue *rxq = &efx->rxq;
491  struct efx_tx_queue *txq = &efx->txq;
492  int i;
493 
494  /* Complete outstanding descriptors */
495  for (i = 0; i < EFX_NUM_RX_DESC; i++) {
496  if (rxq->buf[i]) {
497  free_iob(rxq->buf[i]);
498  rxq->buf[i] = NULL;
499  }
500  }
501 
502  for (i = 0; i < EFX_TXD_SIZE; i++) {
503  if (txq->buf[i]) {
504  netdev_tx_complete(efx->netdev, txq->buf[i]);
505  txq->buf[i] = NULL;
506  }
507  }
508 
509  /* Clear interrupts */
511 }
#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW
Definition: ef10_regs.h:339
void efx_hunt_irq(struct net_device *netdev, int enable)
Definition: efx_hunt.c:444
#define iob_pull(iobuf, len)
Definition: iobuf.h:102
#define ER_DZ_BIU_INT_ISR
Definition: ef10_regs.h:65
#define EFX_QWORD_FIELD
Definition: efx_bitfield.h:242
iPXE I/O API
void efx_hunt_poll(struct net_device *netdev)
Definition: efx_hunt.c:405
static void netdev_tx_complete(struct net_device *netdev, struct io_buffer *iobuf)
Complete network transmission.
Definition: netdevice.h:752
#define efx_writel_table(efx, value, index, reg)
Definition: efx_common.h:216
#define iob_put(iobuf, len)
Definition: iobuf.h:120
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition: netdevice.c:586
int efx_hunt_rx_init(struct net_device *netdev, dma_addr_t *dma_addr)
Definition: efx_hunt.c:271
static int efx_hunt_event_present(efx_event_t *event)
See if an event is present.
Definition: efx_hunt.c:337
#define EFX_TXD_MASK
Definition: efx_common.h:59
static unsigned int unsigned int reg
Definition: myson.h:162
struct efx_special_buffer entry
Definition: efx_common.h:115
dma_addr_t dma_addr
Definition: efx_common.h:84
Hardware access.
Definition: efx_common.h:147
A transmit queue.
Definition: efx_common.h:89
#define NETDEV_OPEN
Network device is open.
Definition: netdevice.h:438
A receive queue.
Definition: efx_common.h:107
#define ER_DZ_EVQ_TMR
Definition: ef10_regs.h:89
Error codes.
efx_dword_t dword[2]
Definition: efx_bitfield.h:95
#define EFX_EVQ_MASK
Definition: efx_common.h:61
#define EFX_EVQ_SIZE
Definition: efx_common.h:60
unsigned int read_ptr
Definition: efx_common.h:121
I/O buffers.
void free_iob(struct io_buffer *iobuf)
Free I/O buffer.
Definition: iobuf.c:146
static void efx_hunt_build_rx_desc(efx_rx_desc_t *rxd, struct io_buffer *iob)
Definition: efx_hunt.c:181
#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH
Definition: ef10_regs.h:338
efx_rx_desc_t * ring
Definition: efx_common.h:109
bool workaround_35388
Definition: efx_common.h:175
unsigned int state
Current device state.
Definition: netdevice.h:395
static void *__malloc malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition: malloc.h:62
int efx_hunt_ev_init(struct net_device *netdev, dma_addr_t *dma_addr)
Definition: efx_hunt.c:295
unsigned long dma_addr_t
Definition: bnx2.h:20
#define ER_DD_EVQ_INDIRECT
Definition: ef10_regs.h:335
static void efx_hunt_rxq_fill(struct efx_nic *efx)
Definition: efx_hunt.c:202
#define efx_writel_page(efx, value, index, reg)
Definition: efx_common.h:219
uint32_t buffer
Buffer index (or NETVSC_RNDIS_NO_BUFFER)
Definition: netvsc.h:16
int efx_hunt_tx_init(struct net_device *netdev, dma_addr_t *dma_addr)
Definition: efx_hunt.c:156
unsigned int rx_cont_prev
Definition: efx_common.h:124
A doubleword (4 byte) datatype - little-endian in HW.
Definition: efx_bitfield.h:87
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:129
Dynamic memory allocation.
#define ESE_DZ_EV_CODE_RX_EV
Definition: ef10_regs.h:138
#define ER_DZ_EVQ_RPTR
Definition: ef10_regs.h:80
static void efx_hunt_notify_rx_desc(struct efx_nic *efx)
Definition: efx_hunt.c:191
static void efx_hunt_receive(struct efx_nic *efx, unsigned int id, int len, int drop)
Definition: efx_hunt.c:241
#define EFX_POPULATE_QWORD_4(qword,...)
Definition: efx_bitfield.h:390
#define ENOMEM
Not enough space.
Definition: errno.h:534
A buffer table allocation backing a tx dma, rx dma or eventq.
Definition: efx_common.h:83
int int_en
INT_REG_KER.
Definition: efx_common.h:171
#define EFX_NUM_RX_DESC_MASK
Definition: efx_common.h:69
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
void efx_hunt_close(struct net_device *netdev)
Definition: efx_hunt.c:487
#define DBGP(...)
Definition: compiler.h:532
Assertions.
#define EBADMSG
Bad message.
Definition: errno.h:333
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
efx_tx_desc_t * ring
Definition: efx_common.h:91
void * priv
Driver private data.
Definition: netdevice.h:431
Efx bitfield access.
#define EFX_SET_QWORD(qword)
Definition: efx_bitfield.h:400
static void efx_hunt_clear_interrupts(struct efx_nic *efx)
Definition: efx_hunt.c:314
static void efx_hunt_build_tx_desc(efx_tx_desc_t *txd, struct io_buffer *iob)
Definition: efx_hunt.c:76
static struct net_device * netdev
Definition: gdbudp.c:52
#define EFX_RXD_SIZE
Definition: efx_common.h:56
efx_qword_t efx_tx_desc_t
Definition: efx_common.h:52
#define EFX_NUM_RX_DESC
Definition: efx_common.h:68
#define ER_DZ_RX_DESC_UPD
Definition: ef10_regs.h:98
EF10 hardware architecture definitions.
#define EFX_POPULATE_QWORD_2(qword,...)
Definition: efx_bitfield.h:394
struct efx_special_buffer entry
Definition: efx_common.h:135
#define EFX_RXD_MASK
Definition: efx_common.h:57
unsigned int read_ptr
Definition: efx_common.h:103
#define EFX_DWORD_IS_ALL_ONES(dword)
Definition: efx_bitfield.h:230
unsigned int write_ptr
Definition: efx_common.h:100
#define txd
Definition: davicom.c:143
union aes_table_entry entry[256]
Table entries, indexed by S(N)
Definition: aes.c:26
PCI bus.
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:155
static void efx_hunt_transmit_done(struct efx_nic *efx, int id)
Definition: efx_hunt.c:132
FILE_LICENCE(GPL2_OR_LATER_OR_UBDL)
A network device.
Definition: netdevice.h:352
#define EFX_RX_BUF_SIZE
Definition: efx_common.h:74
#define EFX_QWORD_VAL(qword)
Definition: efx_bitfield.h:112
#define EFX_POPULATE_DWORD_1(dword,...)
Definition: efx_bitfield.h:423
#define DBGCIO(...)
Definition: compiler.h:556
struct efx_rx_queue rxq
Definition: efx_common.h:164
#define EFX_POPULATE_DWORD_2(dword,...)
Definition: efx_bitfield.h:421
unsigned int rx_prefix_size
Definition: efx_common.h:168
#define ESE_DZ_EV_CODE_TX_EV
Definition: ef10_regs.h:137
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition: netdevice.c:548
efx_qword_t efx_rx_desc_t
Definition: efx_common.h:51
int efx_hunt_open(struct net_device *netdev)
Definition: efx_hunt.c:466
efx_qword_t efx_event_t
Definition: efx_common.h:53
efx_event_t * ring
Definition: efx_common.h:132
Network device management.
uint32_t len
Length.
Definition: ena.h:14
int efx_hunt_transmit(struct net_device *netdev, struct io_buffer *iob)
Definition: efx_hunt.c:101
#define ENOBUFS
No buffer space available.
Definition: errno.h:498
#define ER_DZ_TX_DESC_UPD_DWORD
Definition: ef10_regs.h:325
struct efx_ev_queue evq
Definition: efx_common.h:166
#define EFX_QWORD_FMT
Definition: efx_bitfield.h:108
struct efx_tx_queue txq
Definition: efx_common.h:165
void * data
Start of data.
Definition: iobuf.h:48
struct io_buffer * buf[EFX_TXD_SIZE]
Definition: efx_common.h:94
static unsigned int efx_hunt_handle_event(struct efx_nic *efx, efx_event_t *evt)
Definition: efx_hunt.c:367
#define EFX_BUF_ALIGN
Definition: efx_common.h:55
#define EFX_TXD_SIZE
Definition: efx_common.h:58
#define DBGCP(...)
Definition: compiler.h:539
static void free_phys(void *ptr, size_t size)
Free memory allocated with malloc_phys()
Definition: malloc.h:77
struct io_buffer * buf[EFX_NUM_RX_DESC]
Definition: efx_common.h:112
unsigned int write_ptr
Definition: efx_common.h:118
An event queue.
Definition: efx_common.h:128
struct efx_special_buffer entry
Definition: efx_common.h:97
static void * efx_hunt_alloc_special_buffer(int bytes, struct efx_special_buffer *entry)
Definition: efx_hunt.c:44
void efx_hunt_free_special_buffer(void *buf, int bytes)
Definition: efx_hunt.c:39
unsigned int read_ptr
Definition: efx_common.h:138
#define ERF_DD_EVQ_IND_RPTR_WIDTH
Definition: ef10_regs.h:341
uint8_t bytes[64]
Definition: ib_mad.h:16
void efx_readl(struct efx_nic *efx, efx_dword_t *value, unsigned int reg)
Definition: efx_common.c:57
static void efx_hunt_notify_tx_desc(struct efx_nic *efx)
Definition: efx_hunt.c:90
static void efx_hunt_evq_read_ack(struct efx_nic *efx)
Definition: efx_hunt.c:344
#define NULL
NULL pointer (VOID *)
Definition: Base.h:321
struct golan_eqe_cmd cmd
Definition: CIB_PRM.h:29
A quadword (8 byte) datatype - little-endian in HW.
Definition: efx_bitfield.h:92
struct net_device * netdev
Definition: efx_common.h:148
#define rxd
Definition: davicom.c:145
void * memset(void *dest, int character, size_t len) __nonnull
A persistent I/O buffer.
Definition: iobuf.h:33