iPXE
efx_hunt.c
Go to the documentation of this file.
1 /**************************************************************************
2  *
3  * Driver datapath for Solarflare network cards
4  *
5  * Written by Shradha Shah <sshah@solarflare.com>
6  *
7  * Copyright 2012-2017 Solarflare Communications Inc.
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License as
11  * published by the Free Software Foundation; either version 2 of the
12  * License, or any later version.
13  *
14  * You can also choose to distribute this program under the terms of
15  * the Unmodified Binary Distribution Licence (as given in the file
16  * COPYING.UBDL), provided that you have satisfied its requirements.
17  *
18  ***************************************************************************/
19 
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <unistd.h>
24 #include <errno.h>
25 #include <assert.h>
26 #include <byteswap.h>
27 #include <ipxe/io.h>
28 #include <ipxe/pci.h>
29 #include <ipxe/malloc.h>
30 #include <ipxe/iobuf.h>
31 #include <ipxe/netdevice.h>
32 #include "efx_hunt.h"
33 #include "efx_bitfield.h"
34 #include "ef10_regs.h"
35 
36 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
37 
/**
 * Release a DMA-coherent special buffer
 *
 * @v buf	Virtual address returned by the special buffer allocator
 * @v bytes	Allocation size in bytes, as originally requested
 */
void efx_hunt_free_special_buffer(void *buf, int bytes)
{
	free_dma(buf, bytes);
}
42 
44  struct efx_special_buffer *entry)
45 {
46  void *buffer;
47  dma_addr_t dma_addr;
48 
49  /* Allocate the buffer, aligned on a buffer address boundary. This
50  * buffer will be passed into an MC_CMD_INIT_*Q command to setup the
51  * appropriate type of queue via MCDI.
52  */
54  if (!buffer)
55  return NULL;
56 
57  entry->dma_addr = dma_addr = virt_to_bus(buffer);
58  assert((dma_addr & (EFX_BUF_ALIGN - 1)) == 0);
59 
60  /* Buffer table entries aren't allocated, so set id to zero */
61  entry->id = 0;
62  DBGP("Allocated 0x%x bytes at %p\n", bytes, buffer);
63 
64  return buffer;
65 }
66 
67 /*******************************************************************************
68  *
69  *
70  * TX
71  *
72  *
73  ******************************************************************************/
74 static void
76 {
77  dma_addr_t dma_addr;
78 
79  dma_addr = virt_to_bus(iob->data);
80 
82  ESF_DZ_TX_KER_TYPE, 0,
83  ESF_DZ_TX_KER_CONT, 0,
84  ESF_DZ_TX_KER_BYTE_CNT, iob_len(iob),
85  ESF_DZ_TX_KER_BUF_ADDR, dma_addr);
86 }
87 
88 static void
90 {
91  struct efx_tx_queue *txq = &efx->txq;
92  int ptr = txq->write_ptr & EFX_TXD_MASK;
94 
95  EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, ptr);
97 }
98 
99 int
101 {
102  struct efx_nic *efx = netdev_priv(netdev);
103  struct efx_tx_queue *txq = &efx->txq;
104  int fill_level, space;
106  int buf_id;
107 
108  fill_level = txq->write_ptr - txq->read_ptr;
109  space = EFX_TXD_SIZE - fill_level - 1;
110  if (space < 1)
111  return -ENOBUFS;
112 
113  /* Save the iobuffer for later completion */
114  buf_id = txq->write_ptr & EFX_TXD_MASK;
115  assert(txq->buf[buf_id] == NULL);
116  txq->buf[buf_id] = iob;
117 
118  DBGCIO(efx, "tx_buf[%d] for iob %p data %p len %zd\n",
119  buf_id, iob, iob->data, iob_len(iob));
120 
121  /* Form the descriptor, and push it to hardware */
122  txd = txq->ring + buf_id;
124  ++txq->write_ptr;
126 
127  return 0;
128 }
129 
130 static void
131 efx_hunt_transmit_done(struct efx_nic *efx, int id)
132 {
133  struct efx_tx_queue *txq = &efx->txq;
134  unsigned int read_ptr, stop;
135 
136  /* Complete all buffers from read_ptr up to and including id */
137  read_ptr = txq->read_ptr & EFX_TXD_MASK;
138  stop = (id + 1) & EFX_TXD_MASK;
139 
140  while (read_ptr != stop) {
141  struct io_buffer *iob = txq->buf[read_ptr];
142 
143  assert(iob);
144  /* Complete the tx buffer */
145  if (iob)
146  netdev_tx_complete(efx->netdev, iob);
147  DBGCIO(efx, "tx_buf[%d] for iob %p done\n", read_ptr, iob);
148  txq->buf[read_ptr] = NULL;
149 
150  ++txq->read_ptr;
151  read_ptr = txq->read_ptr & EFX_TXD_MASK;
152  }
153 }
154 
156 {
157  struct efx_nic *efx = netdev_priv(netdev);
158  struct efx_tx_queue *txq = &efx->txq;
159  size_t bytes;
160 
161  /* Allocate hardware transmit queue */
162  bytes = sizeof(efx_tx_desc_t) * EFX_TXD_SIZE;
164  if (!txq->ring)
165  return -ENOMEM;
166 
167  txq->read_ptr = txq->write_ptr = 0;
168  *dma_addr = txq->entry.dma_addr;
169  return 0;
170 }
171 
172 /*******************************************************************************
173  *
174  *
175  * RX
176  *
177  *
178  ******************************************************************************/
179 static void
181 {
182  dma_addr_t dma_addr = virt_to_bus(iob->data);
183 
185  ESF_DZ_RX_KER_BYTE_CNT, EFX_RX_BUF_SIZE,
186  ESF_DZ_RX_KER_BUF_ADDR, dma_addr);
187 }
188 
189 static void
191 {
192  struct efx_rx_queue *rxq = &efx->rxq;
193  int ptr = rxq->write_ptr & EFX_RXD_MASK;
195 
196  EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, ptr);
198 }
199 
200 static void
202 {
203  struct efx_rx_queue *rxq = &efx->rxq;
204  int fill_level = rxq->write_ptr - rxq->read_ptr;
205  int space = EFX_NUM_RX_DESC - fill_level - 1;
206  int pushed = 0;
207 
208  while (space) {
209  int buf_id = rxq->write_ptr & (EFX_NUM_RX_DESC - 1);
210  int desc_id = rxq->write_ptr & EFX_RXD_MASK;
211  struct io_buffer *iob;
213 
214  assert(rxq->buf[buf_id] == NULL);
215  iob = alloc_iob(EFX_RX_BUF_SIZE);
216  if (!iob)
217  break;
218 
219  DBGCP(efx, "pushing rx_buf[%d] iob %p data %p\n",
220  buf_id, iob, iob->data);
221 
222  rxq->buf[buf_id] = iob;
223  rxd = rxq->ring + desc_id;
225  ++rxq->write_ptr;
226  ++pushed;
227  --space;
228  }
229 
230  /* Push the ptr to hardware */
231  if (pushed > 0) {
233 
234  DBGCP(efx, "pushed %d rx buffers to fill level %d\n",
235  pushed, rxq->write_ptr - rxq->read_ptr);
236  }
237 }
238 
239 static void
240 efx_hunt_receive(struct efx_nic *efx, unsigned int id, int len, int drop)
241 {
242  struct efx_rx_queue *rxq = &efx->rxq;
243  unsigned int read_ptr = rxq->read_ptr & EFX_RXD_MASK;
244  unsigned int buf_ptr = rxq->read_ptr & EFX_NUM_RX_DESC_MASK;
245  struct io_buffer *iob;
246 
247  /* id is the lower 4 bits of the desc index + 1 in huntington*/
248  /* hence anding with 15 */
249  assert((id & 15) == ((read_ptr + (len != 0)) & 15));
250 
251  /* Pop this rx buffer out of the software ring */
252  iob = rxq->buf[buf_ptr];
253  rxq->buf[buf_ptr] = NULL;
254 
255  DBGCIO(efx, "popping rx_buf[%d] iob %p data %p with %d bytes %s %x\n",
256  read_ptr, iob, iob->data, len, drop ? "bad" : "ok", drop);
257 
258  /* Pass the packet up if required */
259  if (drop)
260  netdev_rx_err(efx->netdev, iob, EBADMSG);
261  else {
262  iob_put(iob, len);
263  iob_pull(iob, efx->rx_prefix_size);
264  netdev_rx(efx->netdev, iob);
265  }
266 
267  ++rxq->read_ptr;
268 }
269 
271 {
272  struct efx_nic *efx = netdev_priv(netdev);
273  struct efx_rx_queue *rxq = &efx->rxq;
274  size_t bytes;
275 
276  /* Allocate hardware receive queue */
277  bytes = sizeof(efx_rx_desc_t) * EFX_RXD_SIZE;
279  if (rxq->ring == NULL)
280  return -ENOMEM;
281 
282  rxq->read_ptr = rxq->write_ptr = 0;
283  *dma_addr = rxq->entry.dma_addr;
284  return 0;
285 }
286 
287 /*******************************************************************************
288  *
289  *
290  * Event queues and interrupts
291  *
292  *
293  ******************************************************************************/
295 {
296  struct efx_nic *efx = netdev_priv(netdev);
297  struct efx_ev_queue *evq = &efx->evq;
298  size_t bytes;
299 
300  /* Allocate the hardware event queue */
301  bytes = sizeof(efx_event_t) * EFX_EVQ_SIZE;
303  if (evq->ring == NULL)
304  return -ENOMEM;
305 
306  memset(evq->ring, 0xff, bytes);
307  evq->read_ptr = 0;
308  *dma_addr = evq->entry.dma_addr;
309  return 0;
310 }
311 
312 static void
314 {
316  /* read the ISR */
318 }
319 
320 /**
321  * See if an event is present
322  *
323  * @v event EFX event structure
324  * @ret True An event is pending
325  * @ret False No event is pending
326  *
327  * We check both the high and low dword of the event for all ones. We
328  * wrote all ones when we cleared the event, and no valid event can
329  * have all ones in either its high or low dwords. This approach is
330  * robust against reordering.
331  *
332  * Note that using a single 64-bit comparison is incorrect; even
333  * though the CPU read will be atomic, the DMA write may not be.
334  */
335 static inline int
337 {
338  return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
339  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
340 }
341 
342 static void
344 {
345  struct efx_ev_queue *evq = &efx->evq;
347 
348  if (efx->workaround_35388) {
349  EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
351  ERF_DD_EVQ_IND_RPTR,
354  EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
356  ERF_DD_EVQ_IND_RPTR, evq->read_ptr &
357  ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
359  } else {
360  EFX_POPULATE_DWORD_1(reg, ERF_DZ_EVQ_RPTR, evq->read_ptr);
362  }
363 }
364 
365 static unsigned int
367 {
368  struct efx_rx_queue *rxq = &efx->rxq;
369  int ev_code, desc_ptr, len;
370  int next_ptr_lbits, packet_drop;
371  int rx_cont;
372 
373  /* Decode event */
374  ev_code = EFX_QWORD_FIELD(*evt, ESF_DZ_EV_CODE);
375 
376  switch (ev_code) {
378  desc_ptr = EFX_QWORD_FIELD(*evt, ESF_DZ_TX_DESCR_INDX);
379  efx_hunt_transmit_done(efx, desc_ptr);
380  break;
381 
383  len = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_BYTES);
384  next_ptr_lbits = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_DSC_PTR_LBITS);
385  rx_cont = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_CONT);
386 
387  /* We don't expect to receive scattered packets, so drop the
388  * packet if RX_CONT is set on the current or previous event, or
389  * if len is zero.
390  */
391  packet_drop = (len == 0) | (rx_cont << 1) |
392  (rxq->rx_cont_prev << 2);
393  efx_hunt_receive(efx, next_ptr_lbits, len, packet_drop);
394  rxq->rx_cont_prev = rx_cont;
395  return 1;
396 
397  default:
398  DBGCP(efx, "Unknown event type %d\n", ev_code);
399  break;
400  }
401  return 0;
402 }
403 
405 {
406  struct efx_nic *efx = netdev_priv(netdev);
407  struct efx_ev_queue *evq = &efx->evq;
408  efx_event_t *evt;
409  int budget = 10;
410 
411  /* Read the event queue by directly looking for events
412  * (we don't even bother to read the eventq write ptr)
413  */
414  evt = evq->ring + evq->read_ptr;
415  while (efx_hunt_event_present(evt) && (budget > 0)) {
416  DBGCP(efx, "Event at index 0x%x address %p is "
417  EFX_QWORD_FMT "\n", evq->read_ptr,
418  evt, EFX_QWORD_VAL(*evt));
419 
420  budget -= efx_hunt_handle_event(efx, evt);
421 
422  /* Clear the event */
423  EFX_SET_QWORD(*evt);
424 
425  /* Move to the next event. We don't ack the event
426  * queue until the end
427  */
428  evq->read_ptr = ((evq->read_ptr + 1) & EFX_EVQ_MASK);
429  evt = evq->ring + evq->read_ptr;
430  }
431 
432  /* Push more rx buffers if needed */
433  efx_hunt_rxq_fill(efx);
434 
435  /* Clear any pending interrupts */
437 
438  /* Ack the event queue if interrupts are enabled */
439  if (efx->int_en)
441 }
442 
443 void efx_hunt_irq(struct net_device *netdev, int enable)
444 {
445  struct efx_nic *efx = netdev_priv(netdev);
446 
447  efx->int_en = enable;
448 
449  /* If interrupts are enabled, prime the event queue. Otherwise ack any
450  * pending interrupts
451  */
452  if (enable)
454  else if (efx->netdev->state & NETDEV_OPEN)
456 }
457 
458 /*******************************************************************************
459  *
460  *
461  * Initialization and Close
462  *
463  *
464  ******************************************************************************/
466 {
467  struct efx_nic *efx = netdev_priv(netdev);
469 
470  /* Set interrupt moderation to 0*/
472  ERF_DZ_TC_TIMER_MODE, 0,
473  ERF_DZ_TC_TIMER_VAL, 0);
474  efx_writel_page(efx, &cmd, 0, ER_DZ_EVQ_TMR);
475 
476  /* Ack the eventq */
477  if (efx->int_en)
479 
480  /* Push receive buffers */
481  efx_hunt_rxq_fill(efx);
482 
483  return 0;
484 }
485 
487 {
488  struct efx_nic *efx = netdev_priv(netdev);
489  struct efx_rx_queue *rxq = &efx->rxq;
490  struct efx_tx_queue *txq = &efx->txq;
491  int i;
492 
493  /* Complete outstanding descriptors */
494  for (i = 0; i < EFX_NUM_RX_DESC; i++) {
495  if (rxq->buf[i]) {
496  free_iob(rxq->buf[i]);
497  rxq->buf[i] = NULL;
498  }
499  }
500 
501  for (i = 0; i < EFX_TXD_SIZE; i++) {
502  if (txq->buf[i]) {
503  netdev_tx_complete(efx->netdev, txq->buf[i]);
504  txq->buf[i] = NULL;
505  }
506  }
507 
508  /* Clear interrupts */
510 }
#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW
Definition: ef10_regs.h:339
void efx_hunt_irq(struct net_device *netdev, int enable)
Definition: efx_hunt.c:443
#define iob_pull(iobuf, len)
Definition: iobuf.h:98
#define ER_DZ_BIU_INT_ISR
Definition: ef10_regs.h:65
#define EFX_QWORD_FIELD
Definition: efx_bitfield.h:242
static void * netdev_priv(struct net_device *netdev)
Get driver private area for this network device.
Definition: netdevice.h:566
iPXE I/O API
void efx_hunt_poll(struct net_device *netdev)
Definition: efx_hunt.c:404
static void netdev_tx_complete(struct net_device *netdev, struct io_buffer *iobuf)
Complete network transmission.
Definition: netdevice.h:746
#define efx_writel_table(efx, value, index, reg)
Definition: efx_common.h:216
#define iob_put(iobuf, len)
Definition: iobuf.h:116
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition: netdevice.c:501
int efx_hunt_rx_init(struct net_device *netdev, dma_addr_t *dma_addr)
Definition: efx_hunt.c:270
static int efx_hunt_event_present(efx_event_t *event)
See if an event is present.
Definition: efx_hunt.c:336
#define EFX_TXD_MASK
Definition: efx_common.h:59
struct efx_special_buffer entry
Definition: efx_common.h:115
dma_addr_t dma_addr
Definition: efx_common.h:84
Hardware access.
Definition: efx_common.h:147
A transmit queue.
Definition: efx_common.h:89
#define NETDEV_OPEN
Network device is open.
Definition: netdevice.h:432
A receive queue.
Definition: efx_common.h:107
#define ER_DZ_EVQ_TMR
Definition: ef10_regs.h:89
Error codes.
efx_dword_t dword[2]
Definition: efx_bitfield.h:95
#define EFX_EVQ_MASK
Definition: efx_common.h:61
#define EFX_EVQ_SIZE
Definition: efx_common.h:60
unsigned int read_ptr
Definition: efx_common.h:121
I/O buffers.
void free_iob(struct io_buffer *iobuf)
Free I/O buffer.
Definition: iobuf.c:145
static void efx_hunt_build_rx_desc(efx_rx_desc_t *rxd, struct io_buffer *iob)
Definition: efx_hunt.c:180
#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH
Definition: ef10_regs.h:338
efx_rx_desc_t * ring
Definition: efx_common.h:109
bool workaround_35388
Definition: efx_common.h:175
unsigned int state
Current device state.
Definition: netdevice.h:389
int efx_hunt_ev_init(struct net_device *netdev, dma_addr_t *dma_addr)
Definition: efx_hunt.c:294
unsigned long dma_addr_t
Definition: bnx2.h:20
#define ER_DD_EVQ_INDIRECT
Definition: ef10_regs.h:335
static void efx_hunt_rxq_fill(struct efx_nic *efx)
Definition: efx_hunt.c:201
#define efx_writel_page(efx, value, index, reg)
Definition: efx_common.h:219
uint32_t buffer
Buffer index (or NETVSC_RNDIS_NO_BUFFER)
Definition: netvsc.h:16
int efx_hunt_tx_init(struct net_device *netdev, dma_addr_t *dma_addr)
Definition: efx_hunt.c:155
unsigned int rx_cont_prev
Definition: efx_common.h:124
A doubleword (4 byte) datatype - little-endian in HW.
Definition: efx_bitfield.h:87
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:128
Dynamic memory allocation.
#define ESE_DZ_EV_CODE_RX_EV
Definition: ef10_regs.h:138
#define ER_DZ_EVQ_RPTR
Definition: ef10_regs.h:80
static void efx_hunt_notify_rx_desc(struct efx_nic *efx)
Definition: efx_hunt.c:190
static void efx_hunt_receive(struct efx_nic *efx, unsigned int id, int len, int drop)
Definition: efx_hunt.c:240
#define EFX_POPULATE_QWORD_4(qword,...)
Definition: efx_bitfield.h:390
#define ENOMEM
Not enough space.
Definition: errno.h:534
A buffer table allocation backing a tx dma, rx dma or eventq.
Definition: efx_common.h:83
int int_en
INT_REG_KER.
Definition: efx_common.h:171
#define EFX_NUM_RX_DESC_MASK
Definition: efx_common.h:69
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
void efx_hunt_close(struct net_device *netdev)
Definition: efx_hunt.c:486
#define DBGP(...)
Definition: compiler.h:532
Assertions.
#define EBADMSG
Bad message.
Definition: errno.h:333
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
efx_tx_desc_t * ring
Definition: efx_common.h:91
Efx bitfield access.
#define EFX_SET_QWORD(qword)
Definition: efx_bitfield.h:400
static void efx_hunt_clear_interrupts(struct efx_nic *efx)
Definition: efx_hunt.c:313
static void efx_hunt_build_tx_desc(efx_tx_desc_t *txd, struct io_buffer *iob)
Definition: efx_hunt.c:75
static struct net_device * netdev
Definition: gdbudp.c:52
#define EFX_RXD_SIZE
Definition: efx_common.h:56
efx_qword_t efx_tx_desc_t
Definition: efx_common.h:52
#define EFX_NUM_RX_DESC
Definition: efx_common.h:68
#define ER_DZ_RX_DESC_UPD
Definition: ef10_regs.h:98
EF10 hardware architecture definitions.
#define EFX_POPULATE_QWORD_2(qword,...)
Definition: efx_bitfield.h:394
struct efx_special_buffer entry
Definition: efx_common.h:135
#define EFX_RXD_MASK
Definition: efx_common.h:57
unsigned int read_ptr
Definition: efx_common.h:103
#define EFX_DWORD_IS_ALL_ONES(dword)
Definition: efx_bitfield.h:230
unsigned int write_ptr
Definition: efx_common.h:100
#define txd
Definition: davicom.c:143
union aes_table_entry entry[256]
Table entries, indexed by S(N)
Definition: aes.c:26
static unsigned int unsigned int reg
Definition: intel.h:245
PCI bus.
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:151
static void efx_hunt_transmit_done(struct efx_nic *efx, int id)
Definition: efx_hunt.c:131
FILE_LICENCE(GPL2_OR_LATER_OR_UBDL)
A network device.
Definition: netdevice.h:348
#define EFX_RX_BUF_SIZE
Definition: efx_common.h:74
#define EFX_QWORD_VAL(qword)
Definition: efx_bitfield.h:112
#define EFX_POPULATE_DWORD_1(dword,...)
Definition: efx_bitfield.h:423
#define DBGCIO(...)
Definition: compiler.h:556
struct efx_rx_queue rxq
Definition: efx_common.h:164
#define EFX_POPULATE_DWORD_2(dword,...)
Definition: efx_bitfield.h:421
unsigned int rx_prefix_size
Definition: efx_common.h:168
#define ESE_DZ_EV_CODE_TX_EV
Definition: ef10_regs.h:137
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition: netdevice.c:470
efx_qword_t efx_rx_desc_t
Definition: efx_common.h:51
int efx_hunt_open(struct net_device *netdev)
Definition: efx_hunt.c:465
efx_qword_t efx_event_t
Definition: efx_common.h:53
efx_event_t * ring
Definition: efx_common.h:132
Network device management.
uint32_t len
Length.
Definition: ena.h:14
int efx_hunt_transmit(struct net_device *netdev, struct io_buffer *iob)
Definition: efx_hunt.c:100
#define ENOBUFS
No buffer space available.
Definition: errno.h:498
#define ER_DZ_TX_DESC_UPD_DWORD
Definition: ef10_regs.h:325
struct efx_ev_queue evq
Definition: efx_common.h:166
#define EFX_QWORD_FMT
Definition: efx_bitfield.h:108
struct efx_tx_queue txq
Definition: efx_common.h:165
void * data
Start of data.
Definition: iobuf.h:44
struct io_buffer * buf[EFX_TXD_SIZE]
Definition: efx_common.h:94
static unsigned int efx_hunt_handle_event(struct efx_nic *efx, efx_event_t *evt)
Definition: efx_hunt.c:366
#define EFX_BUF_ALIGN
Definition: efx_common.h:55
static void *__malloc malloc_dma(size_t size, size_t phys_align)
Allocate memory for DMA.
Definition: malloc.h:66
#define EFX_TXD_SIZE
Definition: efx_common.h:58
#define DBGCP(...)
Definition: compiler.h:539
struct io_buffer * buf[EFX_NUM_RX_DESC]
Definition: efx_common.h:112
unsigned int write_ptr
Definition: efx_common.h:118
An event queue.
Definition: efx_common.h:128
struct efx_special_buffer entry
Definition: efx_common.h:97
static void * efx_hunt_alloc_special_buffer(int bytes, struct efx_special_buffer *entry)
Definition: efx_hunt.c:43
void efx_hunt_free_special_buffer(void *buf, int bytes)
Definition: efx_hunt.c:38
unsigned int read_ptr
Definition: efx_common.h:138
#define ERF_DD_EVQ_IND_RPTR_WIDTH
Definition: ef10_regs.h:341
uint8_t bytes[64]
Definition: ib_mad.h:16
static void free_dma(void *ptr, size_t size)
Free memory allocated with malloc_dma()
Definition: malloc.h:81
void efx_readl(struct efx_nic *efx, efx_dword_t *value, unsigned int reg)
Definition: efx_common.c:56
static void efx_hunt_notify_tx_desc(struct efx_nic *efx)
Definition: efx_hunt.c:89
static void efx_hunt_evq_read_ack(struct efx_nic *efx)
Definition: efx_hunt.c:343
#define NULL
NULL pointer (VOID *)
Definition: Base.h:362
struct golan_eqe_cmd cmd
Definition: CIB_PRM.h:29
A quadword (8 byte) datatype - little-endian in HW.
Definition: efx_bitfield.h:92
struct net_device * netdev
Definition: efx_common.h:148
#define rxd
Definition: davicom.c:145
void * memset(void *dest, int character, size_t len) __nonnull
A persistent I/O buffer.
Definition: iobuf.h:32