/* iPXE gve.c — Google Virtual Ethernet network driver */
1 /*
2  * Copyright (C) 2024 Michael Brown <mbrown@fensystems.co.uk>.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA.
18  *
19  * You can also choose to distribute this program under the terms of
20  * the Unmodified Binary Distribution Licence (as given in the file
21  * COPYING.UBDL), provided that you have satisfied its requirements.
22  */
23 
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25 
26 #include <stdint.h>
27 #include <string.h>
28 #include <unistd.h>
29 #include <errno.h>
30 #include <assert.h>
31 #include <byteswap.h>
32 #include <ipxe/netdevice.h>
33 #include <ipxe/ethernet.h>
34 #include <ipxe/if_ether.h>
35 #include <ipxe/iobuf.h>
36 #include <ipxe/dma.h>
37 #include <ipxe/pci.h>
38 #include <ipxe/fault.h>
39 #include "gve.h"
40 
41 /** @file
42  *
43  * Google Virtual Ethernet network driver
44  *
45  */
46 
/* Disambiguate the various admin queue error causes
 *
 * The low five bits of the device status code select one of these
 * uniquified EIO variants via the EIO_ADMIN() macro below.
 */
#define EINFO_EIO_ADMIN_UNSET \
	__einfo_uniqify ( EINFO_EIO, 0x00, "Uncompleted" )
#define EIO_ADMIN_UNSET \
	__einfo_error ( EINFO_EIO_ADMIN_UNSET )
#define EINFO_EIO_ADMIN_ABORTED \
	__einfo_uniqify ( EINFO_EIO, 0x10, "Aborted" )
#define EIO_ADMIN_ABORTED \
	__einfo_error ( EINFO_EIO_ADMIN_ABORTED )
#define EINFO_EIO_ADMIN_EXISTS \
	__einfo_uniqify ( EINFO_EIO, 0x11, "Already exists" )
#define EIO_ADMIN_EXISTS \
	__einfo_error ( EINFO_EIO_ADMIN_EXISTS )
#define EINFO_EIO_ADMIN_CANCELLED \
	__einfo_uniqify ( EINFO_EIO, 0x12, "Cancelled" )
#define EIO_ADMIN_CANCELLED \
	__einfo_error ( EINFO_EIO_ADMIN_CANCELLED )
#define EINFO_EIO_ADMIN_DATALOSS \
	__einfo_uniqify ( EINFO_EIO, 0x13, "Data loss" )
#define EIO_ADMIN_DATALOSS \
	__einfo_error ( EINFO_EIO_ADMIN_DATALOSS )
#define EINFO_EIO_ADMIN_DEADLINE \
	__einfo_uniqify ( EINFO_EIO, 0x14, "Deadline exceeded" )
#define EIO_ADMIN_DEADLINE \
	__einfo_error ( EINFO_EIO_ADMIN_DEADLINE )
#define EINFO_EIO_ADMIN_PRECONDITION \
	__einfo_uniqify ( EINFO_EIO, 0x15, "Failed precondition" )
#define EIO_ADMIN_PRECONDITION \
	__einfo_error ( EINFO_EIO_ADMIN_PRECONDITION )
#define EINFO_EIO_ADMIN_INTERNAL \
	__einfo_uniqify ( EINFO_EIO, 0x16, "Internal error" )
#define EIO_ADMIN_INTERNAL \
	__einfo_error ( EINFO_EIO_ADMIN_INTERNAL )
#define EINFO_EIO_ADMIN_INVAL \
	__einfo_uniqify ( EINFO_EIO, 0x17, "Invalid argument" )
#define EIO_ADMIN_INVAL \
	__einfo_error ( EINFO_EIO_ADMIN_INVAL )
#define EINFO_EIO_ADMIN_NOT_FOUND \
	__einfo_uniqify ( EINFO_EIO, 0x18, "Not found" )
#define EIO_ADMIN_NOT_FOUND \
	__einfo_error ( EINFO_EIO_ADMIN_NOT_FOUND )
#define EINFO_EIO_ADMIN_RANGE \
	__einfo_uniqify ( EINFO_EIO, 0x19, "Out of range" )
#define EIO_ADMIN_RANGE \
	__einfo_error ( EINFO_EIO_ADMIN_RANGE )
#define EINFO_EIO_ADMIN_PERM \
	__einfo_uniqify ( EINFO_EIO, 0x1a, "Permission denied" )
#define EIO_ADMIN_PERM \
	__einfo_error ( EINFO_EIO_ADMIN_PERM )
#define EINFO_EIO_ADMIN_UNAUTH \
	__einfo_uniqify ( EINFO_EIO, 0x1b, "Unauthenticated" )
#define EIO_ADMIN_UNAUTH \
	__einfo_error ( EINFO_EIO_ADMIN_UNAUTH )
#define EINFO_EIO_ADMIN_RESOURCE \
	__einfo_uniqify ( EINFO_EIO, 0x1c, "Resource exhausted" )
#define EIO_ADMIN_RESOURCE \
	__einfo_error ( EINFO_EIO_ADMIN_RESOURCE )
#define EINFO_EIO_ADMIN_UNAVAIL \
	__einfo_uniqify ( EINFO_EIO, 0x1d, "Unavailable" )
#define EIO_ADMIN_UNAVAIL \
	__einfo_error ( EINFO_EIO_ADMIN_UNAVAIL )
#define EINFO_EIO_ADMIN_NOTSUP \
	__einfo_uniqify ( EINFO_EIO, 0x1e, "Unimplemented" )
#define EIO_ADMIN_NOTSUP \
	__einfo_error ( EINFO_EIO_ADMIN_NOTSUP )
#define EINFO_EIO_ADMIN_UNKNOWN \
	__einfo_uniqify ( EINFO_EIO, 0x1f, "Unknown error" )
#define EIO_ADMIN_UNKNOWN \
	__einfo_error ( EINFO_EIO_ADMIN_UNKNOWN )

/* Map a device admin status code to the corresponding iPXE error */
#define EIO_ADMIN( status ) \
	EUNIQ ( EINFO_EIO, ( (status) & 0x1f ), \
		EIO_ADMIN_UNSET, EIO_ADMIN_ABORTED, EIO_ADMIN_EXISTS, \
		EIO_ADMIN_CANCELLED, EIO_ADMIN_DATALOSS, \
		EIO_ADMIN_DEADLINE, EIO_ADMIN_PRECONDITION, \
		EIO_ADMIN_INTERNAL, EIO_ADMIN_INVAL, \
		EIO_ADMIN_NOT_FOUND, EIO_ADMIN_RANGE, EIO_ADMIN_PERM, \
		EIO_ADMIN_UNAUTH, EIO_ADMIN_RESOURCE, \
		EIO_ADMIN_UNAVAIL, EIO_ADMIN_NOTSUP, EIO_ADMIN_UNKNOWN )
125 
126 /******************************************************************************
127  *
128  * Device reset
129  *
130  ******************************************************************************
131  */
132 
133 /**
134  * Reset hardware
135  *
136  * @v gve GVE device
137  * @ret rc Return status code
138  */
139 static int gve_reset ( struct gve_nic *gve ) {
140  uint32_t pfn;
141  unsigned int i;
142 
143  /* Skip reset if admin queue page frame number is already
144  * clear. Triggering a reset on an already-reset device seems
145  * to cause a delayed reset to be scheduled. This can cause
146  * the device to end up in a reset loop, where each attempt to
147  * recover from reset triggers another reset a few seconds
148  * later.
149  */
150  pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
151  if ( ! pfn ) {
152  DBGC ( gve, "GVE %p skipping reset\n", gve );
153  return 0;
154  }
155 
156  /* Clear admin queue page frame number */
157  writel ( 0, gve->cfg + GVE_CFG_ADMIN_PFN );
158  wmb();
159 
160  /* Wait for device to reset */
161  for ( i = 0 ; i < GVE_RESET_MAX_WAIT_MS ; i++ ) {
162 
163  /* Delay */
164  mdelay ( 1 );
165 
166  /* Check for reset completion */
167  pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
168  if ( ! pfn )
169  return 0;
170  }
171 
172  DBGC ( gve, "GVE %p reset timed out (PFN %#08x devstat %#08x)\n",
173  gve, bswap_32 ( pfn ),
174  bswap_32 ( readl ( gve->cfg + GVE_CFG_DEVSTAT ) ) );
175  return -ETIMEDOUT;
176 }
177 
178 /******************************************************************************
179  *
180  * Admin queue
181  *
182  ******************************************************************************
183  */
184 
185 /**
186  * Allocate admin queue
187  *
188  * @v gve GVE device
189  * @ret rc Return status code
190  */
191 static int gve_admin_alloc ( struct gve_nic *gve ) {
192  struct dma_device *dma = gve->dma;
193  struct gve_admin *admin = &gve->admin;
194  struct gve_irqs *irqs = &gve->irqs;
195  struct gve_events *events = &gve->events;
196  struct gve_scratch *scratch = &gve->scratch;
197  size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) );
198  size_t irqs_len = ( GVE_IRQ_COUNT * sizeof ( irqs->irq[0] ) );
199  size_t events_len = ( GVE_EVENT_MAX * sizeof ( events->event[0] ) );
200  size_t scratch_len = sizeof ( *scratch->buf );
201  int rc;
202 
203  /* Allocate admin queue */
204  admin->cmd = dma_alloc ( dma, &admin->map, admin_len, GVE_ALIGN );
205  if ( ! admin->cmd ) {
206  rc = -ENOMEM;
207  goto err_admin;
208  }
209 
210  /* Allocate interrupt channels */
211  irqs->irq = dma_alloc ( dma, &irqs->map, irqs_len, GVE_ALIGN );
212  if ( ! irqs->irq ) {
213  rc = -ENOMEM;
214  goto err_irqs;
215  }
216 
217  /* Allocate event counters */
218  events->event = dma_alloc ( dma, &events->map, events_len, GVE_ALIGN );
219  if ( ! events->event ) {
220  rc = -ENOMEM;
221  goto err_events;
222  }
223 
224  /* Allocate scratch buffer */
225  scratch->buf = dma_alloc ( dma, &scratch->map, scratch_len, GVE_ALIGN );
226  if ( ! scratch->buf ) {
227  rc = -ENOMEM;
228  goto err_scratch;
229  }
230 
231  DBGC ( gve, "GVE %p AQ at [%08lx,%08lx)\n",
232  gve, virt_to_phys ( admin->cmd ),
233  ( virt_to_phys ( admin->cmd ) + admin_len ) );
234  return 0;
235 
236  dma_free ( &scratch->map, scratch->buf, scratch_len );
237  err_scratch:
238  dma_free ( &events->map, events->event, events_len );
239  err_events:
240  dma_free ( &irqs->map, irqs->irq, irqs_len );
241  err_irqs:
242  dma_free ( &admin->map, admin->cmd, admin_len );
243  err_admin:
244  return rc;
245 }
246 
247 /**
248  * Free admin queue
249  *
250  * @v gve GVE device
251  */
252 static void gve_admin_free ( struct gve_nic *gve ) {
253  struct gve_admin *admin = &gve->admin;
254  struct gve_irqs *irqs = &gve->irqs;
255  struct gve_events *events = &gve->events;
256  struct gve_scratch *scratch = &gve->scratch;
257  size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) );
258  size_t irqs_len = ( GVE_IRQ_COUNT * sizeof ( irqs->irq[0] ) );
259  size_t events_len = ( GVE_EVENT_MAX * sizeof ( events->event[0] ) );
260  size_t scratch_len = sizeof ( *scratch->buf );
261 
262  /* Free scratch buffer */
263  dma_free ( &scratch->map, scratch->buf, scratch_len );
264 
265  /* Free event counter */
266  dma_free ( &events->map, events->event, events_len );
267 
268  /* Free interrupt channels */
269  dma_free ( &irqs->map, irqs->irq, irqs_len );
270 
271  /* Free admin queue */
272  dma_free ( &admin->map, admin->cmd, admin_len );
273 }
274 
275 /**
276  * Enable admin queue
277  *
278  * @v gve GVE device
279  */
280 static void gve_admin_enable ( struct gve_nic *gve ) {
281  struct gve_admin *admin = &gve->admin;
282  size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) );
284 
285  /* Reset queue */
286  admin->prod = 0;
287 
288  /* Program queue addresses and capabilities */
289  base = dma ( &admin->map, admin->cmd );
291  gve->cfg + GVE_CFG_ADMIN_PFN );
292  writel ( bswap_32 ( base & 0xffffffffUL ),
293  gve->cfg + GVE_CFG_ADMIN_BASE_LO );
294  if ( sizeof ( base ) > sizeof ( uint32_t ) ) {
295  writel ( bswap_32 ( ( ( uint64_t ) base ) >> 32 ),
296  gve->cfg + GVE_CFG_ADMIN_BASE_HI );
297  } else {
298  writel ( 0, gve->cfg + GVE_CFG_ADMIN_BASE_HI );
299  }
300  writel ( bswap_16 ( admin_len ), gve->cfg + GVE_CFG_ADMIN_LEN );
302 }
303 
304 /**
305  * Get next available admin queue command slot
306  *
307  * @v gve GVE device
308  * @ret cmd Admin queue command
309  */
310 static union gve_admin_command * gve_admin_command ( struct gve_nic *gve ) {
311  struct gve_admin *admin = &gve->admin;
312  union gve_admin_command *cmd;
313  unsigned int index;
314 
315  /* Get next command slot */
316  index = admin->prod;
317  cmd = &admin->cmd[ index % GVE_ADMIN_COUNT ];
318 
319  /* Initialise request */
320  memset ( cmd, 0, sizeof ( *cmd ) );
321 
322  return cmd;
323 }
324 
325 /**
326  * Wait for admin queue command to complete
327  *
328  * @v gve GVE device
329  * @ret rc Return status code
330  */
331 static int gve_admin_wait ( struct gve_nic *gve ) {
332  struct gve_admin *admin = &gve->admin;
333  uint32_t evt;
334  uint32_t pfn;
335  unsigned int i;
336 
337  /* Wait for any outstanding commands to complete */
338  for ( i = 0 ; i < GVE_ADMIN_MAX_WAIT_MS ; i++ ) {
339 
340  /* Check event counter */
341  rmb();
342  evt = bswap_32 ( readl ( gve->cfg + GVE_CFG_ADMIN_EVT ) );
343  if ( evt == admin->prod )
344  return 0;
345 
346  /* Check for device reset */
347  pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
348  if ( ! pfn )
349  break;
350 
351  /* Delay */
352  mdelay ( 1 );
353  }
354 
355  DBGC ( gve, "GVE %p AQ %#02x %s (completed %#02x, status %#08x)\n",
356  gve, admin->prod, ( pfn ? "timed out" : "saw reset" ), evt,
357  bswap_32 ( readl ( gve->cfg + GVE_CFG_DEVSTAT ) ) );
358  return ( pfn ? -ETIMEDOUT : -ECONNRESET );
359 }
360 
361 /**
362  * Issue admin queue command
363  *
364  * @v gve GVE device
365  * @ret rc Return status code
366  */
367 static int gve_admin ( struct gve_nic *gve ) {
368  struct gve_admin *admin = &gve->admin;
369  union gve_admin_command *cmd;
370  unsigned int index;
373  int rc;
374 
375  /* Ensure admin queue is idle */
376  if ( ( rc = gve_admin_wait ( gve ) ) != 0 )
377  return rc;
378 
379  /* Get next command slot */
380  index = admin->prod;
381  cmd = &admin->cmd[ index % GVE_ADMIN_COUNT ];
382  opcode = cmd->hdr.opcode;
383  DBGC2 ( gve, "GVE %p AQ %#02x command %#04x request:\n",
384  gve, index, opcode );
385  DBGC2_HDA ( gve, 0, cmd, sizeof ( *cmd ) );
386 
387  /* Increment producer counter */
388  admin->prod++;
389 
390  /* Ring doorbell */
391  wmb();
392  writel ( bswap_32 ( admin->prod ), gve->cfg + GVE_CFG_ADMIN_DB );
393 
394  /* Wait for command to complete */
395  if ( ( rc = gve_admin_wait ( gve ) ) != 0 )
396  return rc;
397 
398  /* Check command status */
399  status = be32_to_cpu ( cmd->hdr.status );
400  if ( status != GVE_ADMIN_STATUS_OK ) {
401  rc = -EIO_ADMIN ( status );
402  DBGC ( gve, "GVE %p AQ %#02x command %#04x failed: %#08x\n",
403  gve, index, opcode, status );
404  DBGC_HDA ( gve, 0, cmd, sizeof ( *cmd ) );
405  DBGC ( gve, "GVE %p AQ error: %s\n", gve, strerror ( rc ) );
406  return rc;
407  }
408 
409  DBGC2 ( gve, "GVE %p AQ %#02x command %#04x result:\n",
410  gve, index, opcode );
411  DBGC2_HDA ( gve, 0, cmd, sizeof ( *cmd ) );
412  return 0;
413 }
414 
415 /**
416  * Issue simple admin queue command
417  *
418  * @v gve GVE device
419  * @v opcode Operation code
420  * @v id ID parameter (or zero if not applicable)
421  * @ret rc Return status code
422  *
423  * Several admin queue commands take either an empty parameter list or
424  * a single 32-bit ID parameter.
425  */
426 static int gve_admin_simple ( struct gve_nic *gve, unsigned int opcode,
427  unsigned int id ) {
428  union gve_admin_command *cmd;
429  int rc;
430 
431  /* Construct request */
432  cmd = gve_admin_command ( gve );
433  cmd->hdr.opcode = opcode;
434  cmd->simple.id = cpu_to_be32 ( id );
435 
436  /* Issue command */
437  if ( ( rc = gve_admin ( gve ) ) != 0 )
438  return rc;
439 
440  return 0;
441 }
442 
443 /**
444  * Get device descriptor
445  *
446  * @v gve GVE device
447  * @ret rc Return status code
448  */
449 static int gve_describe ( struct gve_nic *gve ) {
450  struct net_device *netdev = gve->netdev;
451  struct gve_device_descriptor *desc = &gve->scratch.buf->desc;
452  union gve_admin_command *cmd;
453  int rc;
454 
455  /* Construct request */
456  cmd = gve_admin_command ( gve );
457  cmd->hdr.opcode = GVE_ADMIN_DESCRIBE;
458  cmd->desc.addr = cpu_to_be64 ( dma ( &gve->scratch.map, desc ) );
459  cmd->desc.ver = cpu_to_be32 ( GVE_ADMIN_DESCRIBE_VER );
460  cmd->desc.len = cpu_to_be32 ( sizeof ( *desc ) );
461 
462  /* Issue command */
463  if ( ( rc = gve_admin ( gve ) ) != 0 )
464  return rc;
465  DBGC2 ( gve, "GVE %p device descriptor:\n", gve );
466  DBGC2_HDA ( gve, 0, desc, sizeof ( *desc ) );
467 
468  /* Extract queue parameters */
469  gve->events.count = be16_to_cpu ( desc->counters );
470  if ( gve->events.count > GVE_EVENT_MAX )
471  gve->events.count = GVE_EVENT_MAX;
472  gve->tx.count = be16_to_cpu ( desc->tx_count );
473  gve->rx.count = be16_to_cpu ( desc->rx_count );
474  DBGC ( gve, "GVE %p using %d TX, %d RX, %d/%d events\n",
475  gve, gve->tx.count, gve->rx.count, gve->events.count,
476  be16_to_cpu ( desc->counters ) );
477 
478  /* Extract network parameters */
479  build_assert ( sizeof ( desc->mac ) == ETH_ALEN );
480  memcpy ( netdev->hw_addr, &desc->mac, sizeof ( desc->mac ) );
481  netdev->mtu = be16_to_cpu ( desc->mtu );
483  DBGC ( gve, "GVE %p MAC %s (\"%s\") MTU %zd\n",
484  gve, eth_ntoa ( netdev->hw_addr ),
485  inet_ntoa ( desc->mac.in ), netdev->mtu );
486 
487  return 0;
488 }
489 
490 /**
491  * Configure device resources
492  *
493  * @v gve GVE device
494  * @ret rc Return status code
495  */
496 static int gve_configure ( struct gve_nic *gve ) {
497  struct gve_events *events = &gve->events;
498  struct gve_irqs *irqs = &gve->irqs;
499  union gve_admin_command *cmd;
500  unsigned int db_off;
501  unsigned int i;
502  int rc;
503 
504  /* Construct request */
505  cmd = gve_admin_command ( gve );
506  cmd->hdr.opcode = GVE_ADMIN_CONFIGURE;
507  cmd->conf.events =
508  cpu_to_be64 ( dma ( &events->map, events->event ) );
509  cmd->conf.irqs =
510  cpu_to_be64 ( dma ( &irqs->map, irqs->irq ) );
511  cmd->conf.num_events = cpu_to_be32 ( events->count );
512  cmd->conf.num_irqs = cpu_to_be32 ( GVE_IRQ_COUNT );
513  cmd->conf.irq_stride = cpu_to_be32 ( sizeof ( irqs->irq[0] ) );
514 
515  /* Issue command */
516  if ( ( rc = gve_admin ( gve ) ) != 0 )
517  return rc;
518 
519  /* Disable all interrupts */
520  for ( i = 0 ; i < GVE_IRQ_COUNT ; i++ ) {
521  db_off = ( be32_to_cpu ( irqs->irq[i].db_idx ) *
522  sizeof ( uint32_t ) );
523  DBGC ( gve, "GVE %p IRQ %d doorbell +%#04x\n", gve, i, db_off );
524  irqs->db[i] = ( gve->db + db_off );
525  writel ( bswap_32 ( GVE_IRQ_DISABLE ), irqs->db[i] );
526  }
527 
528  return 0;
529 }
530 
531 /**
532  * Deconfigure device resources
533  *
534  * @v gve GVE device
535  * @ret rc Return status code
536  */
537 static int gve_deconfigure ( struct gve_nic *gve ) {
538  int rc;
539 
540  /* Issue command (with meaningless ID) */
541  if ( ( rc = gve_admin_simple ( gve, GVE_ADMIN_DECONFIGURE, 0 ) ) != 0 )
542  return rc;
543 
544  return 0;
545 }
546 
547 /**
548  * Register queue page list
549  *
550  * @v gve GVE device
551  * @v qpl Queue page list
552  * @ret rc Return status code
553  */
554 static int gve_register ( struct gve_nic *gve, struct gve_qpl *qpl ) {
555  struct gve_pages *pages = &gve->scratch.buf->pages;
556  union gve_admin_command *cmd;
558  unsigned int i;
559  int rc;
560 
561  /* Build page address list */
562  for ( i = 0 ; i < qpl->count ; i++ ) {
563  addr = user_to_phys ( qpl->data, ( i * GVE_PAGE_SIZE ) );
564  pages->addr[i] = cpu_to_be64 ( dma_phys ( &qpl->map, addr ) );
565  }
566 
567  /* Construct request */
568  cmd = gve_admin_command ( gve );
569  cmd->hdr.opcode = GVE_ADMIN_REGISTER;
570  cmd->reg.id = cpu_to_be32 ( qpl->id );
571  cmd->reg.count = cpu_to_be32 ( qpl->count );
572  cmd->reg.addr = cpu_to_be64 ( dma ( &gve->scratch.map, pages ) );
573  cmd->reg.size = cpu_to_be64 ( GVE_PAGE_SIZE );
574 
575  /* Issue command */
576  if ( ( rc = gve_admin ( gve ) ) != 0 )
577  return rc;
578 
579  return 0;
580 }
581 
582 /**
583  * Unregister page list
584  *
585  * @v gve GVE device
586  * @v qpl Queue page list
587  * @ret rc Return status code
588  */
589 static int gve_unregister ( struct gve_nic *gve, struct gve_qpl *qpl ) {
590  int rc;
591 
592  /* Issue command */
593  if ( ( rc = gve_admin_simple ( gve, GVE_ADMIN_UNREGISTER,
594  qpl->id ) ) != 0 ) {
595  return rc;
596  }
597 
598  return 0;
599 }
600 
601 /**
602  * Construct command to create transmit queue
603  *
604  * @v queue Transmit queue
605  * @v cmd Admin queue command
606  */
607 static void gve_create_tx_param ( struct gve_queue *queue,
608  union gve_admin_command *cmd ) {
609  struct gve_admin_create_tx *create = &cmd->create_tx;
610  const struct gve_queue_type *type = queue->type;
611  physaddr_t desc = user_to_phys ( queue->desc, 0 );
612 
613  /* Construct request parameters */
614  create->res = cpu_to_be64 ( dma ( &queue->res_map, queue->res ) );
615  create->desc = cpu_to_be64 ( dma_phys ( &queue->desc_map, desc ) );
616  create->qpl_id = cpu_to_be32 ( type->qpl );
617  create->notify_id = cpu_to_be32 ( type->irq );
618 }
619 
620 /**
621  * Construct command to create receive queue
622  *
623  * @v queue Receive queue
624  * @v cmd Admin queue command
625  */
626 static void gve_create_rx_param ( struct gve_queue *queue,
627  union gve_admin_command *cmd ) {
628  struct gve_admin_create_rx *create = &cmd->create_rx;
629  const struct gve_queue_type *type = queue->type;
630  physaddr_t desc = user_to_phys ( queue->desc, 0 );
631  physaddr_t cmplt = user_to_phys ( queue->cmplt, 0 );
632 
633  /* Construct request parameters */
634  create->notify_id = cpu_to_be32 ( type->irq );
635  create->res = cpu_to_be64 ( dma ( &queue->res_map, queue->res ) );
636  create->desc = cpu_to_be64 ( dma_phys ( &queue->desc_map, desc ) );
637  create->cmplt = cpu_to_be64 ( dma_phys ( &queue->cmplt_map, cmplt ) );
638  create->qpl_id = cpu_to_be32 ( type->qpl );
639  create->bufsz = cpu_to_be16 ( GVE_BUF_SIZE );
640 }
641 
642 /**
643  * Create transmit or receive queue
644  *
645  * @v gve GVE device
646  * @v queue Descriptor queue
647  * @ret rc Return status code
648  */
649 static int gve_create_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
650  const struct gve_queue_type *type = queue->type;
651  union gve_admin_command *cmd;
652  unsigned int db_off;
653  unsigned int evt_idx;
654  int rc;
655 
656  /* Reset queue */
657  queue->prod = 0;
658  queue->cons = 0;
659 
660  /* Construct request */
661  cmd = gve_admin_command ( gve );
662  cmd->hdr.opcode = type->create;
663  type->param ( queue, cmd );
664 
665  /* Issue command */
666  if ( ( rc = gve_admin ( gve ) ) != 0 )
667  return rc;
668 
669  /* Record indices */
670  db_off = ( be32_to_cpu ( queue->res->db_idx ) * sizeof ( uint32_t ) );
671  evt_idx = be32_to_cpu ( queue->res->evt_idx );
672  DBGC ( gve, "GVE %p %s doorbell +%#04x event counter %d\n",
673  gve, type->name, db_off, evt_idx );
674  queue->db = ( gve->db + db_off );
675  assert ( evt_idx < gve->events.count );
676  queue->event = &gve->events.event[evt_idx];
677  assert ( queue->event->count == 0 );
678 
679  return 0;
680 }
681 
682 /**
683  * Destroy transmit or receive queue
684  *
685  * @v gve GVE device
686  * @v queue Descriptor queue
687  * @ret rc Return status code
688  */
689 static int gve_destroy_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
690  const struct gve_queue_type *type = queue->type;
691  int rc;
692 
693  /* Issue command */
694  if ( ( rc = gve_admin_simple ( gve, type->destroy, 0 ) ) != 0 )
695  return rc;
696 
697  return 0;
698 }
699 
700 /******************************************************************************
701  *
702  * Network device interface
703  *
704  ******************************************************************************
705  */
706 
707 /**
708  * Allocate queue page list
709  *
710  * @v gve GVE device
711  * @v qpl Queue page list
712  * @v id Queue page list ID
713  * @v buffers Number of data buffers
714  * @ret rc Return status code
715  */
716 static int gve_alloc_qpl ( struct gve_nic *gve, struct gve_qpl *qpl,
717  uint32_t id, unsigned int buffers ) {
718  size_t len;
719 
720  /* Record ID */
721  qpl->id = id;
722 
723  /* Calculate number of pages required */
725  qpl->count = ( ( buffers + GVE_BUF_PER_PAGE - 1 ) / GVE_BUF_PER_PAGE );
726  assert ( qpl->count <= GVE_QPL_MAX );
727 
728  /* Allocate pages (as a single block) */
729  len = ( qpl->count * GVE_PAGE_SIZE );
730  qpl->data = dma_umalloc ( gve->dma, &qpl->map, len, GVE_ALIGN );
731  if ( ! qpl->data )
732  return -ENOMEM;
733 
734  DBGC ( gve, "GVE %p QPL %#08x at [%08lx,%08lx)\n",
735  gve, qpl->id, user_to_phys ( qpl->data, 0 ),
736  user_to_phys ( qpl->data, len ) );
737  return 0;
738 }
739 
740 /**
741  * Free queue page list
742  *
743  * @v gve GVE device
744  * @v qpl Queue page list
745  */
746 static void gve_free_qpl ( struct gve_nic *nic __unused,
747  struct gve_qpl *qpl ) {
748  size_t len = ( qpl->count * GVE_PAGE_SIZE );
749 
750  /* Free pages */
751  dma_ufree ( &qpl->map, qpl->data, len );
752 }
753 
754 /**
755  * Get buffer address (within queue page list address space)
756  *
757  * @v queue Descriptor queue
758  * @v index Buffer index
759  * @ret addr Buffer address within queue page list address space
760  */
761 static inline __attribute__ (( always_inline)) size_t
762 gve_address ( struct gve_queue *queue, unsigned int index ) {
763 
764  /* We allocate sufficient pages for the maximum fill level of
765  * buffers, and reuse the pages in strict rotation as we
766  * progress through the queue.
767  */
768  return ( ( index & ( queue->fill - 1 ) ) * GVE_BUF_SIZE );
769 }
770 
771 /**
772  * Get buffer address
773  *
774  * @v queue Descriptor queue
775  * @v index Buffer index
776  * @ret addr Buffer address
777  */
778 static inline __attribute__ (( always_inline )) userptr_t
779 gve_buffer ( struct gve_queue *queue, unsigned int index ) {
780 
781  /* Pages are currently allocated as a single contiguous block */
782  return userptr_add ( queue->qpl.data, gve_address ( queue, index ) );
783 }
784 
785 /**
786  * Calculate next receive sequence number
787  *
788  * @v seq Current sequence number, or zero to start sequence
789  * @ret next Next sequence number
790  */
791 static inline __attribute__ (( always_inline )) unsigned int
792 gve_next ( unsigned int seq ) {
793 
794  /* The receive completion sequence number is a modulo 7
795  * counter that cycles through the non-zero three-bit values 1
796  * to 7 inclusive.
797  *
798  * Since 7 is coprime to 2^n, this ensures that the sequence
799  * number changes each time that a new completion is written
800  * to memory.
801  *
802  * Since the counter takes only non-zero values, this ensures
803  * that the sequence number changes whenever a new completion
804  * is first written to a zero-initialised completion ring.
805  */
806  seq = ( ( seq + 1 ) & GVE_RX_SEQ_MASK );
807  return ( seq ? seq : 1 );
808 }
809 
810 /**
811  * Allocate descriptor queue
812  *
813  * @v gve GVE device
814  * @v queue Descriptor queue
815  * @ret rc Return status code
816  */
817 static int gve_alloc_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
818  const struct gve_queue_type *type = queue->type;
819  struct dma_device *dma = gve->dma;
820  size_t desc_len = ( queue->count * type->desc_len );
821  size_t cmplt_len = ( queue->count * type->cmplt_len );
822  size_t res_len = sizeof ( *queue->res );
823  struct gve_buffer buf;
824  size_t offset;
825  unsigned int i;
826  int rc;
827 
828  /* Sanity checks */
829  if ( ( queue->count == 0 ) ||
830  ( queue->count & ( queue->count - 1 ) ) ) {
831  DBGC ( gve, "GVE %p %s invalid queue size %d\n",
832  gve, type->name, queue->count );
833  rc = -EINVAL;
834  goto err_sanity;
835  }
836 
837  /* Calculate maximum fill level */
838  assert ( ( type->fill & ( type->fill - 1 ) ) == 0 );
839  queue->fill = type->fill;
840  if ( queue->fill > queue->count )
841  queue->fill = queue->count;
842  DBGC ( gve, "GVE %p %s using QPL %#08x with %d/%d descriptors\n",
843  gve, type->name, type->qpl, queue->fill, queue->count );
844 
845  /* Allocate queue page list */
846  if ( ( rc = gve_alloc_qpl ( gve, &queue->qpl, type->qpl,
847  queue->fill ) ) != 0 )
848  goto err_qpl;
849 
850  /* Allocate descriptors */
851  queue->desc = dma_umalloc ( dma, &queue->desc_map, desc_len,
852  GVE_ALIGN );
853  if ( ! queue->desc ) {
854  rc = -ENOMEM;
855  goto err_desc;
856  }
857  DBGC ( gve, "GVE %p %s descriptors at [%08lx,%08lx)\n",
858  gve, type->name, user_to_phys ( queue->desc, 0 ),
859  user_to_phys ( queue->desc, desc_len ) );
860 
861  /* Allocate completions */
862  if ( cmplt_len ) {
863  queue->cmplt = dma_umalloc ( dma, &queue->cmplt_map, cmplt_len,
864  GVE_ALIGN );
865  if ( ! queue->cmplt ) {
866  rc = -ENOMEM;
867  goto err_cmplt;
868  }
869  DBGC ( gve, "GVE %p %s completions at [%08lx,%08lx)\n",
870  gve, type->name, user_to_phys ( queue->cmplt, 0 ),
871  user_to_phys ( queue->cmplt, cmplt_len ) );
872  }
873 
874  /* Allocate queue resources */
875  queue->res = dma_alloc ( dma, &queue->res_map, res_len, GVE_ALIGN );
876  if ( ! queue->res ) {
877  rc = -ENOMEM;
878  goto err_res;
879  }
880  memset ( queue->res, 0, res_len );
881 
882  /* Populate descriptor offsets */
883  offset = ( type->desc_len - sizeof ( buf ) );
884  for ( i = 0 ; i < queue->count ; i++ ) {
885  buf.addr = cpu_to_be64 ( gve_address ( queue, i ) );
886  copy_to_user ( queue->desc, offset, &buf, sizeof ( buf ) );
887  offset += type->desc_len;
888  }
889 
890  return 0;
891 
892  dma_free ( &queue->res_map, queue->res, res_len );
893  err_res:
894  if ( cmplt_len )
895  dma_ufree ( &queue->cmplt_map, queue->cmplt, cmplt_len );
896  err_cmplt:
897  dma_ufree ( &queue->desc_map, queue->desc, desc_len );
898  err_desc:
899  gve_free_qpl ( gve, &queue->qpl );
900  err_qpl:
901  err_sanity:
902  return rc;
903 }
904 
905 /**
906  * Free descriptor queue
907  *
908  * @v gve GVE device
909  * @v queue Descriptor queue
910  */
911 static void gve_free_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
912  const struct gve_queue_type *type = queue->type;
913  size_t desc_len = ( queue->count * type->desc_len );
914  size_t cmplt_len = ( queue->count * type->cmplt_len );
915  size_t res_len = sizeof ( *queue->res );
916 
917  /* Free queue resources */
918  dma_free ( &queue->res_map, queue->res, res_len );
919 
920  /* Free completions, if applicable */
921  if ( cmplt_len )
922  dma_ufree ( &queue->cmplt_map, queue->cmplt, cmplt_len );
923 
924  /* Free descriptors */
925  dma_ufree ( &queue->desc_map, queue->desc, desc_len );
926 
927  /* Free queue page list */
928  gve_free_qpl ( gve, &queue->qpl );
929 }
930 
931 /**
932  * Start up device
933  *
934  * @v gve GVE device
935  * @ret rc Return status code
936  */
937 static int gve_start ( struct gve_nic *gve ) {
938  struct net_device *netdev = gve->netdev;
939  struct gve_queue *tx = &gve->tx;
940  struct gve_queue *rx = &gve->rx;
941  struct io_buffer *iobuf;
942  unsigned int i;
943  int rc;
944 
945  /* Cancel any pending transmissions */
946  for ( i = 0 ; i < ( sizeof ( gve->tx_iobuf ) /
947  sizeof ( gve->tx_iobuf[0] ) ) ; i++ ) {
948  iobuf = gve->tx_iobuf[i];
949  gve->tx_iobuf[i] = NULL;
950  if ( iobuf )
952  }
953 
954  /* Invalidate receive completions */
955  memset_user ( rx->cmplt, 0, 0, ( rx->count * rx->type->cmplt_len ) );
956 
957  /* Reset receive sequence */
958  gve->seq = gve_next ( 0 );
959 
960  /* Configure device resources */
961  if ( ( rc = gve_configure ( gve ) ) != 0 )
962  goto err_configure;
963 
964  /* Register transmit queue page list */
965  if ( ( rc = gve_register ( gve, &tx->qpl ) ) != 0 )
966  goto err_register_tx;
967 
968  /* Register receive queue page list */
969  if ( ( rc = gve_register ( gve, &rx->qpl ) ) != 0 )
970  goto err_register_rx;
971 
972  /* Create transmit queue */
973  if ( ( rc = gve_create_queue ( gve, tx ) ) != 0 )
974  goto err_create_tx;
975 
976  /* Create receive queue */
977  if ( ( rc = gve_create_queue ( gve, rx ) ) != 0 )
978  goto err_create_rx;
979 
980  return 0;
981 
982  gve_destroy_queue ( gve, rx );
983  err_create_rx:
984  gve_destroy_queue ( gve, tx );
985  err_create_tx:
986  gve_unregister ( gve, &rx->qpl );
987  err_register_rx:
988  gve_unregister ( gve, &tx->qpl );
989  err_register_tx:
990  gve_deconfigure ( gve );
991  err_configure:
992  return rc;
993 }
994 
995 /**
996  * Stop device
997  *
998  * @v gve GVE device
999  */
1000 static void gve_stop ( struct gve_nic *gve ) {
1001  struct gve_queue *tx = &gve->tx;
1002  struct gve_queue *rx = &gve->rx;
1003 
1004  /* Destroy queues */
1005  gve_destroy_queue ( gve, rx );
1006  gve_destroy_queue ( gve, tx );
1007 
1008  /* Unregister page lists */
1009  gve_unregister ( gve, &rx->qpl );
1010  gve_unregister ( gve, &tx->qpl );
1011 
1012  /* Deconfigure device */
1013  gve_deconfigure ( gve );
1014 }
1015 
/**
 * Device startup process
 *
 * Runs as a deferred process (scheduled via gve_restart()) so that
 * the potentially slow reset-and-start sequence does not block the
 * caller.  On success, link-up is (ab)used to signal that the queues
 * are ready; on failure, the process reschedules itself up to
 * GVE_RESET_MAX_RETRY times.
 *
 * @v gve		GVE device
 */
static void gve_startup ( struct gve_nic *gve ) {
	struct net_device *netdev = gve->netdev;
	int rc;

	/* Reset device */
	if ( ( rc = gve_reset ( gve ) ) != 0 )
		goto err_reset;

	/* Enable admin queue */
	gve_admin_enable ( gve );

	/* Start device */
	if ( ( rc = gve_start ( gve ) ) != 0 )
		goto err_start;

	/* Reset retry count */
	gve->retries = 0;

	/* (Ab)use link status to report startup status */
	netdev_link_up ( netdev );

	return;

	/* NOTE(review): the gve_stop() below is unreachable — it sits
	 * between the success return and the error labels, apparently
	 * as the conventional cleanup slot for any error case added
	 * after gve_start() succeeds.
	 */
	gve_stop ( gve );
 err_start:
 err_reset:
	DBGC ( gve, "GVE %p startup failed: %s\n", gve, strerror ( rc ) );
	/* Report the failure via link state, and retry if permitted */
	netdev_link_err ( netdev, rc );
	if ( gve->retries++ < GVE_RESET_MAX_RETRY )
		process_add ( &gve->startup );
}
1052 
1053 /**
1054  * Trigger startup process
1055  *
1056  * @v gve GVE device
1057  */
1058 static void gve_restart ( struct gve_nic *gve ) {
1059  struct net_device *netdev = gve->netdev;
1060 
1061  /* Mark link down to inhibit polling and transmit activity */
1063 
1064  /* Schedule startup process */
1065  process_add ( &gve->startup );
1066 }
1067 
1068 /**
1069  * Reset recovery watchdog
1070  *
1071  * @v timer Reset recovery watchdog timer
1072  * @v over Failure indicator
1073  */
1074 static void gve_watchdog ( struct retry_timer *timer, int over __unused ) {
1075  struct gve_nic *gve = container_of ( timer, struct gve_nic, watchdog );
1077  uint32_t pfn;
1078  int rc;
1079 
1080  /* Reschedule watchdog */
1082 
1083  /* Reset device (for test purposes) if applicable */
1084  if ( ( rc = inject_fault ( VM_MIGRATED_RATE ) ) != 0 ) {
1085  DBGC ( gve, "GVE %p synthesising host reset\n", gve );
1086  writel ( 0, gve->cfg + GVE_CFG_ADMIN_PFN );
1087  }
1088 
1089  /* Check for activity since last timer invocation */
1090  activity = ( gve->tx.cons + gve->rx.cons );
1091  if ( activity != gve->activity ) {
1092  gve->activity = activity;
1093  return;
1094  }
1095 
1096  /* Check for reset */
1097  pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
1098  if ( pfn ) {
1099  DBGC2 ( gve, "GVE %p idle but not in reset\n", gve );
1100  return;
1101  }
1102 
1103  /* Schedule restart */
1104  DBGC ( gve, "GVE %p watchdog detected reset by host\n", gve );
1105  gve_restart ( gve );
1106 }
1107 
1108 /**
1109  * Open network device
1110  *
1111  * @v netdev Network device
1112  * @ret rc Return status code
1113  */
1114 static int gve_open ( struct net_device *netdev ) {
1115  struct gve_nic *gve = netdev->priv;
1116  struct gve_queue *tx = &gve->tx;
1117  struct gve_queue *rx = &gve->rx;
1118  int rc;
1119 
1120  /* Allocate and prepopulate transmit queue */
1121  if ( ( rc = gve_alloc_queue ( gve, tx ) ) != 0 )
1122  goto err_alloc_tx;
1123 
1124  /* Allocate and prepopulate receive queue */
1125  if ( ( rc = gve_alloc_queue ( gve, rx ) ) != 0 )
1126  goto err_alloc_rx;
1127 
1128  /* Trigger startup */
1129  gve_restart ( gve );
1130 
1131  /* Start reset recovery watchdog timer */
1133 
1134  return 0;
1135 
1136  gve_free_queue ( gve, rx );
1137  err_alloc_rx:
1138  gve_free_queue ( gve, tx );
1139  err_alloc_tx:
1140  return rc;
1141 }
1142 
1143 /**
1144  * Close network device
1145  *
1146  * @v netdev Network device
1147  */
1148 static void gve_close ( struct net_device *netdev ) {
1149  struct gve_nic *gve = netdev->priv;
1150  struct gve_queue *tx = &gve->tx;
1151  struct gve_queue *rx = &gve->rx;
1152 
1153  /* Stop reset recovery timer */
1154  stop_timer ( &gve->watchdog );
1155 
1156  /* Terminate startup process */
1157  process_del ( &gve->startup );
1158 
1159  /* Stop and reset device */
1160  gve_stop ( gve );
1161  gve_reset ( gve );
1162 
1163  /* Free queues */
1164  gve_free_queue ( gve, rx );
1165  gve_free_queue ( gve, tx );
1166 }
1167 
/**
 * Transmit packet
 *
 * The packet is copied into the preregistered transmit queue pages,
 * split into GVE_BUF_SIZE fragments, with one descriptor per
 * fragment.  The I/O buffer is retained (recorded against the final
 * descriptor) until the transmission completes.
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int gve_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
	struct gve_nic *gve = netdev->priv;
	struct gve_queue *tx = &gve->tx;
	struct gve_tx_descriptor desc;
	unsigned int count;
	unsigned int index;
	size_t frag_len;
	size_t offset;
	size_t len;

	/* Do nothing if queues are not yet set up (link status is
	 * (ab)used to indicate startup completion)
	 */
	if ( ! netdev_link_ok ( netdev ) )
		return -ENETDOWN;

	/* Defer packet if there is no space in the transmit ring:
	 * count is the number of GVE_BUF_SIZE fragments required
	 */
	len = iob_len ( iobuf );
	count = ( ( len + GVE_BUF_SIZE - 1 ) / GVE_BUF_SIZE );
	if ( ( ( tx->prod - tx->cons ) + count ) > tx->fill ) {
		netdev_tx_defer ( netdev, iobuf );
		return 0;
	}

	/* Copy packet to queue pages and populate descriptors */
	for ( offset = 0 ; offset < len ; offset += frag_len ) {

		/* Sanity check: the buffer slot must have been
		 * released by a previous completion
		 */
		assert ( gve->tx_iobuf[ tx->prod % GVE_TX_FILL ] == NULL );

		/* Copy packet fragment (at most one buffer's worth) */
		frag_len = ( len - offset );
		if ( frag_len > GVE_BUF_SIZE )
			frag_len = GVE_BUF_SIZE;
		copy_to_user ( gve_buffer ( tx, tx->prod ), 0,
			       ( iobuf->data + offset ), frag_len );

		/* Populate descriptor: the first fragment carries the
		 * descriptor count and total length; subsequent
		 * fragments are marked as continuations
		 */
		index = ( tx->prod++ & ( tx->count - 1 ) );
		memset ( &desc.pkt, 0, sizeof ( desc.pkt ) );
		if ( offset ) {
			desc.pkt.type = GVE_TX_TYPE_CONT;
		} else {
			desc.pkt.type = GVE_TX_TYPE_START;
			desc.pkt.count = count;
			desc.pkt.total = cpu_to_be16 ( len );
		}
		desc.pkt.len = cpu_to_be16 ( frag_len );
		copy_to_user ( tx->desc, ( index * sizeof ( desc ) ), &desc,
			       sizeof ( desc.pkt ) );
		DBGC2 ( gve, "GVE %p TX %#04x %#02x:%#02x len %#04x/%#04x at "
			"%#08zx\n", gve, index, desc.pkt.type, desc.pkt.count,
			be16_to_cpu ( desc.pkt.len ),
			be16_to_cpu ( desc.pkt.total ),
			gve_address ( tx, index ) );
	}
	assert ( ( tx->prod - tx->cons ) <= tx->fill );

	/* Record I/O buffer against final descriptor, so that
	 * completion of the last fragment completes the packet
	 */
	gve->tx_iobuf[ ( tx->prod - 1U ) % GVE_TX_FILL ] = iobuf;

	/* Ring doorbell (after ensuring descriptors are visible) */
	wmb();
	writel ( bswap_32 ( tx->prod ), tx->db );

	return 0;
}
1240 
1241 /**
1242  * Poll for completed transmissions
1243  *
1244  * @v netdev Network device
1245  */
1246 static void gve_poll_tx ( struct net_device *netdev ) {
1247  struct gve_nic *gve = netdev->priv;
1248  struct gve_queue *tx = &gve->tx;
1249  struct io_buffer *iobuf;
1250  uint32_t count;
1251 
1252  /* Read event counter */
1253  count = be32_to_cpu ( tx->event->count );
1254 
1255  /* Process transmit completions */
1256  while ( count != tx->cons ) {
1257  DBGC2 ( gve, "GVE %p TX %#04x complete\n", gve, tx->cons );
1258  iobuf = gve->tx_iobuf[ tx->cons % GVE_TX_FILL ];
1259  gve->tx_iobuf[ tx->cons % GVE_TX_FILL ] = NULL;
1260  tx->cons++;
1261  if ( iobuf )
1262  netdev_tx_complete ( netdev, iobuf );
1263  }
1264 }
1265 
/**
 * Poll for received packets
 *
 * Completions are consumed speculatively (using local copies of the
 * consumer counter and sequence number) until a full packet — which
 * may span multiple GVE_RXF_MORE-linked completions — or an error is
 * seen, at which point the accumulated data is copied into a single
 * I/O buffer and the local state is committed.
 *
 * @v netdev		Network device
 */
static void gve_poll_rx ( struct net_device *netdev ) {
	struct gve_nic *gve = netdev->priv;
	struct gve_queue *rx = &gve->rx;
	struct gve_rx_completion cmplt;
	struct io_buffer *iobuf;
	unsigned int index;
	unsigned int seq;
	uint32_t cons;
	size_t offset;
	size_t total;
	size_t len;
	int rc;

	/* Process receive completions using local copies of the
	 * consumer counter and sequence number
	 */
	cons = rx->cons;
	seq = gve->seq;
	total = 0;
	while ( 1 ) {

		/* Read next possible completion */
		index = ( cons++ & ( rx->count - 1 ) );
		offset = ( ( index * sizeof ( cmplt ) ) +
			   offsetof ( typeof ( cmplt ), pkt ) );
		copy_from_user ( &cmplt.pkt, rx->cmplt, offset,
				 sizeof ( cmplt.pkt ) );

		/* Check sequence number: a mismatch means no further
		 * completions are ready yet
		 */
		if ( ( cmplt.pkt.seq & GVE_RX_SEQ_MASK ) != seq )
			break;
		seq = gve_next ( seq );

		/* Parse completion */
		len = be16_to_cpu ( cmplt.pkt.len );
		DBGC2 ( gve, "GVE %p RX %#04x %#02x:%#02x len %#04zx at "
			"%#08zx\n", gve, index, cmplt.pkt.seq, cmplt.pkt.flags,
			len, gve_address ( rx, index ) );

		/* Accumulate a complete packet: an error discards any
		 * accumulated length; GVE_RXF_MORE links fragments
		 */
		if ( cmplt.pkt.flags & GVE_RXF_ERROR ) {
			total = 0;
		} else {
			total += len;
			if ( cmplt.pkt.flags & GVE_RXF_MORE )
				continue;
		}
		gve->seq = seq;

		/* Allocate and populate I/O buffer (NULL on error or
		 * allocation failure: fragments are then just
		 * consumed and discarded below)
		 */
		iobuf = ( total ? alloc_iob ( total ) : NULL );
		for ( ; rx->cons != cons ; rx->cons++ ) {

			/* Re-read completion length (the first field
			 * of the packet descriptor, hence the short
			 * read into &cmplt.pkt)
			 */
			index = ( rx->cons & ( rx->count - 1 ) );
			offset = ( ( index * sizeof ( cmplt ) ) +
				   offsetof ( typeof ( cmplt ), pkt.len ) );
			copy_from_user ( &cmplt.pkt, rx->cmplt, offset,
					 sizeof ( cmplt.pkt.len ) );

			/* Copy data from the queue pages */
			if ( iobuf ) {
				len = be16_to_cpu ( cmplt.pkt.len );
				copy_from_user ( iob_put ( iobuf, len ),
						 gve_buffer ( rx, rx->cons ),
						 0, len );
			}
		}
		assert ( ( iobuf == NULL ) || ( iob_len ( iobuf ) == total ) );
		total = 0;

		/* Hand off packet to network stack (stripping the
		 * device's leading padding), or report the error
		 */
		if ( iobuf ) {
			iob_pull ( iobuf, GVE_RX_PAD );
			netdev_rx ( netdev, iobuf );
		} else {
			rc = ( ( cmplt.pkt.flags & GVE_RXF_ERROR ) ?
			       -EIO : -ENOMEM );
			netdev_rx_err ( netdev, NULL, rc );
		}

		/* Sanity check */
		assert ( rx->cons == cons );
		assert ( gve->seq == seq );
		assert ( total == 0 );
	}
}
1356 
1357 /**
1358  * Refill receive queue
1359  *
1360  * @v netdev Network device
1361  */
1362 static void gve_refill_rx ( struct net_device *netdev ) {
1363  struct gve_nic *gve = netdev->priv;
1364  struct gve_queue *rx = &gve->rx;
1365  unsigned int prod;
1366 
1367  /* The receive descriptors are prepopulated at the time of
1368  * creating the receive queue (pointing to the preallocated
1369  * queue pages). Refilling is therefore just a case of
1370  * ringing the doorbell if the device is not yet aware of any
1371  * available descriptors.
1372  */
1373  prod = ( rx->cons + rx->fill );
1374  if ( prod != rx->prod ) {
1375  rx->prod = prod;
1376  writel ( bswap_32 ( prod ), rx->db );
1377  DBGC2 ( gve, "GVE %p RX %#04x ready\n", gve, rx->prod );
1378  }
1379 }
1380 
/**
 * Poll for completed and received packets
 *
 * @v netdev		Network device
 */
static void gve_poll ( struct net_device *netdev ) {

	/* Link status is (ab)used to indicate that the queues have
	 * been set up: poll only once the startup process completes.
	 */
	if ( netdev_link_ok ( netdev ) ) {

		/* Poll for transmit completions */
		gve_poll_tx ( netdev );

		/* Poll for receive completions */
		gve_poll_rx ( netdev );

		/* Refill receive queue */
		gve_refill_rx ( netdev );
	}
}
1401 
1402 /** GVE network device operations */
1404  .open = gve_open,
1405  .close = gve_close,
1406  .transmit = gve_transmit,
1407  .poll = gve_poll,
1408 };
1409 
1410 /******************************************************************************
1411  *
1412  * PCI interface
1413  *
1414  ******************************************************************************
1415  */
1416 
/** Transmit descriptor queue type */
static const struct gve_queue_type gve_tx_type = {
	.name = "TX",
	/* Admin command constructor for queue creation */
	.param = gve_create_tx_param,
	.qpl = GVE_TX_QPL,
	.irq = GVE_TX_IRQ,
	.fill = GVE_TX_FILL,
	.desc_len = sizeof ( struct gve_tx_descriptor ),
	/* Admin queue opcodes for create/destroy */
	.create = GVE_ADMIN_CREATE_TX,
	.destroy = GVE_ADMIN_DESTROY_TX,
};
1428 
/** Receive descriptor queue type */
static const struct gve_queue_type gve_rx_type = {
	.name = "RX",
	/* Admin command constructor for queue creation */
	.param = gve_create_rx_param,
	.qpl = GVE_RX_QPL,
	.irq = GVE_RX_IRQ,
	.fill = GVE_RX_FILL,
	.desc_len = sizeof ( struct gve_rx_descriptor ),
	/* Receive queues additionally have a completion ring */
	.cmplt_len = sizeof ( struct gve_rx_completion ),
	/* Admin queue opcodes for create/destroy */
	.create = GVE_ADMIN_CREATE_RX,
	.destroy = GVE_ADMIN_DESTROY_RX,
};
1441 
1442 /**
1443  * Set up admin queue and get device description
1444  *
1445  * @v gve GVE device
1446  * @ret rc Return status code
1447  */
1448 static int gve_setup ( struct gve_nic *gve ) {
1449  unsigned int i;
1450  int rc;
1451 
1452  /* Attempt several times, since the device may decide to add
1453  * in a few spurious resets.
1454  */
1455  for ( i = 0 ; i < GVE_RESET_MAX_RETRY ; i++ ) {
1456 
1457  /* Reset device */
1458  if ( ( rc = gve_reset ( gve ) ) != 0 )
1459  continue;
1460 
1461  /* Enable admin queue */
1462  gve_admin_enable ( gve );
1463 
1464  /* Fetch MAC address */
1465  if ( ( rc = gve_describe ( gve ) ) != 0 )
1466  continue;
1467 
1468  /* Success */
1469  return 0;
1470  }
1471 
1472  DBGC ( gve, "GVE %p failed to get device description: %s\n",
1473  gve, strerror ( rc ) );
1474  return rc;
1475 }
1476 
1477 /** Device startup process descriptor */
1480 
1481 /**
1482  * Probe PCI device
1483  *
1484  * @v pci PCI device
1485  * @ret rc Return status code
1486  */
1487 static int gve_probe ( struct pci_device *pci ) {
1488  struct net_device *netdev;
1489  struct gve_nic *gve;
1490  unsigned long cfg_start;
1491  unsigned long db_start;
1492  unsigned long db_size;
1493  int rc;
1494 
1495  /* Allocate and initialise net device */
1496  netdev = alloc_etherdev ( sizeof ( *gve ) );
1497  if ( ! netdev ) {
1498  rc = -ENOMEM;
1499  goto err_alloc;
1500  }
1502  gve = netdev->priv;
1503  pci_set_drvdata ( pci, netdev );
1504  netdev->dev = &pci->dev;
1505  memset ( gve, 0, sizeof ( *gve ) );
1506  gve->netdev = netdev;
1507  gve->tx.type = &gve_tx_type;
1508  gve->rx.type = &gve_rx_type;
1510  timer_init ( &gve->watchdog, gve_watchdog, &netdev->refcnt );
1511 
1512  /* Fix up PCI device */
1513  adjust_pci_device ( pci );
1514 
1515  /* Check PCI revision */
1516  pci_read_config_byte ( pci, PCI_REVISION, &gve->revision );
1517  DBGC ( gve, "GVE %p is revision %#02x\n", gve, gve->revision );
1518 
1519  /* Map configuration registers */
1520  cfg_start = pci_bar_start ( pci, GVE_CFG_BAR );
1521  gve->cfg = pci_ioremap ( pci, cfg_start, GVE_CFG_SIZE );
1522  if ( ! gve->cfg ) {
1523  rc = -ENODEV;
1524  goto err_cfg;
1525  }
1526 
1527  /* Map doorbell registers */
1528  db_start = pci_bar_start ( pci, GVE_DB_BAR );
1529  db_size = pci_bar_size ( pci, GVE_DB_BAR );
1530  gve->db = pci_ioremap ( pci, db_start, db_size );
1531  if ( ! gve->db ) {
1532  rc = -ENODEV;
1533  goto err_db;
1534  }
1535 
1536  /* Configure DMA */
1537  gve->dma = &pci->dma;
1538  dma_set_mask_64bit ( gve->dma );
1539  assert ( netdev->dma == NULL );
1540 
1541  /* Allocate admin queue */
1542  if ( ( rc = gve_admin_alloc ( gve ) ) != 0 )
1543  goto err_admin;
1544 
1545  /* Set up the device */
1546  if ( ( rc = gve_setup ( gve ) ) != 0 )
1547  goto err_setup;
1548 
1549  /* Register network device */
1550  if ( ( rc = register_netdev ( netdev ) ) != 0 )
1551  goto err_register_netdev;
1552 
1553  return 0;
1554 
1556  err_register_netdev:
1557  err_setup:
1558  gve_reset ( gve );
1559  gve_admin_free ( gve );
1560  err_admin:
1561  iounmap ( gve->db );
1562  err_db:
1563  iounmap ( gve->cfg );
1564  err_cfg:
1565  netdev_nullify ( netdev );
1566  netdev_put ( netdev );
1567  err_alloc:
1568  return rc;
1569 }
1570 
1571 /**
1572  * Remove PCI device
1573  *
1574  * @v pci PCI device
1575  */
1576 static void gve_remove ( struct pci_device *pci ) {
1577  struct net_device *netdev = pci_get_drvdata ( pci );
1578  struct gve_nic *gve = netdev->priv;
1579 
1580  /* Unregister network device */
1582 
1583  /* Reset device */
1584  gve_reset ( gve );
1585 
1586  /* Free admin queue */
1587  gve_admin_free ( gve );
1588 
1589  /* Unmap registers */
1590  iounmap ( gve->db );
1591  iounmap ( gve->cfg );
1592 
1593  /* Free network device */
1594  netdev_nullify ( netdev );
1595  netdev_put ( netdev );
1596 }
1597 
/** GVE PCI device IDs */
static struct pci_device_id gve_nics[] = {
	/* Google Compute Engine virtual NIC (gVNIC) */
	PCI_ROM ( 0x1ae0, 0x0042, "gve", "gVNIC", 0 ),
};
1602 
/** GVE PCI driver */
struct pci_driver gve_driver __pci_driver = {
	.ids = gve_nics,
	.id_count = ( sizeof ( gve_nics ) / sizeof ( gve_nics[0] ) ),
	.probe = gve_probe,
	.remove = gve_remove,
};
#define cpu_to_be16(value)
Definition: byteswap.h:109
#define iob_pull(iobuf, len)
Definition: iobuf.h:102
#define __attribute__(x)
Definition: compiler.h:10
uint32_t base
Base.
Definition: librm.h:252
#define EINVAL
Invalid argument.
Definition: errno.h:428
#define GVE_PAGE_SIZE
Page size.
Definition: gve.h:42
#define ECONNRESET
Connection reset.
Definition: errno.h:363
struct dma_device * dma
DMA device.
Definition: gve.h:670
struct arbelprm_rc_send_wqe rc
Definition: arbel.h:14
#define GVE_ADMIN_STATUS_OK
Command succeeded.
Definition: gve.h:122
struct gve_rx_packet pkt
Packet descriptor.
Definition: gve.h:588
static void netdev_tx_complete(struct net_device *netdev, struct io_buffer *iobuf)
Complete network transmission.
Definition: netdevice.h:752
wmb()
DMA mappings.
static void gve_stop(struct gve_nic *gve)
Stop device.
Definition: gve.c:1000
#define iob_put(iobuf, len)
Definition: iobuf.h:120
struct dma_device dma
DMA device.
Definition: pci.h:210
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition: netdevice.c:586
A receive descriptor.
Definition: gve.h:559
void netdev_tx_defer(struct net_device *netdev, struct io_buffer *iobuf)
Defer transmitted packet.
Definition: netdevice.c:412
A PCI driver.
Definition: pci.h:247
static void gve_refill_rx(struct net_device *netdev)
Refill receive queue.
Definition: gve.c:1362
Create receive queue command.
Definition: gve.h:250
static int gve_describe(struct gve_nic *gve)
Get device descriptor.
Definition: gve.c:449
#define GVE_CFG_ADMIN_PFN
Admin queue page frame number (for older devices)
Definition: gve.h:84
#define GVE_ADMIN_DESTROY_RX
Destroy receive queue command.
Definition: gve.h:279
int(* open)(struct net_device *netdev)
Open network device.
Definition: netdevice.h:222
static int gve_alloc_queue(struct gve_nic *gve, struct gve_queue *queue)
Allocate descriptor queue.
Definition: gve.c:817
uint8_t opcode
Opcode.
Definition: ena.h:16
struct dma_mapping map
DMA mapping.
Definition: gve.h:374
struct gve_event * event
Event counters.
Definition: gve.h:372
#define GVE_ADMIN_DESCRIBE
Describe device command.
Definition: gve.h:133
union gve_scratch::@46 * buf
Buffer contents.
static void gve_startup(struct gve_nic *gve)
Device startup process.
Definition: gve.c:1021
#define GVE_RX_IRQ
Receive queue interrupt channel.
Definition: gve.h:556
#define GVE_CFG_DEVSTAT
Device status.
Definition: gve.h:73
Error codes.
static int gve_destroy_queue(struct gve_nic *gve, struct gve_queue *queue)
Destroy transmit or receive queue.
Definition: gve.c:689
struct gve_device_descriptor desc
Device descriptor.
Definition: gve.h:333
I/O buffers.
struct pci_device_id * ids
PCI ID table.
Definition: pci.h:249
#define GVE_CFG_BAR
Configuration BAR.
Definition: gve.h:63
uint32_t type
Operating system type.
Definition: ena.h:12
static int gve_deconfigure(struct gve_nic *gve)
Deconfigure device resources.
Definition: gve.c:537
size_t mtu
Maximum transmission unit length.
Definition: netdevice.h:415
static void process_init(struct process *process, struct process_descriptor *desc, struct refcnt *refcnt)
Initialise process and add to process list.
Definition: process.h:161
static __always_inline void copy_from_user(void *dest, userptr_t src, off_t src_off, size_t len)
Copy data from user buffer.
Definition: uaccess.h:337
#define GVE_RXF_MORE
Receive packet continues into next descriptor.
Definition: gve.h:578
#define GVE_CFG_DRVSTAT
Driver status.
Definition: gve.h:77
physaddr_t dma_phys(struct dma_mapping *map, physaddr_t addr)
Get DMA address from physical address.
Create transmit queue command.
Definition: gve.h:229
uint32_t readl(volatile uint32_t *io_addr)
Read 32-bit dword from memory-mapped device.
unsigned long user_to_phys(userptr_t userptr, off_t offset)
Convert user pointer to physical address.
uint64_t desc
Microcode descriptor list physical address.
Definition: ucode.h:12
static int gve_admin(struct gve_nic *gve)
Issue admin queue command.
Definition: gve.c:367
#define DBGC(...)
Definition: compiler.h:505
static int gve_admin_alloc(struct gve_nic *gve)
Allocate admin queue.
Definition: gve.c:191
Simple admin command.
Definition: gve.h:125
A process descriptor.
Definition: process.h:31
#define GVE_ALIGN
Address alignment.
Definition: gve.h:51
A retry timer.
Definition: retry.h:21
long index
Definition: bigint.h:61
uint64_t addr[GVE_QPL_MAX]
Page address.
Definition: gve.h:219
static __always_inline void dma_set_mask_64bit(struct dma_device *dma)
Set 64-bit addressable space mask.
Definition: dma.h:474
unsigned long long uint64_t
Definition: stdint.h:13
static void gve_create_rx_param(struct gve_queue *queue, union gve_admin_command *cmd)
Construct command to create receive queue.
Definition: gve.c:626
A receive completion descriptor.
Definition: gve.h:584
#define GVE_ADMIN_DESTROY_TX
Destroy transmit queue command.
Definition: gve.h:276
struct dma_device * dma
DMA device.
Definition: netdevice.h:366
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition: netdevice.c:230
uint32_t qpl
Queue page list ID.
Definition: gve.h:644
#define GVE_BUF_PER_PAGE
Number of data buffers per page.
Definition: gve.h:449
#define PROC_DESC_ONCE(object_type, process, _step)
Define a process descriptor for a process that runs only once.
Definition: process.h:97
#define GVE_IRQ_DISABLE
Disable interrupts.
Definition: gve.h:414
#define offsetof(type, field)
Get offset of a field within a structure.
Definition: stddef.h:24
A transmit or receive buffer descriptor.
Definition: gve.h:510
static void gve_remove(struct pci_device *pci)
Remove PCI device.
Definition: gve.c:1576
#define GVE_WATCHDOG_TIMEOUT
Time between reset recovery checks.
Definition: gve.h:707
const char * name
Name.
Definition: gve.h:634
Device descriptor.
Definition: gve.h:151
#define GVE_ADMIN_REGISTER
Register page list command.
Definition: gve.h:192
void process_del(struct process *process)
Remove process from process list.
Definition: process.c:79
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition: pci.c:154
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:129
static void gve_free_queue(struct gve_nic *gve, struct gve_queue *queue)
Free descriptor queue.
Definition: gve.c:911
static __always_inline unsigned long virt_to_phys(volatile const void *addr)
Convert virtual address to a physical address.
Definition: uaccess.h:287
static int gve_register(struct gve_nic *gve, struct gve_qpl *qpl)
Register queue page list.
Definition: gve.c:554
uint16_t len
Length.
Definition: gve.h:567
struct gve_queue rx
Receive queue.
Definition: gve.h:684
struct device dev
Generic device.
Definition: pci.h:208
#define GVE_TX_TYPE_CONT
Continuation of packet transmit descriptor type.
Definition: gve.h:541
static struct net_device_operations gve_operations
GVE network device operations.
Definition: gve.c:1403
struct gve_queue tx
Transmit queue.
Definition: gve.h:682
#define ECANCELED
Operation canceled.
Definition: errno.h:343
u16 seq
802.11 Sequence Control field
Definition: ieee80211.h:19
#define GVE_TX_FILL
Maximum number of transmit buffers.
Definition: gve.h:501
static const struct gve_queue_type gve_tx_type
Transmit descriptor queue type.
Definition: gve.c:1418
A timer.
Definition: timer.h:28
static int gve_start(struct gve_nic *gve)
Start up device.
Definition: gve.c:937
#define GVE_DB_BAR
Doorbell BAR.
Definition: gve.h:105
#define GVE_EVENT_MAX
Maximum number of event counters.
Definition: gve.h:367
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
Definition: netdevice.h:515
void memset_user(userptr_t userptr, off_t offset, int c, size_t len)
Fill user buffer with a constant byte.
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:359
struct dma_mapping map
Page mapping.
Definition: gve.h:489
#define rmb()
Definition: io.h:484
#define ENOMEM
Not enough space.
Definition: errno.h:534
uint32_t activity
Reset recovery recorded activity counter.
Definition: gve.h:697
void * db
Doorbell registers.
Definition: gve.h:664
userptr_t userptr_add(userptr_t userptr, off_t offset)
Add offset to user pointer.
void * memcpy(void *dest, const void *src, size_t len) __nonnull
struct retry_timer watchdog
Reset recovery watchdog timer.
Definition: gve.h:695
uint32_t userptr_t
A pointer to a user buffer.
Definition: libkir.h:159
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
An admin queue command.
Definition: gve.h:285
#define GVE_CFG_ADMIN_DB
Admin queue doorbell.
Definition: gve.h:87
#define ETH_HLEN
Definition: if_ether.h:9
void dma_free(struct dma_mapping *map, void *addr, size_t len)
Unmap and free DMA-coherent buffer.
Assertions.
#define GVE_RX_SEQ_MASK
Receive sequence number mask.
Definition: gve.h:581
struct dma_mapping map
DMA mapping.
Definition: gve.h:338
#define be32_to_cpu(value)
Definition: byteswap.h:116
struct process startup
Startup process.
Definition: gve.h:691
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
Google Virtual Ethernet network driver.
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition: netdevice.h:572
#define GVE_RX_FILL
Maximum number of receive buffers.
Definition: gve.h:550
#define container_of(ptr, type, field)
Get containing structure.
Definition: stddef.h:35
Ethernet protocol.
static userptr_t gve_buffer(struct gve_queue *queue, unsigned int index)
Get buffer address.
Definition: gve.c:779
uint8_t seq
Sequence number.
Definition: gve.h:571
#define GVE_ADMIN_CREATE_TX
Create transmit queue command.
Definition: gve.h:226
#define VM_MIGRATED_RATE
Definition: fault.h:33
#define GVE_CFG_ADMIN_LEN
Admin queue base address length (16-bit register)
Definition: gve.h:102
void * priv
Driver private data.
Definition: netdevice.h:431
static int gve_reset(struct gve_nic *gve)
Reset hardware.
Definition: gve.c:139
static void gve_free_qpl(struct gve_nic *nic __unused, struct gve_qpl *qpl)
Free queue page list.
Definition: gve.c:746
#define bswap_16(value)
Definition: byteswap.h:58
static int gve_admin_simple(struct gve_nic *gve, unsigned int opcode, unsigned int id)
Issue simple admin queue command.
Definition: gve.c:426
#define DBGC_HDA(...)
Definition: compiler.h:506
uint32_t cons
Consumer counter.
Definition: gve.h:625
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition: netdevice.h:774
void writel(uint32_t data, volatile uint32_t *io_addr)
Write 32-bit dword to memory-mapped device.
volatile uint32_t * db[GVE_IRQ_COUNT]
Interrupt doorbells.
Definition: gve.h:410
static void gve_poll_rx(struct net_device *netdev)
Poll for received packets.
Definition: gve.c:1271
#define build_assert(condition)
Assert a condition at build time (after dead code elimination)
Definition: assert.h:76
uint32_t prod
Producer counter.
Definition: gve.h:323
static int netdev_link_ok(struct net_device *netdev)
Check link state of network device.
Definition: netdevice.h:636
static struct net_device * netdev
Definition: gdbudp.c:52
unsigned int count
Number of pages.
Definition: gve.h:491
#define be16_to_cpu(value)
Definition: byteswap.h:115
Queue page list.
Definition: gve.h:485
uint16_t count
Number of entries.
Definition: ena.h:22
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition: pci.c:96
#define GVE_TX_TYPE_START
Start of packet transmit descriptor type.
Definition: gve.h:538
struct gve_irqs irqs
Interrupt channels.
Definition: gve.h:675
#define GVE_TX_QPL
Transmit queue page list ID.
Definition: gve.h:504
static void gve_create_tx_param(struct gve_queue *queue, union gve_admin_command *cmd)
Construct command to create transmit queue.
Definition: gve.c:607
void unregister_netdev(struct net_device *netdev)
Unregister network device.
Definition: netdevice.c:941
static int gve_create_queue(struct gve_nic *gve, struct gve_queue *queue)
Create transmit or receive queue.
Definition: gve.c:649
A descriptor queue.
Definition: gve.h:595
#define bswap_32(value)
Definition: byteswap.h:70
uint8_t id
Request identifier.
Definition: ena.h:12
void process_add(struct process *process)
Add process to process list.
Definition: process.c:59
#define DBGC2_HDA(...)
Definition: compiler.h:523
uint8_t flags
Flags.
Definition: gve.h:569
void * cfg
Configuration registers.
Definition: gve.h:662
#define GVE_CFG_ADMIN_BASE_LO
Admin queue base address low 32 bits.
Definition: gve.h:99
#define GVE_RESET_MAX_WAIT_MS
Maximum time to wait for reset.
Definition: gve.h:81
char * strerror(int errno)
Retrieve string representation of error number.
Definition: strerror.c:78
struct refcnt refcnt
Reference counter.
Definition: netdevice.h:354
static int gve_alloc_qpl(struct gve_nic *gve, struct gve_qpl *qpl, uint32_t id, unsigned int buffers)
Allocate queue page list.
Definition: gve.c:716
uint16_t cons
Consumer index.
Definition: ena.h:22
static void gve_restart(struct gve_nic *gve)
Trigger startup process.
Definition: gve.c:1058
unsigned long pci_bar_size(struct pci_device *pci, unsigned int reg)
Find the size of a PCI BAR.
Definition: pciextra.c:92
PCI bus.
static void gve_poll_tx(struct net_device *netdev)
Poll for completed transmissions.
Definition: gve.c:1246
A PCI device.
Definition: pci.h:206
int register_netdev(struct net_device *netdev)
Register network device.
Definition: netdevice.c:759
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:155
const char * eth_ntoa(const void *ll_addr)
Transcribe Ethernet address.
Definition: ethernet.c:175
Event counter array.
Definition: gve.h:370
static __always_inline void copy_to_user(userptr_t dest, off_t dest_off, const void *src, size_t len)
Copy data to user buffer.
Definition: uaccess.h:324
A network device.
Definition: netdevice.h:352
void netdev_link_err(struct net_device *netdev, int rc)
Mark network device as having a specific link state.
Definition: netdevice.c:207
#define ENODEV
No such device.
Definition: errno.h:509
char * inet_ntoa(struct in_addr in)
Convert IPv4 address to dotted-quad notation.
Definition: ipv4.c:668
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition: netdevice.h:528
#define GVE_ADMIN_DECONFIGURE
Deconfigure device resources command.
Definition: gve.h:282
A Google Virtual Ethernet NIC.
Definition: gve.h:660
struct io_buffer * tx_iobuf[GVE_TX_FILL]
Transmit I/O buffers.
Definition: gve.h:686
static void gve_watchdog(struct retry_timer *timer, int over __unused)
Reset recovery watchdog.
Definition: gve.c:1074
static void gve_close(struct net_device *netdev)
Close network device.
Definition: gve.c:1148
static size_t gve_address(struct gve_queue *queue, unsigned int index)
Get buffer address (within queue page list address space)
Definition: gve.c:762
struct gve_pages pages
Page address list.
Definition: gve.h:335
#define ETH_ALEN
Definition: if_ether.h:8
A PCI device ID list entry.
Definition: pci.h:170
#define EIO_ADMIN(status)
Definition: gve.c:116
static int gve_open(struct net_device *netdev)
Open network device.
Definition: gve.c:1114
Definition: nic.h:49
unsigned int uint32_t
Definition: stdint.h:12
unsigned int seq
Receive sequence number.
Definition: gve.h:688
#define GVE_RX_QPL
Receive queue page list ID.
Definition: gve.h:553
#define ENETDOWN
Network is down.
Definition: errno.h:478
static struct xen_remove_from_physmap * remove
Definition: xenmem.h:39
uint8_t status
Status.
Definition: ena.h:16
Network device operations.
Definition: netdevice.h:213
uint8_t desc_len
Descriptor size.
Definition: gve.h:650
#define GVE_TX_IRQ
Tranmsit queue interrupt channel.
Definition: gve.h:507
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition: netdevice.c:548
struct device * dev
Underlying hardware device.
Definition: netdevice.h:364
#define GVE_QPL_MAX
Maximum number of pages per queue.
Definition: gve.h:214
void * dma_alloc(struct dma_device *dma, struct dma_mapping *map, size_t len, size_t align)
Allocate and map DMA-coherent buffer.
Network device management.
void start_timer_fixed(struct retry_timer *timer, unsigned long timeout)
Start timer with a specified timeout.
Definition: retry.c:64
unsigned long physaddr_t
Definition: stdint.h:20
unsigned int id
Queue page list ID.
Definition: gve.h:493
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition: pci.h:369
void mdelay(unsigned long msecs)
Delay for a fixed number of milliseconds.
Definition: timer.c:78
#define cpu_to_be32(value)
Definition: byteswap.h:110
#define GVE_ADMIN_CONFIGURE
Configure device resources command.
Definition: gve.h:173
void stop_timer(struct retry_timer *timer)
Stop timer.
Definition: retry.c:117
void dma_ufree(struct dma_mapping *map, userptr_t addr, size_t len)
Unmap and free DMA-coherent buffer from external (user) memory.
void netdev_tx_complete_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Complete network transmission.
Definition: netdevice.c:470
#define GVE_ADMIN_MAX_WAIT_MS
Maximum time to wait for admin queue commands.
Definition: gve.h:701
#define GVE_CFG_ADMIN_BASE_HI
Admin queue base address high 32 bits.
Definition: gve.h:96
static unsigned int gve_next(unsigned int seq)
Calculate next receive sequence number.
Definition: gve.c:792
unsigned int count
Number of descriptors (must be a power of two)
Definition: gve.h:606
static struct pci_device_id gve_nics[]
GVE PCI device IDs.
Definition: gve.c:1599
struct gve_events events
Event counters.
Definition: gve.h:677
#define GVE_ADMIN_COUNT
Number of admin queue commands.
Definition: gve.h:316
#define GVE_CFG_ADMIN_EVT
Admin queue event counter.
Definition: gve.h:90
#define DBGC2(...)
Definition: compiler.h:522
#define GVE_IRQ_COUNT
Number of interrupt channels.
Definition: gve.h:401
int(* probe)(struct pci_device *pci)
Probe device.
Definition: pci.h:260
userptr_t data
Page addresses.
Definition: gve.h:487
#define GVE_CFG_DRVSTAT_RUN
Run admin queue.
Definition: gve.h:78
static int gve_transmit(struct net_device *netdev, struct io_buffer *iobuf)
Transmit packet.
Definition: gve.c:1175
uint64_t addr
Address (within queue page list address space)
Definition: gve.h:512
Interrupt channel array.
Definition: gve.h:404
unsigned int retries
Startup process retry counter.
Definition: gve.h:693
void * data
Start of data.
Definition: iobuf.h:48
unsigned int count
Actual number of event counters.
Definition: gve.h:376
u32 addr
Definition: sky2.h:8
#define EIO
Input/output error.
Definition: errno.h:433
#define GVE_ADMIN_CREATE_RX
Create receive queue command.
Definition: gve.h:247
uint8_t revision
PCI revision.
Definition: gve.h:666
struct net_device * alloc_etherdev(size_t priv_size)
Allocate Ethernet device.
Definition: ethernet.c:264
u8 rx[WPA_TKIP_MIC_KEY_LEN]
MIC key for packets from the AP.
Definition: wpa.h:234
static void gve_admin_enable(struct gve_nic *gve)
Enable admin queue.
Definition: gve.c:280
static union gve_admin_command * gve_admin_command(struct gve_nic *gve)
Get next available admin queue command slot.
Definition: gve.c:310
void iounmap(volatile const void *io_addr)
Unmap I/O address.
#define cpu_to_be64(value)
Definition: byteswap.h:111
struct pci_driver gve_driver __pci_driver
GVE PCI driver.
Definition: gve.c:1604
#define PCI_REVISION
PCI revision.
Definition: pci.h:44
static int gve_unregister(struct gve_nic *gve, struct gve_qpl *qpl)
Unregister page list.
Definition: gve.c:589
static int gve_admin_wait(struct gve_nic *gve)
Wait for admin queue command to complete.
Definition: gve.c:331
static const struct gve_queue_type gve_rx_type
Receive descriptor queue type.
Definition: gve.c:1430
Page list.
Definition: gve.h:217
userptr_t dma_umalloc(struct dma_device *dma, struct dma_mapping *map, size_t len, size_t align)
Allocate and map DMA-coherent buffer from external (user) memory.
uint32_t db_idx
Interrupt doorbell index (within doorbell BAR)
Definition: gve.h:382
Scratch buffer for admin queue commands.
Definition: gve.h:329
static int gve_probe(struct pci_device *pci)
Probe PCI device.
Definition: gve.c:1487
uint16_t offset
Offset to command line.
Definition: bzimage.h:8
#define GVE_ADMIN_UNREGISTER
Unregister page list command.
Definition: gve.h:223
Fault injection.
struct net_device * netdev
Network device.
Definition: gve.h:668
typeof(acpi_finder=acpi_find)
ACPI table finder.
Definition: acpi.c:45
struct dma_mapping map
DMA mapping.
Definition: gve.h:408
struct gve_irq * irq
Interrupt channels.
Definition: gve.h:406
#define GVE_ADMIN_DESCRIBE_VER
Device descriptor version.
Definition: gve.h:148
Admin queue.
Definition: gve.h:319
#define GVE_BUF_SIZE
Queue data buffer size.
Definition: gve.h:446
static int gve_configure(struct gve_nic *gve)
Configure device resources.
Definition: gve.c:496
static void gve_admin_free(struct gve_nic *gve)
Free admin queue.
Definition: gve.c:252
FILE_LICENCE(GPL2_OR_LATER_OR_UBDL)
union gve_admin_command * cmd
Commands.
Definition: gve.h:321
A descriptor queue type.
Definition: gve.h:632
size_t max_pkt_len
Maximum packet length.
Definition: netdevice.h:409
uint8_t cmplt_len
Completion size.
Definition: gve.h:652
struct dma_mapping map
DMA mapping.
Definition: gve.h:325
static int gve_setup(struct gve_nic *gve)
Set up admin queue and get device description.
Definition: gve.c:1448
#define GVE_RXF_ERROR
Receive error.
Definition: gve.h:575
static __always_inline physaddr_t dma(struct dma_mapping *map, void *addr)
Get DMA address from virtual address.
Definition: dma.h:436
void * pci_ioremap(struct pci_device *pci, unsigned long bus_addr, size_t len)
Map PCI bus address as an I/O address.
const struct gve_queue_type * type
Queue type.
Definition: gve.h:604
struct gve_admin admin
Admin queue.
Definition: gve.h:673
uint16_t queue
Queue ID.
Definition: ena.h:22
uint32_t prod
Producer counter.
Definition: gve.h:623
uint32_t len
Length.
Definition: ena.h:14
uint8_t hw_addr[MAX_HW_ADDR_LEN]
Hardware address.
Definition: netdevice.h:381
#define NULL
NULL pointer (VOID *)
Definition: Base.h:321
struct golan_eqe_cmd cmd
Definition: CIB_PRM.h:29
#define ETIMEDOUT
Connection timed out.
Definition: errno.h:669
String functions.
#define PCI_ROM(_vendor, _device, _name, _description, _data)
Definition: pci.h:303
#define GVE_RX_PAD
Padding at the start of all received packets.
Definition: gve.h:592
uint8_t create
Command to create queue.
Definition: gve.h:654
A transmit descriptor.
Definition: gve.h:530
#define GVE_RESET_MAX_RETRY
Maximum number of times to reattempt device reset.
Definition: gve.h:704
static void gve_poll(struct net_device *netdev)
Poll for completed and received packets.
Definition: gve.c:1386
void startup(void)
Start up iPXE.
Definition: init.c:67
struct gve_scratch scratch
Scratch buffer.
Definition: gve.h:679
static struct process_descriptor gve_startup_desc
Device startup process descriptor.
Definition: gve.c:1478
u8 tx[WPA_TKIP_MIC_KEY_LEN]
MIC key for packets to the AP.
Definition: wpa.h:237
A DMA-capable device.
Definition: dma.h:47
#define GVE_CFG_SIZE
Configuration BAR size.
Definition: gve.h:70
void * memset(void *dest, int character, size_t len) __nonnull
int pci_read_config_byte(struct pci_device *pci, unsigned int where, uint8_t *value)
Read byte from PCI configuration space.
A persistent I/O buffer.
Definition: iobuf.h:33