iPXE
gve.c
1 /*
2  * Copyright (C) 2024 Michael Brown <mbrown@fensystems.co.uk>.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA.
18  *
19  * You can also choose to distribute this program under the terms of
20  * the Unmodified Binary Distribution Licence (as given in the file
21  * COPYING.UBDL), provided that you have satisfied its requirements.
22  */
23 
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25 
26 #include <stdint.h>
27 #include <string.h>
28 #include <unistd.h>
29 #include <errno.h>
30 #include <assert.h>
31 #include <byteswap.h>
32 #include <ipxe/netdevice.h>
33 #include <ipxe/ethernet.h>
34 #include <ipxe/if_ether.h>
35 #include <ipxe/iobuf.h>
36 #include <ipxe/dma.h>
37 #include <ipxe/pci.h>
38 #include <ipxe/fault.h>
39 #include "gve.h"
40 
41 /** @file
42  *
43  * Google Virtual Ethernet network driver
44  *
45  */
46 
47 /* Disambiguate the various error causes */
48 #define EINFO_EIO_ADMIN_UNSET \
49  __einfo_uniqify ( EINFO_EIO, 0x00, "Uncompleted" )
50 #define EIO_ADMIN_UNSET \
51  __einfo_error ( EINFO_EIO_ADMIN_UNSET )
52 #define EINFO_EIO_ADMIN_ABORTED \
53  __einfo_uniqify ( EINFO_EIO, 0x10, "Aborted" )
54 #define EIO_ADMIN_ABORTED \
55  __einfo_error ( EINFO_EIO_ADMIN_ABORTED )
56 #define EINFO_EIO_ADMIN_EXISTS \
57  __einfo_uniqify ( EINFO_EIO, 0x11, "Already exists" )
58 #define EIO_ADMIN_EXISTS \
59  __einfo_error ( EINFO_EIO_ADMIN_EXISTS )
60 #define EINFO_EIO_ADMIN_CANCELLED \
61  __einfo_uniqify ( EINFO_EIO, 0x12, "Cancelled" )
62 #define EIO_ADMIN_CANCELLED \
63  __einfo_error ( EINFO_EIO_ADMIN_CANCELLED )
64 #define EINFO_EIO_ADMIN_DATALOSS \
65  __einfo_uniqify ( EINFO_EIO, 0x13, "Data loss" )
66 #define EIO_ADMIN_DATALOSS \
67  __einfo_error ( EINFO_EIO_ADMIN_DATALOSS )
68 #define EINFO_EIO_ADMIN_DEADLINE \
69  __einfo_uniqify ( EINFO_EIO, 0x14, "Deadline exceeded" )
70 #define EIO_ADMIN_DEADLINE \
71  __einfo_error ( EINFO_EIO_ADMIN_DEADLINE )
72 #define EINFO_EIO_ADMIN_PRECONDITION \
73  __einfo_uniqify ( EINFO_EIO, 0x15, "Failed precondition" )
74 #define EIO_ADMIN_PRECONDITION \
75  __einfo_error ( EINFO_EIO_ADMIN_PRECONDITION )
76 #define EINFO_EIO_ADMIN_INTERNAL \
77  __einfo_uniqify ( EINFO_EIO, 0x16, "Internal error" )
78 #define EIO_ADMIN_INTERNAL \
79  __einfo_error ( EINFO_EIO_ADMIN_INTERNAL )
80 #define EINFO_EIO_ADMIN_INVAL \
81  __einfo_uniqify ( EINFO_EIO, 0x17, "Invalid argument" )
82 #define EIO_ADMIN_INVAL \
83  __einfo_error ( EINFO_EIO_ADMIN_INVAL )
84 #define EINFO_EIO_ADMIN_NOT_FOUND \
85  __einfo_uniqify ( EINFO_EIO, 0x18, "Not found" )
86 #define EIO_ADMIN_NOT_FOUND \
87  __einfo_error ( EINFO_EIO_ADMIN_NOT_FOUND )
88 #define EINFO_EIO_ADMIN_RANGE \
89  __einfo_uniqify ( EINFO_EIO, 0x19, "Out of range" )
90 #define EIO_ADMIN_RANGE \
91  __einfo_error ( EINFO_EIO_ADMIN_RANGE )
92 #define EINFO_EIO_ADMIN_PERM \
93  __einfo_uniqify ( EINFO_EIO, 0x1a, "Permission denied" )
94 #define EIO_ADMIN_PERM \
95  __einfo_error ( EINFO_EIO_ADMIN_PERM )
96 #define EINFO_EIO_ADMIN_UNAUTH \
97  __einfo_uniqify ( EINFO_EIO, 0x1b, "Unauthenticated" )
98 #define EIO_ADMIN_UNAUTH \
99  __einfo_error ( EINFO_EIO_ADMIN_UNAUTH )
100 #define EINFO_EIO_ADMIN_RESOURCE \
101  __einfo_uniqify ( EINFO_EIO, 0x1c, "Resource exhausted" )
102 #define EIO_ADMIN_RESOURCE \
103  __einfo_error ( EINFO_EIO_ADMIN_RESOURCE )
104 #define EINFO_EIO_ADMIN_UNAVAIL \
105  __einfo_uniqify ( EINFO_EIO, 0x1d, "Unavailable" )
106 #define EIO_ADMIN_UNAVAIL \
107  __einfo_error ( EINFO_EIO_ADMIN_UNAVAIL )
108 #define EINFO_EIO_ADMIN_NOTSUP \
109  __einfo_uniqify ( EINFO_EIO, 0x1e, "Unimplemented" )
110 #define EIO_ADMIN_NOTSUP \
111  __einfo_error ( EINFO_EIO_ADMIN_NOTSUP )
112 #define EINFO_EIO_ADMIN_UNKNOWN \
113  __einfo_uniqify ( EINFO_EIO, 0x1f, "Unknown error" )
114 #define EIO_ADMIN_UNKNOWN \
115  __einfo_error ( EINFO_EIO_ADMIN_UNKNOWN )
116 #define EIO_ADMIN( status ) \
117  EUNIQ ( EINFO_EIO, ( (status) & 0x1f ), \
118  EIO_ADMIN_UNSET, EIO_ADMIN_ABORTED, EIO_ADMIN_EXISTS, \
119  EIO_ADMIN_CANCELLED, EIO_ADMIN_DATALOSS, \
120  EIO_ADMIN_DEADLINE, EIO_ADMIN_PRECONDITION, \
121  EIO_ADMIN_INTERNAL, EIO_ADMIN_INVAL, \
122  EIO_ADMIN_NOT_FOUND, EIO_ADMIN_RANGE, EIO_ADMIN_PERM, \
123  EIO_ADMIN_UNAUTH, EIO_ADMIN_RESOURCE, \
124  EIO_ADMIN_UNAVAIL, EIO_ADMIN_NOTSUP, EIO_ADMIN_UNKNOWN )
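/* Illustrative sketch (not part of the original file): the EIO_ADMIN()
 * mapping selects one of the errors above using the low five bits of a
 * failed command's status word. For example, a status word whose low
 * five bits are 0x11 is reported as "Already exists":
 */
static int example_admin_error ( uint32_t status ) {
	/* ( status & 0x1f ) == 0x11 => -EIO_ADMIN_EXISTS */
	return -EIO_ADMIN ( status );
}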
125 
126 /******************************************************************************
127  *
128  * Device reset
129  *
130  ******************************************************************************
131  */
132 
133 /**
134  * Reset hardware
135  *
136  * @v gve GVE device
137  * @ret rc Return status code
138  */
139 static int gve_reset ( struct gve_nic *gve ) {
140  uint32_t pfn;
141  unsigned int i;
142 
143  /* Skip reset if admin queue page frame number is already
144  * clear. Triggering a reset on an already-reset device seems
145  * to cause a delayed reset to be scheduled. This can cause
146  * the device to end up in a reset loop, where each attempt to
147  * recover from reset triggers another reset a few seconds
148  * later.
149  */
150  pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
151  if ( ! pfn ) {
152  DBGC ( gve, "GVE %p skipping reset\n", gve );
153  return 0;
154  }
155 
156  /* Clear admin queue page frame number */
157  writel ( 0, gve->cfg + GVE_CFG_ADMIN_PFN );
158  wmb();
159 
160  /* Wait for device to reset */
161  for ( i = 0 ; i < GVE_RESET_MAX_WAIT_MS ; i++ ) {
162 
163  /* Delay */
164  mdelay ( 1 );
165 
166  /* Check for reset completion */
167  pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
168  if ( ! pfn )
169  return 0;
170  }
171 
172  DBGC ( gve, "GVE %p reset timed out (PFN %#08x devstat %#08x)\n",
173  gve, bswap_32 ( pfn ),
174  bswap_32 ( readl ( gve->cfg + GVE_CFG_DEVSTAT ) ) );
175  return -ETIMEDOUT;
176 }
177 
178 /******************************************************************************
179  *
180  * Admin queue
181  *
182  ******************************************************************************
183  */
184 
185 /**
186  * Allocate admin queue
187  *
188  * @v gve GVE device
189  * @ret rc Return status code
190  */
191 static int gve_admin_alloc ( struct gve_nic *gve ) {
192  struct dma_device *dma = gve->dma;
193  struct gve_admin *admin = &gve->admin;
194  struct gve_scratch *scratch = &gve->scratch;
195  size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) );
196  size_t scratch_len = sizeof ( *scratch->buf );
197  int rc;
198 
199  /* Allocate admin queue */
200  admin->cmd = dma_alloc ( dma, &admin->map, admin_len, GVE_ALIGN );
201  if ( ! admin->cmd ) {
202  rc = -ENOMEM;
203  goto err_admin;
204  }
205 
206  /* Allocate scratch buffer */
207  scratch->buf = dma_alloc ( dma, &scratch->map, scratch_len, GVE_ALIGN );
208  if ( ! scratch->buf ) {
209  rc = -ENOMEM;
210  goto err_scratch;
211  }
212 
213  DBGC ( gve, "GVE %p AQ at [%08lx,%08lx) scratch [%08lx,%08lx)\n",
214  gve, virt_to_phys ( admin->cmd ),
215  ( virt_to_phys ( admin->cmd ) + admin_len ),
216  virt_to_phys ( scratch->buf ),
217  ( virt_to_phys ( scratch->buf ) + scratch_len ) );
218  return 0;
219 
220  dma_free ( &scratch->map, scratch->buf, scratch_len );
221  err_scratch:
222  dma_free ( &admin->map, admin->cmd, admin_len );
223  err_admin:
224  return rc;
225 }
226 
227 /**
228  * Free admin queue
229  *
230  * @v gve GVE device
231  */
232 static void gve_admin_free ( struct gve_nic *gve ) {
233  struct gve_admin *admin = &gve->admin;
234  struct gve_scratch *scratch = &gve->scratch;
235  size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) );
236  size_t scratch_len = sizeof ( *scratch->buf );
237 
238  /* Free scratch buffer */
239  dma_free ( &scratch->map, scratch->buf, scratch_len );
240 
241  /* Free admin queue */
242  dma_free ( &admin->map, admin->cmd, admin_len );
243 }
244 
245 /**
246  * Enable admin queue
247  *
248  * @v gve GVE device
249  */
250 static void gve_admin_enable ( struct gve_nic *gve ) {
251  struct gve_admin *admin = &gve->admin;
252  size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) );
253  physaddr_t base;
254 
255  /* Reset queue */
256  admin->prod = 0;
257 
258  /* Program queue addresses and capabilities */
259  base = dma ( &admin->map, admin->cmd );
260  writel ( bswap_32 ( base / GVE_PAGE_SIZE ),
261  gve->cfg + GVE_CFG_ADMIN_PFN );
262  writel ( bswap_32 ( base & 0xffffffffUL ),
263  gve->cfg + GVE_CFG_ADMIN_BASE_LO );
264  if ( sizeof ( base ) > sizeof ( uint32_t ) ) {
265  writel ( bswap_32 ( ( ( uint64_t ) base ) >> 32 ),
266  gve->cfg + GVE_CFG_ADMIN_BASE_HI );
267  } else {
268  writel ( 0, gve->cfg + GVE_CFG_ADMIN_BASE_HI );
269  }
270  writel ( bswap_16 ( admin_len ), gve->cfg + GVE_CFG_ADMIN_LEN );
271  writel ( bswap_32 ( GVE_CFG_DRVSTAT_RUN ), gve->cfg + GVE_CFG_DRVSTAT );
272 }
273 
274 /**
275  * Get next available admin queue command slot
276  *
277  * @v gve GVE device
278  * @ret cmd Admin queue command
279  */
280 static union gve_admin_command * gve_admin_command ( struct gve_nic *gve ) {
281  struct gve_admin *admin = &gve->admin;
282  union gve_admin_command *cmd;
283  unsigned int index;
284 
285  /* Get next command slot */
286  index = admin->prod;
287  cmd = &admin->cmd[ index % GVE_ADMIN_COUNT ];
288 
289  /* Initialise request */
290  memset ( cmd, 0, sizeof ( *cmd ) );
291 
292  return cmd;
293 }
294 
295 /**
296  * Wait for admin queue command to complete
297  *
298  * @v gve GVE device
299  * @ret rc Return status code
300  */
301 static int gve_admin_wait ( struct gve_nic *gve ) {
302  struct gve_admin *admin = &gve->admin;
303  uint32_t evt;
304  uint32_t pfn;
305  unsigned int i;
306 
307  /* Wait for any outstanding commands to complete */
308  for ( i = 0 ; i < GVE_ADMIN_MAX_WAIT_MS ; i++ ) {
309 
310  /* Check event counter */
311  rmb();
312  evt = bswap_32 ( readl ( gve->cfg + GVE_CFG_ADMIN_EVT ) );
313  if ( evt == admin->prod )
314  return 0;
315 
316  /* Check for device reset */
317  pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
318  if ( ! pfn )
319  break;
320 
321  /* Delay */
322  mdelay ( 1 );
323  }
324 
325  DBGC ( gve, "GVE %p AQ %#02x %s (completed %#02x, status %#08x)\n",
326  gve, admin->prod, ( pfn ? "timed out" : "saw reset" ), evt,
327  bswap_32 ( readl ( gve->cfg + GVE_CFG_DEVSTAT ) ) );
328  return ( pfn ? -ETIMEDOUT : -ECONNRESET );
329 }
330 
331 /**
332  * Issue admin queue command
333  *
334  * @v gve GVE device
335  * @ret rc Return status code
336  */
337 static int gve_admin ( struct gve_nic *gve ) {
338  struct gve_admin *admin = &gve->admin;
339  union gve_admin_command *cmd;
340  unsigned int index;
341  unsigned int opcode;
342  uint32_t status;
343  int rc;
344 
345  /* Ensure admin queue is idle */
346  if ( ( rc = gve_admin_wait ( gve ) ) != 0 )
347  return rc;
348 
349  /* Get next command slot */
350  index = admin->prod;
351  cmd = &admin->cmd[ index % GVE_ADMIN_COUNT ];
352  opcode = cmd->hdr.opcode;
353  DBGC2 ( gve, "GVE %p AQ %#02x command %#04x request:\n",
354  gve, index, opcode );
355  DBGC2_HDA ( gve, 0, cmd, sizeof ( *cmd ) );
356 
357  /* Increment producer counter */
358  admin->prod++;
359 
360  /* Ring doorbell */
361  wmb();
362  writel ( bswap_32 ( admin->prod ), gve->cfg + GVE_CFG_ADMIN_DB );
363 
364  /* Wait for command to complete */
365  if ( ( rc = gve_admin_wait ( gve ) ) != 0 )
366  return rc;
367 
368  /* Check command status */
369  status = be32_to_cpu ( cmd->hdr.status );
370  if ( status != GVE_ADMIN_STATUS_OK ) {
371  rc = -EIO_ADMIN ( status );
372  DBGC ( gve, "GVE %p AQ %#02x command %#04x failed: %#08x\n",
373  gve, index, opcode, status );
374  DBGC_HDA ( gve, 0, cmd, sizeof ( *cmd ) );
375  DBGC ( gve, "GVE %p AQ error: %s\n", gve, strerror ( rc ) );
376  return rc;
377  }
378 
379  DBGC2 ( gve, "GVE %p AQ %#02x command %#04x result:\n",
380  gve, index, opcode );
381  DBGC2_HDA ( gve, 0, cmd, sizeof ( *cmd ) );
382  return 0;
383 }
384 
385 /**
386  * Issue simple admin queue command
387  *
388  * @v gve GVE device
389  * @v opcode Operation code
390  * @v id ID parameter (or zero if not applicable)
391  * @ret rc Return status code
392  *
393  * Several admin queue commands take either an empty parameter list or
394  * a single 32-bit ID parameter.
395  */
396 static int gve_admin_simple ( struct gve_nic *gve, unsigned int opcode,
397  unsigned int id ) {
398  union gve_admin_command *cmd;
399  int rc;
400 
401  /* Construct request */
402  cmd = gve_admin_command ( gve );
403  cmd->hdr.opcode = opcode;
404  cmd->simple.id = cpu_to_be32 ( id );
405 
406  /* Issue command */
407  if ( ( rc = gve_admin ( gve ) ) != 0 )
408  return rc;
409 
410  return 0;
411 }
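/* Usage sketch (illustrative only; gve_unregister() below is the real
 * caller): a command taking a single 32-bit ID parameter reduces to a
 * one-line call through this wrapper.
 */
static int example_simple_command ( struct gve_nic *gve, uint32_t qpl_id ) {
	return gve_admin_simple ( gve, GVE_ADMIN_UNREGISTER, qpl_id );
}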
412 
413 /**
414  * Get device descriptor
415  *
416  * @v gve GVE device
417  * @ret rc Return status code
418  */
419 static int gve_describe ( struct gve_nic *gve ) {
420  struct net_device *netdev = gve->netdev;
421  struct gve_device_descriptor *desc = &gve->scratch.buf->desc;
422  union gve_admin_command *cmd;
423  int rc;
424 
425  /* Construct request */
426  cmd = gve_admin_command ( gve );
427  cmd->hdr.opcode = GVE_ADMIN_DESCRIBE;
428  cmd->desc.addr = cpu_to_be64 ( dma ( &gve->scratch.map, desc ) );
429  cmd->desc.ver = cpu_to_be32 ( GVE_ADMIN_DESCRIBE_VER );
430  cmd->desc.len = cpu_to_be32 ( sizeof ( *desc ) );
431 
432  /* Issue command */
433  if ( ( rc = gve_admin ( gve ) ) != 0 )
434  return rc;
435  DBGC2 ( gve, "GVE %p device descriptor:\n", gve );
436  DBGC2_HDA ( gve, 0, desc, sizeof ( *desc ) );
437 
438  /* Extract queue parameters */
439  gve->events.count = be16_to_cpu ( desc->counters );
440  gve->tx.count = be16_to_cpu ( desc->tx_count );
441  gve->rx.count = be16_to_cpu ( desc->rx_count );
442  DBGC ( gve, "GVE %p using %d TX, %d RX, %d events\n",
443  gve, gve->tx.count, gve->rx.count, gve->events.count );
444 
445  /* Extract network parameters */
446  build_assert ( sizeof ( desc->mac ) == ETH_ALEN );
447  memcpy ( netdev->hw_addr, &desc->mac, sizeof ( desc->mac ) );
448  netdev->mtu = be16_to_cpu ( desc->mtu );
449  netdev->max_pkt_len = ( netdev->mtu + ETH_HLEN );
450  DBGC ( gve, "GVE %p MAC %s (\"%s\") MTU %zd\n",
451  gve, eth_ntoa ( netdev->hw_addr ),
452  inet_ntoa ( desc->mac.in ), netdev->mtu );
453 
454  return 0;
455 }
456 
457 /**
458  * Configure device resources
459  *
460  * @v gve GVE device
461  * @ret rc Return status code
462  */
463 static int gve_configure ( struct gve_nic *gve ) {
464  struct gve_events *events = &gve->events;
465  struct gve_irqs *irqs = &gve->irqs;
466  union gve_admin_command *cmd;
467  unsigned int db_off;
468  unsigned int i;
469  int rc;
470 
471  /* Construct request */
472  cmd = gve_admin_command ( gve );
473  cmd->hdr.opcode = GVE_ADMIN_CONFIGURE;
474  cmd->conf.events =
475  cpu_to_be64 ( dma ( &events->map, events->event ) );
476  cmd->conf.irqs =
477  cpu_to_be64 ( dma ( &irqs->map, irqs->irq ) );
478  cmd->conf.num_events = cpu_to_be32 ( events->count );
479  cmd->conf.num_irqs = cpu_to_be32 ( GVE_IRQ_COUNT );
480  cmd->conf.irq_stride = cpu_to_be32 ( sizeof ( irqs->irq[0] ) );
481 
482  /* Issue command */
483  if ( ( rc = gve_admin ( gve ) ) != 0 )
484  return rc;
485 
486  /* Disable all interrupts */
487  for ( i = 0 ; i < GVE_IRQ_COUNT ; i++ ) {
488  db_off = ( be32_to_cpu ( irqs->irq[i].db_idx ) *
489  sizeof ( uint32_t ) );
490  DBGC ( gve, "GVE %p IRQ %d doorbell +%#04x\n", gve, i, db_off );
491  irqs->db[i] = ( gve->db + db_off );
492  writel ( bswap_32 ( GVE_IRQ_DISABLE ), irqs->db[i] );
493  }
494 
495  return 0;
496 }
497 
498 /**
499  * Deconfigure device resources
500  *
501  * @v gve GVE device
502  * @ret rc Return status code
503  */
504 static int gve_deconfigure ( struct gve_nic *gve ) {
505  int rc;
506 
507  /* Issue command (with meaningless ID) */
508  if ( ( rc = gve_admin_simple ( gve, GVE_ADMIN_DECONFIGURE, 0 ) ) != 0 )
509  return rc;
510 
511  return 0;
512 }
513 
514 /**
515  * Register queue page list
516  *
517  * @v gve GVE device
518  * @v qpl Queue page list
519  * @ret rc Return status code
520  */
521 static int gve_register ( struct gve_nic *gve, struct gve_qpl *qpl ) {
522  struct gve_pages *pages = &gve->scratch.buf->pages;
523  union gve_admin_command *cmd;
524  physaddr_t addr;
525  unsigned int i;
526  int rc;
527 
528  /* Build page address list */
529  for ( i = 0 ; i < qpl->count ; i++ ) {
530  addr = user_to_phys ( qpl->data, ( i * GVE_PAGE_SIZE ) );
531  pages->addr[i] = cpu_to_be64 ( dma_phys ( &qpl->map, addr ) );
532  }
533 
534  /* Construct request */
535  cmd = gve_admin_command ( gve );
536  cmd->hdr.opcode = GVE_ADMIN_REGISTER;
537  cmd->reg.id = cpu_to_be32 ( qpl->id );
538  cmd->reg.count = cpu_to_be32 ( qpl->count );
539  cmd->reg.addr = cpu_to_be64 ( dma ( &gve->scratch.map, pages ) );
540  cmd->reg.size = cpu_to_be64 ( GVE_PAGE_SIZE );
541 
542  /* Issue command */
543  if ( ( rc = gve_admin ( gve ) ) != 0 )
544  return rc;
545 
546  return 0;
547 }
548 
549 /**
550  * Unregister page list
551  *
552  * @v gve GVE device
553  * @v qpl Queue page list
554  * @ret rc Return status code
555  */
556 static int gve_unregister ( struct gve_nic *gve, struct gve_qpl *qpl ) {
557  int rc;
558 
559  /* Issue command */
560  if ( ( rc = gve_admin_simple ( gve, GVE_ADMIN_UNREGISTER,
561  qpl->id ) ) != 0 ) {
562  return rc;
563  }
564 
565  return 0;
566 }
567 
568 /**
569  * Construct command to create transmit queue
570  *
571  * @v queue Transmit queue
572  * @v cmd Admin queue command
573  */
574 static void gve_create_tx_param ( struct gve_queue *queue,
575  union gve_admin_command *cmd ) {
576  struct gve_admin_create_tx *create = &cmd->create_tx;
577  const struct gve_queue_type *type = queue->type;
578  physaddr_t desc = user_to_phys ( queue->desc, 0 );
579 
580  /* Construct request parameters */
581  create->res = cpu_to_be64 ( dma ( &queue->res_map, queue->res ) );
582  create->desc = cpu_to_be64 ( dma_phys ( &queue->desc_map, desc ) );
583  create->qpl_id = cpu_to_be32 ( type->qpl );
584  create->notify_id = cpu_to_be32 ( type->irq );
585 }
586 
587 /**
588  * Construct command to create receive queue
589  *
590  * @v queue Receive queue
591  * @v cmd Admin queue command
592  */
593 static void gve_create_rx_param ( struct gve_queue *queue,
594  union gve_admin_command *cmd ) {
595  struct gve_admin_create_rx *create = &cmd->create_rx;
596  const struct gve_queue_type *type = queue->type;
597  physaddr_t desc = user_to_phys ( queue->desc, 0 );
598  physaddr_t cmplt = user_to_phys ( queue->cmplt, 0 );
599 
600  /* Construct request parameters */
601  create->notify_id = cpu_to_be32 ( type->irq );
602  create->res = cpu_to_be64 ( dma ( &queue->res_map, queue->res ) );
603  create->desc = cpu_to_be64 ( dma_phys ( &queue->desc_map, desc ) );
604  create->cmplt = cpu_to_be64 ( dma_phys ( &queue->cmplt_map, cmplt ) );
605  create->qpl_id = cpu_to_be32 ( type->qpl );
606  create->bufsz = cpu_to_be16 ( GVE_BUF_SIZE );
607 }
608 
609 /**
610  * Create transmit or receive queue
611  *
612  * @v gve GVE device
613  * @v queue Descriptor queue
614  * @ret rc Return status code
615  */
616 static int gve_create_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
617  const struct gve_queue_type *type = queue->type;
618  union gve_admin_command *cmd;
619  unsigned int db_off;
620  unsigned int evt_idx;
621  int rc;
622 
623  /* Reset queue */
624  queue->prod = 0;
625  queue->cons = 0;
626 
627  /* Construct request */
628  cmd = gve_admin_command ( gve );
629  cmd->hdr.opcode = type->create;
630  type->param ( queue, cmd );
631 
632  /* Issue command */
633  if ( ( rc = gve_admin ( gve ) ) != 0 )
634  return rc;
635 
636  /* Record indices */
637  db_off = ( be32_to_cpu ( queue->res->db_idx ) * sizeof ( uint32_t ) );
638  evt_idx = be32_to_cpu ( queue->res->evt_idx );
639  DBGC ( gve, "GVE %p %s doorbell +%#04x event counter %d\n",
640  gve, type->name, db_off, evt_idx );
641  queue->db = ( gve->db + db_off );
642  assert ( evt_idx < gve->events.count );
643  queue->event = &gve->events.event[evt_idx];
644  assert ( queue->event->count == 0 );
645 
646  return 0;
647 }
648 
649 /**
650  * Destroy transmit or receive queue
651  *
652  * @v gve GVE device
653  * @v queue Descriptor queue
654  * @ret rc Return status code
655  */
656 static int gve_destroy_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
657  const struct gve_queue_type *type = queue->type;
658  int rc;
659 
660  /* Issue command */
661  if ( ( rc = gve_admin_simple ( gve, type->destroy, 0 ) ) != 0 )
662  return rc;
663 
664  return 0;
665 }
666 
667 /******************************************************************************
668  *
669  * Network device interface
670  *
671  ******************************************************************************
672  */
673 
674 /**
675  * Allocate shared queue resources
676  *
677  * @v gve GVE device
678  * @ret rc Return status code
679  */
680 static int gve_alloc_shared ( struct gve_nic *gve ) {
681  struct dma_device *dma = gve->dma;
682  struct gve_irqs *irqs = &gve->irqs;
683  struct gve_events *events = &gve->events;
684  size_t irqs_len = ( GVE_IRQ_COUNT * sizeof ( irqs->irq[0] ) );
685  size_t events_len = ( gve->events.count * sizeof ( events->event[0] ) );
686  int rc;
687 
688  /* Allocate interrupt channels */
689  irqs->irq = dma_alloc ( dma, &irqs->map, irqs_len, GVE_ALIGN );
690  if ( ! irqs->irq ) {
691  rc = -ENOMEM;
692  goto err_irqs;
693  }
694  DBGC ( gve, "GVE %p IRQs at [%08lx,%08lx)\n",
695  gve, virt_to_phys ( irqs->irq ),
696  ( virt_to_phys ( irqs->irq ) + irqs_len ) );
697 
698  /* Allocate event counters */
699  events->event = dma_alloc ( dma, &events->map, events_len, GVE_ALIGN );
700  if ( ! events->event ) {
701  rc = -ENOMEM;
702  goto err_events;
703  }
704  DBGC ( gve, "GVE %p events at [%08lx,%08lx)\n",
705  gve, virt_to_phys ( events->event ),
706  ( virt_to_phys ( events->event ) + events_len ) );
707 
708  return 0;
709 
710  dma_free ( &events->map, events->event, events_len );
711  err_events:
712  dma_free ( &irqs->map, irqs->irq, irqs_len );
713  err_irqs:
714  return rc;
715 }
716 
717 /**
718  * Free shared queue resources
719  *
720  * @v gve GVE device
721  */
722 static void gve_free_shared ( struct gve_nic *gve ) {
723  struct gve_irqs *irqs = &gve->irqs;
724  struct gve_events *events = &gve->events;
725  size_t irqs_len = ( GVE_IRQ_COUNT * sizeof ( irqs->irq[0] ) );
726  size_t events_len = ( gve->events.count * sizeof ( events->event[0] ) );
727 
728  /* Free event counters */
729  dma_free ( &events->map, events->event, events_len );
730 
731  /* Free interrupt channels */
732  dma_free ( &irqs->map, irqs->irq, irqs_len );
733 }
734 
735 /**
736  * Allocate queue page list
737  *
738  * @v gve GVE device
739  * @v qpl Queue page list
740  * @v id Queue page list ID
741  * @v buffers Number of data buffers
742  * @ret rc Return status code
743  */
744 static int gve_alloc_qpl ( struct gve_nic *gve, struct gve_qpl *qpl,
745  uint32_t id, unsigned int buffers ) {
746  size_t len;
747 
748  /* Record ID */
749  qpl->id = id;
750 
751  /* Calculate number of pages required */
752  build_assert ( GVE_BUF_SIZE <= GVE_PAGE_SIZE );
753  qpl->count = ( ( buffers + GVE_BUF_PER_PAGE - 1 ) / GVE_BUF_PER_PAGE );
754  assert ( qpl->count <= GVE_QPL_MAX );
755 
756  /* Allocate pages (as a single block) */
757  len = ( qpl->count * GVE_PAGE_SIZE );
758  qpl->data = dma_umalloc ( gve->dma, &qpl->map, len, GVE_ALIGN );
759  if ( ! qpl->data )
760  return -ENOMEM;
761 
762  DBGC ( gve, "GVE %p QPL %#08x at [%08lx,%08lx)\n",
763  gve, qpl->id, user_to_phys ( qpl->data, 0 ),
764  user_to_phys ( qpl->data, len ) );
765  return 0;
766 }
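/* Worked example (assuming a 4096-byte GVE_PAGE_SIZE and 2048-byte
 * GVE_BUF_SIZE, so GVE_BUF_PER_PAGE == 2): a queue needing 64 buffers
 * rounds up to ( 64 + 2 - 1 ) / 2 == 32 pages, allocated as one
 * contiguous 32 * 4096 == 128kB block.
 */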
767 
768 /**
769  * Free queue page list
770  *
771  * @v gve GVE device
772  * @v qpl Queue page list
773  */
774 static void gve_free_qpl ( struct gve_nic *nic __unused,
775  struct gve_qpl *qpl ) {
776  size_t len = ( qpl->count * GVE_PAGE_SIZE );
777 
778  /* Free pages */
779  dma_ufree ( &qpl->map, qpl->data, len );
780 }
781 
782 /**
783  * Get buffer address (within queue page list address space)
784  *
785  * @v queue Descriptor queue
786  * @v index Buffer index
787  * @ret addr Buffer address within queue page list address space
788  */
789 static inline __attribute__ (( always_inline )) size_t
790 gve_address ( struct gve_queue *queue, unsigned int index ) {
791 
792  /* We allocate sufficient pages for the maximum fill level of
793  * buffers, and reuse the pages in strict rotation as we
794  * progress through the queue.
795  */
796  return ( ( index & ( queue->fill - 1 ) ) * GVE_BUF_SIZE );
797 }
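/* Worked example (assuming a fill level of 8 and 2048-byte buffers):
 * descriptor index 10 reuses buffer slot ( 10 & ( 8 - 1 ) ) == 2,
 * giving the QPL-relative address 2 * 2048 == 0x1000.
 */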
798 
799 /**
800  * Get buffer address
801  *
802  * @v queue Descriptor queue
803  * @v index Buffer index
804  * @ret addr Buffer address
805  */
806 static inline __attribute__ (( always_inline )) userptr_t
807 gve_buffer ( struct gve_queue *queue, unsigned int index ) {
808 
809  /* Pages are currently allocated as a single contiguous block */
810  return userptr_add ( queue->qpl.data, gve_address ( queue, index ) );
811 }
812 
813 /**
814  * Calculate next receive sequence number
815  *
816  * @v seq Current sequence number, or zero to start sequence
817  * @ret next Next sequence number
818  */
819 static inline __attribute__ (( always_inline )) unsigned int
820 gve_next ( unsigned int seq ) {
821 
822  /* The receive completion sequence number is a modulo 7
823  * counter that cycles through the non-zero three-bit values 1
824  * to 7 inclusive.
825  *
826  * Since 7 is coprime to 2^n, this ensures that the sequence
827  * number changes each time that a new completion is written
828  * to memory.
829  *
830  * Since the counter takes only non-zero values, this ensures
831  * that the sequence number changes whenever a new completion
832  * is first written to a zero-initialised completion ring.
833  */
834  seq = ( ( seq + 1 ) & GVE_RX_SEQ_MASK );
835  return ( seq ? seq : 1 );
836 }
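/* Sanity-check sketch (the values follow directly from the definition
 * above): the sequence cycles 1,2,3,4,5,6,7,1,... and never takes the
 * zero value found in a freshly initialised completion ring.
 */
static void example_sequence ( void ) {
	assert ( gve_next ( 0 ) == 1 );	/* start of sequence */
	assert ( gve_next ( 6 ) == 7 );
	assert ( gve_next ( 7 ) == 1 );	/* wraps around, skipping zero */
}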
837 
838 /**
839  * Allocate descriptor queue
840  *
841  * @v gve GVE device
842  * @v queue Descriptor queue
843  * @ret rc Return status code
844  */
845 static int gve_alloc_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
846  const struct gve_queue_type *type = queue->type;
847  struct dma_device *dma = gve->dma;
848  size_t desc_len = ( queue->count * type->desc_len );
849  size_t cmplt_len = ( queue->count * type->cmplt_len );
850  size_t res_len = sizeof ( *queue->res );
851  struct gve_buffer buf;
852  size_t offset;
853  unsigned int i;
854  int rc;
855 
856  /* Sanity checks */
857  if ( ( queue->count == 0 ) ||
858  ( queue->count & ( queue->count - 1 ) ) ) {
859  DBGC ( gve, "GVE %p %s invalid queue size %d\n",
860  gve, type->name, queue->count );
861  rc = -EINVAL;
862  goto err_sanity;
863  }
864 
865  /* Calculate maximum fill level */
866  assert ( ( type->fill & ( type->fill - 1 ) ) == 0 );
867  queue->fill = type->fill;
868  if ( queue->fill > queue->count )
869  queue->fill = queue->count;
870  DBGC ( gve, "GVE %p %s using QPL %#08x with %d/%d descriptors\n",
871  gve, type->name, type->qpl, queue->fill, queue->count );
872 
873  /* Allocate queue page list */
874  if ( ( rc = gve_alloc_qpl ( gve, &queue->qpl, type->qpl,
875  queue->fill ) ) != 0 )
876  goto err_qpl;
877 
878  /* Allocate descriptors */
879  queue->desc = dma_umalloc ( dma, &queue->desc_map, desc_len,
880  GVE_ALIGN );
881  if ( ! queue->desc ) {
882  rc = -ENOMEM;
883  goto err_desc;
884  }
885  DBGC ( gve, "GVE %p %s descriptors at [%08lx,%08lx)\n",
886  gve, type->name, user_to_phys ( queue->desc, 0 ),
887  user_to_phys ( queue->desc, desc_len ) );
888 
889  /* Allocate completions */
890  if ( cmplt_len ) {
891  queue->cmplt = dma_umalloc ( dma, &queue->cmplt_map, cmplt_len,
892  GVE_ALIGN );
893  if ( ! queue->cmplt ) {
894  rc = -ENOMEM;
895  goto err_cmplt;
896  }
897  DBGC ( gve, "GVE %p %s completions at [%08lx,%08lx)\n",
898  gve, type->name, user_to_phys ( queue->cmplt, 0 ),
899  user_to_phys ( queue->cmplt, cmplt_len ) );
900  }
901 
902  /* Allocate queue resources */
903  queue->res = dma_alloc ( dma, &queue->res_map, res_len, GVE_ALIGN );
904  if ( ! queue->res ) {
905  rc = -ENOMEM;
906  goto err_res;
907  }
908  memset ( queue->res, 0, res_len );
909 
910  /* Populate descriptor offsets */
911  offset = ( type->desc_len - sizeof ( buf ) );
912  for ( i = 0 ; i < queue->count ; i++ ) {
913  buf.addr = cpu_to_be64 ( gve_address ( queue, i ) );
914  copy_to_user ( queue->desc, offset, &buf, sizeof ( buf ) );
915  offset += type->desc_len;
916  }
917 
918  return 0;
919 
920  dma_free ( &queue->res_map, queue->res, res_len );
921  err_res:
922  if ( cmplt_len )
923  dma_ufree ( &queue->cmplt_map, queue->cmplt, cmplt_len );
924  err_cmplt:
925  dma_ufree ( &queue->desc_map, queue->desc, desc_len );
926  err_desc:
927  gve_free_qpl ( gve, &queue->qpl );
928  err_qpl:
929  err_sanity:
930  return rc;
931 }
932 
933 /**
934  * Free descriptor queue
935  *
936  * @v gve GVE device
937  * @v queue Descriptor queue
938  */
939 static void gve_free_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
940  const struct gve_queue_type *type = queue->type;
941  size_t desc_len = ( queue->count * type->desc_len );
942  size_t cmplt_len = ( queue->count * type->cmplt_len );
943  size_t res_len = sizeof ( *queue->res );
944 
945  /* Free queue resources */
946  dma_free ( &queue->res_map, queue->res, res_len );
947 
948  /* Free completions, if applicable */
949  if ( cmplt_len )
950  dma_ufree ( &queue->cmplt_map, queue->cmplt, cmplt_len );
951 
952  /* Free descriptors */
953  dma_ufree ( &queue->desc_map, queue->desc, desc_len );
954 
955  /* Free queue page list */
956  gve_free_qpl ( gve, &queue->qpl );
957 }
958 
959 /**
960  * Start up device
961  *
962  * @v gve GVE device
963  * @ret rc Return status code
964  */
965 static int gve_start ( struct gve_nic *gve ) {
966  struct net_device *netdev = gve->netdev;
967  struct gve_queue *tx = &gve->tx;
968  struct gve_queue *rx = &gve->rx;
969  struct io_buffer *iobuf;
970  unsigned int i;
971  int rc;
972 
973  /* Cancel any pending transmissions */
974  for ( i = 0 ; i < ( sizeof ( gve->tx_iobuf ) /
975  sizeof ( gve->tx_iobuf[0] ) ) ; i++ ) {
976  iobuf = gve->tx_iobuf[i];
977  gve->tx_iobuf[i] = NULL;
978  if ( iobuf )
979  netdev_tx_complete_err ( netdev, iobuf, -ECANCELED );
980  }
981 
982  /* Invalidate receive completions */
983  memset_user ( rx->cmplt, 0, 0, ( rx->count * rx->type->cmplt_len ) );
984 
985  /* Reset receive sequence */
986  gve->seq = gve_next ( 0 );
987 
988  /* Configure device resources */
989  if ( ( rc = gve_configure ( gve ) ) != 0 )
990  goto err_configure;
991 
992  /* Register transmit queue page list */
993  if ( ( rc = gve_register ( gve, &tx->qpl ) ) != 0 )
994  goto err_register_tx;
995 
996  /* Register receive queue page list */
997  if ( ( rc = gve_register ( gve, &rx->qpl ) ) != 0 )
998  goto err_register_rx;
999 
1000  /* Create transmit queue */
1001  if ( ( rc = gve_create_queue ( gve, tx ) ) != 0 )
1002  goto err_create_tx;
1003 
1004  /* Create receive queue */
1005  if ( ( rc = gve_create_queue ( gve, rx ) ) != 0 )
1006  goto err_create_rx;
1007 
1008  return 0;
1009 
1010  gve_destroy_queue ( gve, rx );
1011  err_create_rx:
1012  gve_destroy_queue ( gve, tx );
1013  err_create_tx:
1014  gve_unregister ( gve, &rx->qpl );
1015  err_register_rx:
1016  gve_unregister ( gve, &tx->qpl );
1017  err_register_tx:
1018  gve_deconfigure ( gve );
1019  err_configure:
1020  return rc;
1021 }
1022 
1023 /**
1024  * Stop device
1025  *
1026  * @v gve GVE device
1027  */
1028 static void gve_stop ( struct gve_nic *gve ) {
1029  struct gve_queue *tx = &gve->tx;
1030  struct gve_queue *rx = &gve->rx;
1031 
1032  /* Destroy queues */
1033  gve_destroy_queue ( gve, rx );
1034  gve_destroy_queue ( gve, tx );
1035 
1036  /* Unregister page lists */
1037  gve_unregister ( gve, &rx->qpl );
1038  gve_unregister ( gve, &tx->qpl );
1039 
1040  /* Deconfigure device */
1041  gve_deconfigure ( gve );
1042 }
1043 
1044 /**
1045  * Device startup process
1046  *
1047  * @v gve GVE device
1048  */
1049 static void gve_startup ( struct gve_nic *gve ) {
1050  struct net_device *netdev = gve->netdev;
1051  int rc;
1052 
1053  /* Reset device */
1054  if ( ( rc = gve_reset ( gve ) ) != 0 )
1055  goto err_reset;
1056 
1057  /* Enable admin queue */
1058  gve_admin_enable ( gve );
1059 
1060  /* Start device */
1061  if ( ( rc = gve_start ( gve ) ) != 0 )
1062  goto err_start;
1063 
1064  /* Reset retry count */
1065  gve->retries = 0;
1066 
1067  /* (Ab)use link status to report startup status */
1068  netdev_link_up ( netdev );
1069 
1070  return;
1071 
1072  gve_stop ( gve );
1073  err_start:
1074  err_reset:
1075  DBGC ( gve, "GVE %p startup failed: %s\n", gve, strerror ( rc ) );
1076  netdev_link_err ( netdev, rc );
1077  if ( gve->retries++ < GVE_RESET_MAX_RETRY )
1078  process_add ( &gve->startup );
1079 }
1080 
1081 /**
1082  * Trigger startup process
1083  *
1084  * @v gve GVE device
1085  */
1086 static void gve_restart ( struct gve_nic *gve ) {
1087  struct net_device *netdev = gve->netdev;
1088 
1089  /* Mark link down to inhibit polling and transmit activity */
1090  netdev_link_down ( netdev );
1091 
1092  /* Schedule startup process */
1093  process_add ( &gve->startup );
1094 }
1095 
1096 /**
1097  * Reset recovery watchdog
1098  *
1099  * @v timer Reset recovery watchdog timer
1100  * @v over Failure indicator
1101  */
1102 static void gve_watchdog ( struct retry_timer *timer, int over __unused ) {
1103  struct gve_nic *gve = container_of ( timer, struct gve_nic, watchdog );
1104  uint32_t activity;
1105  uint32_t pfn;
1106  int rc;
1107 
1108  /* Reschedule watchdog */
1109  start_timer_fixed ( &gve->watchdog, GVE_WATCHDOG_TIMEOUT );
1110 
1111  /* Reset device (for test purposes) if applicable */
1112  if ( ( rc = inject_fault ( VM_MIGRATED_RATE ) ) != 0 ) {
1113  DBGC ( gve, "GVE %p synthesising host reset\n", gve );
1114  writel ( 0, gve->cfg + GVE_CFG_ADMIN_PFN );
1115  }
1116 
1117  /* Check for activity since last timer invocation */
1118  activity = ( gve->tx.cons + gve->rx.cons );
1119  if ( activity != gve->activity ) {
1120  gve->activity = activity;
1121  return;
1122  }
1123 
1124  /* Check for reset */
1125  pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
1126  if ( pfn ) {
1127  DBGC2 ( gve, "GVE %p idle but not in reset\n", gve );
1128  return;
1129  }
1130 
1131  /* Schedule restart */
1132  DBGC ( gve, "GVE %p watchdog detected reset by host\n", gve );
1133  gve_restart ( gve );
1134 }
1135 
1136 /**
1137  * Open network device
1138  *
1139  * @v netdev Network device
1140  * @ret rc Return status code
1141  */
1142 static int gve_open ( struct net_device *netdev ) {
1143  struct gve_nic *gve = netdev->priv;
1144  struct gve_queue *tx = &gve->tx;
1145  struct gve_queue *rx = &gve->rx;
1146  int rc;
1147 
1148  /* Allocate shared queue resources */
1149  if ( ( rc = gve_alloc_shared ( gve ) ) != 0 )
1150  goto err_alloc_shared;
1151 
1152  /* Allocate and prepopulate transmit queue */
1153  if ( ( rc = gve_alloc_queue ( gve, tx ) ) != 0 )
1154  goto err_alloc_tx;
1155 
1156  /* Allocate and prepopulate receive queue */
1157  if ( ( rc = gve_alloc_queue ( gve, rx ) ) != 0 )
1158  goto err_alloc_rx;
1159 
1160  /* Trigger startup */
1161  gve_restart ( gve );
1162 
1163  /* Start reset recovery watchdog timer */
1164  start_timer_fixed ( &gve->watchdog, GVE_WATCHDOG_TIMEOUT );
1165 
1166  return 0;
1167 
1168  gve_free_queue ( gve, rx );
1169  err_alloc_rx:
1170  gve_free_queue ( gve, tx );
1171  err_alloc_tx:
1172  gve_free_shared ( gve );
1173  err_alloc_shared:
1174  return rc;
1175 }
1176 
1177 /**
1178  * Close network device
1179  *
1180  * @v netdev Network device
1181  */
1182 static void gve_close ( struct net_device *netdev ) {
1183  struct gve_nic *gve = netdev->priv;
1184  struct gve_queue *tx = &gve->tx;
1185  struct gve_queue *rx = &gve->rx;
1186 
1187  /* Stop reset recovery timer */
1188  stop_timer ( &gve->watchdog );
1189 
1190  /* Terminate startup process */
1191  process_del ( &gve->startup );
1192 
1193  /* Stop and reset device */
1194  gve_stop ( gve );
1195  gve_reset ( gve );
1196 
1197  /* Free queues */
1198  gve_free_queue ( gve, rx );
1199  gve_free_queue ( gve, tx );
1200 
1201  /* Free shared queue resources */
1202  gve_free_shared ( gve );
1203 }
1204 
1205 /**
1206  * Transmit packet
1207  *
1208  * @v netdev Network device
1209  * @v iobuf I/O buffer
1210  * @ret rc Return status code
1211  */
1212 static int gve_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
1213  struct gve_nic *gve = netdev->priv;
1214  struct gve_queue *tx = &gve->tx;
1215  struct gve_tx_descriptor desc;
1216  unsigned int count;
1217  unsigned int index;
1218  size_t frag_len;
1219  size_t offset;
1220  size_t len;
1221 
1222  /* Do nothing if queues are not yet set up */
1223  if ( ! netdev_link_ok ( netdev ) )
1224  return -ENETDOWN;
1225 
1226  /* Defer packet if there is no space in the transmit ring */
1227  len = iob_len ( iobuf );
1228  count = ( ( len + GVE_BUF_SIZE - 1 ) / GVE_BUF_SIZE );
1229  if ( ( ( tx->prod - tx->cons ) + count ) > tx->fill ) {
1230  netdev_tx_defer ( netdev, iobuf );
1231  return 0;
1232  }
1233 
1234  /* Copy packet to queue pages and populate descriptors */
1235  for ( offset = 0 ; offset < len ; offset += frag_len ) {
1236 
1237  /* Sanity check */
1238  assert ( gve->tx_iobuf[ tx->prod % GVE_TX_FILL ] == NULL );
1239 
1240  /* Copy packet fragment */
1241  frag_len = ( len - offset );
1242  if ( frag_len > GVE_BUF_SIZE )
1243  frag_len = GVE_BUF_SIZE;
1244  copy_to_user ( gve_buffer ( tx, tx->prod ), 0,
1245  ( iobuf->data + offset ), frag_len );
1246 
1247  /* Populate descriptor */
1248  index = ( tx->prod++ & ( tx->count - 1 ) );
1249  memset ( &desc.pkt, 0, sizeof ( desc.pkt ) );
1250  if ( offset ) {
1251  desc.pkt.type = GVE_TX_TYPE_CONT;
1252  } else {
1253  desc.pkt.type = GVE_TX_TYPE_START;
1254  desc.pkt.count = count;
1255  desc.pkt.total = cpu_to_be16 ( len );
1256  }
1257  desc.pkt.len = cpu_to_be16 ( frag_len );
1258  copy_to_user ( tx->desc, ( index * sizeof ( desc ) ), &desc,
1259  sizeof ( desc.pkt ) );
1260  DBGC2 ( gve, "GVE %p TX %#04x %#02x:%#02x len %#04x/%#04x at "
1261  "%#08zx\n", gve, index, desc.pkt.type, desc.pkt.count,
1262  be16_to_cpu ( desc.pkt.len ),
1263  be16_to_cpu ( desc.pkt.total ),
1264  gve_address ( tx, index ) );
1265  }
1266  assert ( ( tx->prod - tx->cons ) <= tx->fill );
1267 
1268  /* Record I/O buffer against final descriptor */
1269  gve->tx_iobuf[ ( tx->prod - 1U ) % GVE_TX_FILL ] = iobuf;
1270 
1271  /* Ring doorbell */
1272  wmb();
1273  writel ( bswap_32 ( tx->prod ), tx->db );
1274 
1275  return 0;
1276 }
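/* Worked example (assuming 2048-byte buffers): a 3000-byte packet
 * needs count == ( 3000 + 2048 - 1 ) / 2048 == 2 descriptors:
 *
 *   descriptor 0: GVE_TX_TYPE_START, count 2, total 3000, len 2048
 *   descriptor 1: GVE_TX_TYPE_CONT,                       len  952
 *
 * after which the doorbell is written once with the final producer
 * counter value.
 */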
1277 
1278 /**
1279  * Poll for completed transmissions
1280  *
1281  * @v netdev Network device
1282  */
1283 static void gve_poll_tx ( struct net_device *netdev ) {
1284  struct gve_nic *gve = netdev->priv;
1285  struct gve_queue *tx = &gve->tx;
1286  struct io_buffer *iobuf;
1287  uint32_t count;
1288 
1289  /* Read event counter */
1290  count = be32_to_cpu ( tx->event->count );
1291 
1292  /* Process transmit completions */
1293  while ( count != tx->cons ) {
1294  DBGC2 ( gve, "GVE %p TX %#04x complete\n", gve, tx->cons );
1295  iobuf = gve->tx_iobuf[ tx->cons % GVE_TX_FILL ];
1296  gve->tx_iobuf[ tx->cons % GVE_TX_FILL ] = NULL;
1297  tx->cons++;
1298  if ( iobuf )
1299  netdev_tx_complete ( netdev, iobuf );
1300  }
1301 }
1302 
1303 /**
1304  * Poll for received packets
1305  *
1306  * @v netdev Network device
1307  */
1308 static void gve_poll_rx ( struct net_device *netdev ) {
1309  struct gve_nic *gve = netdev->priv;
1310  struct gve_queue *rx = &gve->rx;
1311  struct gve_rx_completion cmplt;
1312  struct io_buffer *iobuf;
1313  unsigned int index;
1314  unsigned int seq;
1315  uint32_t cons;
1316  size_t offset;
1317  size_t total;
1318  size_t len;
1319  int rc;
1320 
1321  /* Process receive completions */
1322  cons = rx->cons;
1323  seq = gve->seq;
1324  total = 0;
1325  while ( 1 ) {
1326 
1327  /* Read next possible completion */
1328  index = ( cons++ & ( rx->count - 1 ) );
1329  offset = ( ( index * sizeof ( cmplt ) ) +
1330  offsetof ( typeof ( cmplt ), pkt ) );
1331  copy_from_user ( &cmplt.pkt, rx->cmplt, offset,
1332  sizeof ( cmplt.pkt ) );
1333 
1334  /* Check sequence number */
1335  if ( ( cmplt.pkt.seq & GVE_RX_SEQ_MASK ) != seq )
1336  break;
1337  seq = gve_next ( seq );
1338 
1339  /* Parse completion */
1340  len = be16_to_cpu ( cmplt.pkt.len );
1341  DBGC2 ( gve, "GVE %p RX %#04x %#02x:%#02x len %#04zx at "
1342  "%#08zx\n", gve, index, cmplt.pkt.seq, cmplt.pkt.flags,
1343  len, gve_address ( rx, index ) );
1344 
1345  /* Accumulate a complete packet */
1346  if ( cmplt.pkt.flags & GVE_RXF_ERROR ) {
1347  total = 0;
1348  } else {
1349  total += len;
1350  if ( cmplt.pkt.flags & GVE_RXF_MORE )
1351  continue;
1352  }
1353  gve->seq = seq;
1354 
1355  /* Allocate and populate I/O buffer */
1356  iobuf = ( total ? alloc_iob ( total ) : NULL );
1357  for ( ; rx->cons != cons ; rx->cons++ ) {
1358 
1359  /* Re-read completion length */
1360  index = ( rx->cons & ( rx->count - 1 ) );
1361  offset = ( ( index * sizeof ( cmplt ) ) +
1362  offsetof ( typeof ( cmplt ), pkt.len ) );
1363  copy_from_user ( &cmplt.pkt, rx->cmplt, offset,
1364  sizeof ( cmplt.pkt.len ) );
1365 
1366  /* Copy data */
1367  if ( iobuf ) {
1368  len = be16_to_cpu ( cmplt.pkt.len );
1369  copy_from_user ( iob_put ( iobuf, len ),
1370  gve_buffer ( rx, rx->cons ),
1371  0, len );
1372  }
1373  }
1374  assert ( ( iobuf == NULL ) || ( iob_len ( iobuf ) == total ) );
1375  total = 0;
1376 
1377  /* Hand off packet to network stack */
1378  if ( iobuf ) {
1379  iob_pull ( iobuf, GVE_RX_PAD );
1380  netdev_rx ( netdev, iobuf );
1381  } else {
1382  rc = ( ( cmplt.pkt.flags & GVE_RXF_ERROR ) ?
1383  -EIO : -ENOMEM );
1384  netdev_rx_err ( netdev, NULL, rc );
1385  }
1386 
1387  /* Sanity check */
1388  assert ( rx->cons == cons );
1389  assert ( gve->seq == seq );
1390  assert ( total == 0 );
1391  }
1392 }
1393 
1394 /**
1395  * Refill receive queue
1396  *
1397  * @v netdev Network device
1398  */
1399 static void gve_refill_rx ( struct net_device *netdev ) {
1400  struct gve_nic *gve = netdev->priv;
1401  struct gve_queue *rx = &gve->rx;
1402  unsigned int prod;
1403 
1404  /* The receive descriptors are prepopulated at the time of
1405  * creating the receive queue (pointing to the preallocated
1406  * queue pages). Refilling is therefore just a case of
1407  * ringing the doorbell if the device is not yet aware of any
1408  * available descriptors.
1409  */
1410  prod = ( rx->cons + rx->fill );
1411  if ( prod != rx->prod ) {
1412  rx->prod = prod;
1413  writel ( bswap_32 ( prod ), rx->db );
1414  DBGC2 ( gve, "GVE %p RX %#04x ready\n", gve, rx->prod );
1415  }
1416 }
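/* Worked example (assuming a fill level of 8): once completions up to
 * cons == 5 have been processed, the doorbell is written with
 * prod == 5 + 8 == 13, making descriptors 5..12 (and their strictly
 * rotated pages) available to the device again.
 */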
1417 
1418 /**
1419  * Poll for completed and received packets
1420  *
1421  * @v netdev Network device
1422  */
1423 static void gve_poll ( struct net_device *netdev ) {
1424 
1425  /* Do nothing if queues are not yet set up */
1426  if ( ! netdev_link_ok ( netdev ) )
1427  return;
1428 
1429  /* Poll for transmit completions */
1430  gve_poll_tx ( netdev );
1431 
1432  /* Poll for receive completions */
1433  gve_poll_rx ( netdev );
1434 
1435  /* Refill receive queue */
1436  gve_refill_rx ( netdev );
1437 }
1438 
1439 /** GVE network device operations */
1440 static struct net_device_operations gve_operations = {
1441  .open = gve_open,
1442  .close = gve_close,
1443  .transmit = gve_transmit,
1444  .poll = gve_poll,
1445 };
1446 
1447 /******************************************************************************
1448  *
1449  * PCI interface
1450  *
1451  ******************************************************************************
1452  */
1453 
1454 /** Transmit descriptor queue type */
1455 static const struct gve_queue_type gve_tx_type = {
1456  .name = "TX",
1457  .param = gve_create_tx_param,
1458  .qpl = GVE_TX_QPL,
1459  .irq = GVE_TX_IRQ,
1460  .fill = GVE_TX_FILL,
1461  .desc_len = sizeof ( struct gve_tx_descriptor ),
1462  .create = GVE_ADMIN_CREATE_TX,
1463  .destroy = GVE_ADMIN_DESTROY_TX,
1464 };
1465 
1466 /** Receive descriptor queue type */
1467 static const struct gve_queue_type gve_rx_type = {
1468  .name = "RX",
1469  .param = gve_create_rx_param,
1470  .qpl = GVE_RX_QPL,
1471  .irq = GVE_RX_IRQ,
1472  .fill = GVE_RX_FILL,
1473  .desc_len = sizeof ( struct gve_rx_descriptor ),
1474  .cmplt_len = sizeof ( struct gve_rx_completion ),
1475  .create = GVE_ADMIN_CREATE_RX,
1476  .destroy = GVE_ADMIN_DESTROY_RX,
1477 };
1478 
1479 /**
1480  * Set up admin queue and get device description
1481  *
1482  * @v gve GVE device
1483  * @ret rc Return status code
1484  */
1485 static int gve_setup ( struct gve_nic *gve ) {
1486  unsigned int i;
1487  int rc;
1488 
1489  /* Attempt several times, since the device may decide to add
1490  * in a few spurious resets.
1491  */
1492  for ( i = 0 ; i < GVE_RESET_MAX_RETRY ; i++ ) {
1493 
1494  /* Reset device */
1495  if ( ( rc = gve_reset ( gve ) ) != 0 )
1496  continue;
1497 
1498  /* Enable admin queue */
1499  gve_admin_enable ( gve );
1500 
1501  /* Fetch MAC address */
1502  if ( ( rc = gve_describe ( gve ) ) != 0 )
1503  continue;
1504 
1505  /* Success */
1506  return 0;
1507  }
1508 
1509  DBGC ( gve, "GVE %p failed to get device description: %s\n",
1510  gve, strerror ( rc ) );
1511  return rc;
1512 }
1513 
1514 /** Device startup process descriptor */
1515 static struct process_descriptor gve_startup_desc =
1516  PROC_DESC_ONCE ( struct gve_nic, startup, gve_startup );
1517 
1518 /**
1519  * Probe PCI device
1520  *
1521  * @v pci PCI device
1522  * @ret rc Return status code
1523  */
1524 static int gve_probe ( struct pci_device *pci ) {
1525  struct net_device *netdev;
1526  struct gve_nic *gve;
1527  unsigned long cfg_start;
1528  unsigned long db_start;
1529  unsigned long db_size;
1530  int rc;
1531 
1532  /* Allocate and initialise net device */
1533  netdev = alloc_etherdev ( sizeof ( *gve ) );
1534  if ( ! netdev ) {
1535  rc = -ENOMEM;
1536  goto err_alloc;
1537  }
1538  netdev_init ( netdev, &gve_operations );
1539  gve = netdev->priv;
1540  pci_set_drvdata ( pci, netdev );
1541  netdev->dev = &pci->dev;
1542  memset ( gve, 0, sizeof ( *gve ) );
1543  gve->netdev = netdev;
1544  gve->tx.type = &gve_tx_type;
1545  gve->rx.type = &gve_rx_type;
1546  process_init_stopped ( &gve->startup, &gve_startup_desc,
1547  &netdev->refcnt );
1548  timer_init ( &gve->watchdog, gve_watchdog, &netdev->refcnt );
1549 
1550  /* Fix up PCI device */
1551  adjust_pci_device ( pci );
1552 
1553  /* Check PCI revision */
1554  pci_read_config_byte ( pci, PCI_REVISION, &gve->revision );
1555  DBGC ( gve, "GVE %p is revision %#02x\n", gve, gve->revision );
1556 
1557  /* Map configuration registers */
1558  cfg_start = pci_bar_start ( pci, GVE_CFG_BAR );
1559  gve->cfg = pci_ioremap ( pci, cfg_start, GVE_CFG_SIZE );
1560  if ( ! gve->cfg ) {
1561  rc = -ENODEV;
1562  goto err_cfg;
1563  }
1564 
1565  /* Map doorbell registers */
1566  db_start = pci_bar_start ( pci, GVE_DB_BAR );
1567  db_size = pci_bar_size ( pci, GVE_DB_BAR );
1568  gve->db = pci_ioremap ( pci, db_start, db_size );
1569  if ( ! gve->db ) {
1570  rc = -ENODEV;
1571  goto err_db;
1572  }
1573 
1574  /* Configure DMA */
1575  gve->dma = &pci->dma;
1576  dma_set_mask_64bit ( gve->dma );
1577  assert ( netdev->dma == NULL );
1578 
1579  /* Allocate admin queue */
1580  if ( ( rc = gve_admin_alloc ( gve ) ) != 0 )
1581  goto err_admin;
1582 
1583  /* Set up the device */
1584  if ( ( rc = gve_setup ( gve ) ) != 0 )
1585  goto err_setup;
1586 
1587  /* Register network device */
1588  if ( ( rc = register_netdev ( netdev ) ) != 0 )
1589  goto err_register_netdev;
1590 
1591  return 0;
1592 
1592 
1593  unregister_netdev ( netdev );
1594  err_register_netdev:
1595  err_setup:
1596  gve_reset ( gve );
1597  gve_admin_free ( gve );
1598  err_admin:
1599  iounmap ( gve->db );
1600  err_db:
1601  iounmap ( gve->cfg );
1602  err_cfg:
1603  netdev_nullify ( netdev );
1604  netdev_put ( netdev );
1605  err_alloc:
1606  return rc;
1607 }
1608 
1609 /**
1610  * Remove PCI device
1611  *
1612  * @v pci PCI device
1613  */
1614 static void gve_remove ( struct pci_device *pci ) {
1615  struct net_device *netdev = pci_get_drvdata ( pci );
1616  struct gve_nic *gve = netdev->priv;
1617 
1618  /* Unregister network device */
1619  unregister_netdev ( netdev );
1620 
1621  /* Reset device */
1622  gve_reset ( gve );
1623 
1624  /* Free admin queue */
1625  gve_admin_free ( gve );
1626 
1627  /* Unmap registers */
1628  iounmap ( gve->db );
1629  iounmap ( gve->cfg );
1630 
1631  /* Free network device */
1632  netdev_nullify ( netdev );
1633  netdev_put ( netdev );
1634 }
1635 
1636 /** GVE PCI device IDs */
1637 static struct pci_device_id gve_nics[] = {
1638  PCI_ROM ( 0x1ae0, 0x0042, "gve", "gVNIC", 0 ),
1639 };
1640 
1641 /** GVE PCI driver */
1642 struct pci_driver gve_driver __pci_driver = {
1643  .ids = gve_nics,
1644  .id_count = ( sizeof ( gve_nics ) / sizeof ( gve_nics[0] ) ),
1645  .probe = gve_probe,
1646  .remove = gve_remove,
1647 };
#define cpu_to_be16(value)
Definition: byteswap.h:109
#define iob_pull(iobuf, len)
Definition: iobuf.h:102
#define __attribute__(x)
Definition: compiler.h:10
uint32_t base
Base.
Definition: librm.h:252
#define EINVAL
Invalid argument.
Definition: errno.h:428
#define GVE_PAGE_SIZE
Page size.
Definition: gve.h:42
#define ECONNRESET
Connection reset.
Definition: errno.h:363
struct dma_device * dma
DMA device.
Definition: gve.h:645
struct arbelprm_rc_send_wqe rc
Definition: arbel.h:14
#define GVE_ADMIN_STATUS_OK
Command succeeded.
Definition: gve.h:113
struct gve_rx_packet pkt
Packet descriptor.
Definition: gve.h:563
static void netdev_tx_complete(struct net_device *netdev, struct io_buffer *iobuf)
Complete network transmission.
Definition: netdevice.h:752
wmb()
DMA mappings.
static void gve_stop(struct gve_nic *gve)
Stop device.
Definition: gve.c:1028
#define iob_put(iobuf, len)
Definition: iobuf.h:120
struct dma_device dma
DMA device.
Definition: pci.h:210
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition: netdevice.c:586
A receive descriptor.
Definition: gve.h:534
void netdev_tx_defer(struct net_device *netdev, struct io_buffer *iobuf)
Defer transmitted packet.
Definition: netdevice.c:412
A PCI driver.
Definition: pci.h:247
static void gve_refill_rx(struct net_device *netdev)
Refill receive queue.
Definition: gve.c:1399
Create receive queue command.
Definition: gve.h:241
static int gve_describe(struct gve_nic *gve)
Get device descriptor.
Definition: gve.c:419
#define GVE_CFG_ADMIN_PFN
Admin queue page frame number (for older devices)
Definition: gve.h:75
#define GVE_ADMIN_DESTROY_RX
Destroy receive queue command.
Definition: gve.h:270
int(* open)(struct net_device *netdev)
Open network device.
Definition: netdevice.h:222
static int gve_alloc_queue(struct gve_nic *gve, struct gve_queue *queue)
Allocate descriptor queue.
Definition: gve.c:845
uint8_t opcode
Opcode.
Definition: ena.h:16
struct dma_mapping map
DMA mapping.
Definition: gve.h:349
struct gve_event * event
Event counters.
Definition: gve.h:347
#define GVE_ADMIN_DESCRIBE
Describe device command.
Definition: gve.h:124
union gve_scratch::@46 * buf
Buffer contents.
static void gve_startup(struct gve_nic *gve)
Device startup process.
Definition: gve.c:1049
#define GVE_RX_IRQ
Receive queue interrupt channel.
Definition: gve.h:531
#define GVE_CFG_DEVSTAT
Device status.
Definition: gve.h:64
Error codes.
static int gve_destroy_queue(struct gve_nic *gve, struct gve_queue *queue)
Destroy transmit or receive queue.
Definition: gve.c:656
static int gve_alloc_shared(struct gve_nic *gve)
Allocate shared queue resources.
Definition: gve.c:680
struct gve_device_descriptor desc
Device descriptor.
Definition: gve.h:324
I/O buffers.
struct pci_device_id * ids
PCI ID table.
Definition: pci.h:249
#define GVE_CFG_BAR
Configuration BAR.
Definition: gve.h:54
uint32_t type
Operating system type.
Definition: ena.h:12
static int gve_deconfigure(struct gve_nic *gve)
Deconfigure device resources.
Definition: gve.c:504
size_t mtu
Maximum transmission unit length.
Definition: netdevice.h:415
static __always_inline void copy_from_user(void *dest, userptr_t src, off_t src_off, size_t len)
Copy data from user buffer.
Definition: uaccess.h:411
#define GVE_RXF_MORE
Receive packet continues into next descriptor.
Definition: gve.h:553
#define GVE_CFG_DRVSTAT
Driver status.
Definition: gve.h:68
physaddr_t dma_phys(struct dma_mapping *map, physaddr_t addr)
Get DMA address from physical address.
Create transmit queue command.
Definition: gve.h:220
uint32_t readl(volatile uint32_t *io_addr)
Read 32-bit dword from memory-mapped device.
unsigned long user_to_phys(userptr_t userptr, off_t offset)
Convert user pointer to physical address.
uint64_t desc
Microcode descriptor list physical address.
Definition: ucode.h:12
static int gve_admin(struct gve_nic *gve)
Issue admin queue command.
Definition: gve.c:337
#define DBGC(...)
Definition: compiler.h:505
static int gve_admin_alloc(struct gve_nic *gve)
Allocate admin queue.
Definition: gve.c:191
Simple admin command.
Definition: gve.h:116
A process descriptor.
Definition: process.h:31
#define GVE_ALIGN
Address alignment.
Definition: gve.h:51
A retry timer.
Definition: retry.h:21
long index
Definition: bigint.h:62
uint64_t addr[GVE_QPL_MAX]
Page address.
Definition: gve.h:210
static __always_inline void dma_set_mask_64bit(struct dma_device *dma)
Set 64-bit addressable space mask.
Definition: dma.h:474
unsigned long long uint64_t
Definition: stdint.h:13
static void gve_create_rx_param(struct gve_queue *queue, union gve_admin_command *cmd)
Construct command to create receive queue.
Definition: gve.c:593
A receive completion descriptor.
Definition: gve.h:559
#define GVE_ADMIN_DESTROY_TX
Destroy transmit queue command.
Definition: gve.h:267
struct dma_device * dma
DMA device.
Definition: netdevice.h:366
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition: netdevice.c:230
#define GVE_BUF_PER_PAGE
Number of data buffers per page.
Definition: gve.h:424
#define PROC_DESC_ONCE(object_type, process, _step)
Define a process descriptor for a process that runs only once.
Definition: process.h:97
#define GVE_IRQ_DISABLE
Disable interrupts.
Definition: gve.h:389
#define offsetof(type, field)
Get offset of a field within a structure.
Definition: stddef.h:24
A transmit or receive buffer descriptor.
Definition: gve.h:485
static void gve_remove(struct pci_device *pci)
Remove PCI device.
Definition: gve.c:1614
#define GVE_WATCHDOG_TIMEOUT
Time between reset recovery checks.
Definition: gve.h:682
const char * name
Name.
Definition: gve.h:609
Device descriptor.
Definition: gve.h:142
#define GVE_ADMIN_REGISTER
Register page list command.
Definition: gve.h:183
void process_del(struct process *process)
Remove process from process list.
Definition: process.c:79
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition: pci.c:154
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:129
static void gve_free_queue(struct gve_nic *gve, struct gve_queue *queue)
Free descriptor queue.
Definition: gve.c:939
static __always_inline unsigned long virt_to_phys(volatile const void *addr)
Convert virtual address to a physical address.
Definition: uaccess.h:361
static int gve_register(struct gve_nic *gve, struct gve_qpl *qpl)
Register queue page list.
Definition: gve.c:521
uint16_t len
Length.
Definition: gve.h:542
struct gve_queue rx
Receive queue.
Definition: gve.h:659
struct device dev
Generic device.
Definition: pci.h:208
#define GVE_TX_TYPE_CONT
Continuation of packet transmit descriptor type.
Definition: gve.h:516
static struct net_device_operations gve_operations
GVE network device operations.
Definition: gve.c:1440
struct gve_queue tx
Transmit queue.
Definition: gve.h:657
#define ECANCELED
Operation canceled.
Definition: errno.h:343
u16 seq
802.11 Sequence Control field
Definition: ieee80211.h:19
#define GVE_TX_FILL
Maximum number of transmit buffers.
Definition: gve.h:476
static const struct gve_queue_type gve_tx_type
Transmit descriptor queue type.
Definition: gve.c:1455
A timer.
Definition: timer.h:28
static int gve_start(struct gve_nic *gve)
Start up device.
Definition: gve.c:965
#define GVE_DB_BAR
Doorbell BAR.
Definition: gve.h:96
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
Definition: netdevice.h:515
void memset_user(userptr_t userptr, off_t offset, int c, size_t len)
Fill user buffer with a constant byte.
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:359
struct dma_mapping map
Page mapping.
Definition: gve.h:464
#define rmb()
Definition: io.h:484
#define ENOMEM
Not enough space.
Definition: errno.h:534
uint32_t activity
Reset recovery recorded activity counter.
Definition: gve.h:672
void * db
Doorbell registers.
Definition: gve.h:639
userptr_t userptr_add(userptr_t userptr, off_t offset)
Add offset to user pointer.
void * memcpy(void *dest, const void *src, size_t len) __nonnull
struct retry_timer watchdog
Reset recovery watchdog timer.
Definition: gve.h:670
uint32_t userptr_t
A pointer to a user buffer.
Definition: libkir.h:159
An admin queue command.
Definition: gve.h:276
#define GVE_CFG_ADMIN_DB
Admin queue doorbell.
Definition: gve.h:78
#define ETH_HLEN
Definition: if_ether.h:9
void dma_free(struct dma_mapping *map, void *addr, size_t len)
Unmap and free DMA-coherent buffer.
Assertions.
#define GVE_RX_SEQ_MASK
Receive sequence number mask.
Definition: gve.h:556
struct dma_mapping map
DMA mapping.
Definition: gve.h:329
#define be32_to_cpu(value)
Definition: byteswap.h:116
struct process startup
Startup process.
Definition: gve.h:666
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
Google Virtual Ethernet network driver.
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition: netdevice.h:572
#define GVE_RX_FILL
Maximum number of receive buffers.
Definition: gve.h:525
#define container_of(ptr, type, field)
Get containing structure.
Definition: stddef.h:35
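For illustration, a minimal sketch of the usual container_of() idiom as it applies to this driver: recovering the gve_nic from a pointer to its embedded watchdog retry timer. The helper name gve_from_timer is hypothetical; the watchdog field is the one listed in this index (gve.h:670).

    /* Recover the containing gve_nic from its embedded retry_timer */
    static struct gve_nic * gve_from_timer ( struct retry_timer *timer ) {
            return container_of ( timer, struct gve_nic, watchdog );
    }

The same pattern underlies gve_watchdog(), whose callback receives only the struct retry_timer pointer.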
Ethernet protocol.
static userptr_t gve_buffer(struct gve_queue *queue, unsigned int index)
Get buffer address.
Definition: gve.c:807
uint8_t seq
Sequence number.
Definition: gve.h:546
#define GVE_ADMIN_CREATE_TX
Create transmit queue command.
Definition: gve.h:217
#define VM_MIGRATED_RATE
Definition: fault.h:33
#define GVE_CFG_ADMIN_LEN
Admin queue base address length (16-bit register)
Definition: gve.h:93
void * priv
Driver private data.
Definition: netdevice.h:431
static int gve_reset(struct gve_nic *gve)
Reset hardware.
Definition: gve.c:139
static void gve_free_qpl(struct gve_nic *nic __unused, struct gve_qpl *qpl)
Free queue page list.
Definition: gve.c:774
#define bswap_16(value)
Definition: byteswap.h:58
static int gve_admin_simple(struct gve_nic *gve, unsigned int opcode, unsigned int id)
Issue simple admin queue command.
Definition: gve.c:396
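A sketch of issuing a single-parameter admin command via gve_admin_simple(); the queue ID of zero is illustrative:

    int rc;

    /* Destroy transmit queue 0 via a simple opcode-plus-ID command */
    if ( ( rc = gve_admin_simple ( gve, GVE_ADMIN_DESTROY_TX, 0 ) ) != 0 )
            return rc;

Commands such as GVE_ADMIN_UNREGISTER and GVE_ADMIN_DECONFIGURE follow the same opcode-plus-ID shape.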
#define DBGC_HDA(...)
Definition: compiler.h:506
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
uint32_t cons
Consumer counter.
Definition: gve.h:600
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition: netdevice.h:774
void writel(uint32_t data, volatile uint32_t *io_addr)
Write 32-bit dword to memory-mapped device.
volatile uint32_t * db[GVE_IRQ_COUNT]
Interrupt doorbells.
Definition: gve.h:385
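As a sketch (assuming the doorbell registers expect big-endian values, matching the bswap_32() conventions used elsewhere in this driver), masking every interrupt channel might look like this; the helper name is hypothetical:

    /* Hypothetical helper: mask all interrupt channels */
    static void example_irq_disable ( struct gve_nic *gve ) {
            unsigned int i;

            /* Write the disable value to each channel's doorbell */
            for ( i = 0 ; i < GVE_IRQ_COUNT ; i++ )
                    writel ( bswap_32 ( GVE_IRQ_DISABLE ), gve->irqs.db[i] );
    }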
static void gve_poll_rx(struct net_device *netdev)
Poll for received packets.
Definition: gve.c:1308
#define build_assert(condition)
Assert a condition at build time (after dead code elimination)
Definition: assert.h:76
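build_assert() relies on dead code elimination: the condition must be a compile-time constant inside a function body, and a false condition leaves a call to an undefined symbol that fails the link. A minimal sketch (EXAMPLE_COUNT is a hypothetical constant):

    #define EXAMPLE_COUNT 256

    static void example_check ( void ) {
            /* Descriptor queue lengths must be powers of two */
            build_assert ( ( EXAMPLE_COUNT & ( EXAMPLE_COUNT - 1 ) ) == 0 );
    }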
uint32_t prod
Producer counter.
Definition: gve.h:314
static int netdev_link_ok(struct net_device *netdev)
Check link state of network device.
Definition: netdevice.h:636
static struct net_device * netdev
Definition: gdbudp.c:52
unsigned int count
Number of pages.
Definition: gve.h:466
#define be16_to_cpu(value)
Definition: byteswap.h:115
Queue page list.
Definition: gve.h:460
uint16_t count
Number of entries.
Definition: ena.h:22
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition: pci.c:96
#define GVE_TX_TYPE_START
Start of packet transmit descriptor type.
Definition: gve.h:513
struct gve_irqs irqs
Interrupt channels.
Definition: gve.h:650
#define GVE_TX_QPL
Transmit queue page list ID.
Definition: gve.h:479
static void gve_create_tx_param(struct gve_queue *queue, union gve_admin_command *cmd)
Construct command to create transmit queue.
Definition: gve.c:574
void unregister_netdev(struct net_device *netdev)
Unregister network device.
Definition: netdevice.c:941
static int gve_create_queue(struct gve_nic *gve, struct gve_queue *queue)
Create transmit or receive queue.
Definition: gve.c:616
A descriptor queue.
Definition: gve.h:570
#define bswap_32(value)
Definition: byteswap.h:70
uint8_t id
Request identifier.
Definition: ena.h:12
void process_add(struct process *process)
Add process to process list.
Definition: process.c:59
#define DBGC2_HDA(...)
Definition: compiler.h:523
uint8_t flags
Flags.
Definition: gve.h:544
void * cfg
Configuration registers.
Definition: gve.h:637
#define GVE_CFG_ADMIN_BASE_LO
Admin queue base address low 32 bits.
Definition: gve.h:90
#define GVE_RESET_MAX_WAIT_MS
Maximum time to wait for reset.
Definition: gve.h:72
char * strerror(int errno)
Retrieve string representation of error number.
Definition: strerror.c:78
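strerror() converts iPXE's error codes into human-readable strings, which is the standard way to report failures in debug output. A fragment from a typical error path (DBGC is iPXE's per-object debug macro from compiler.h, not listed in this index):

    int rc;

    /* Report failures using strerror() on the returned code */
    if ( ( rc = gve_reset ( gve ) ) != 0 ) {
            DBGC ( gve, "GVE %p could not reset: %s\n",
                   gve, strerror ( rc ) );
            return rc;
    }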
struct refcnt refcnt
Reference counter.
Definition: netdevice.h:354
static int gve_alloc_qpl(struct gve_nic *gve, struct gve_qpl *qpl, uint32_t id, unsigned int buffers)
Allocate queue page list.
Definition: gve.c:744
uint16_t cons
Consumer index.
Definition: ena.h:22
static void gve_restart(struct gve_nic *gve)
Trigger startup process.
Definition: gve.c:1086
unsigned long pci_bar_size(struct pci_device *pci, unsigned int reg)
Find the size of a PCI BAR.
Definition: pciextra.c:92
PCI bus.
static void gve_poll_tx(struct net_device *netdev)
Poll for completed transmissions.
Definition: gve.c:1283
A PCI device.
Definition: pci.h:206
int register_netdev(struct net_device *netdev)
Register network device.
Definition: netdevice.c:759
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:155
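iob_len() is simply the distance between an I/O buffer's data and tail pointers. A self-contained sketch of the allocate-fill-measure pattern, assuming iob_put() (the standard iobuf.h helper for extending the data region); the payload array is hypothetical:

    static const uint8_t payload[] = { 0x01, 0x02, 0x03, 0x04 };
    struct io_buffer *iobuf;

    /* Allocate a buffer and append the payload */
    iobuf = alloc_iob ( sizeof ( payload ) );
    if ( ! iobuf )
            return -ENOMEM;
    memcpy ( iob_put ( iobuf, sizeof ( payload ) ), payload,
             sizeof ( payload ) );
    assert ( iob_len ( iobuf ) == sizeof ( payload ) );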
const char * eth_ntoa(const void *ll_addr)
Transcribe Ethernet address.
Definition: ethernet.c:175
Event counter array.
Definition: gve.h:345
static __always_inline void copy_to_user(userptr_t dest, off_t dest_off, const void *src, size_t len)
Copy data to user buffer.
Definition: uaccess.h:398
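Since queue page list buffers live behind userptr_t handles, packet data is staged with copy_to_user() rather than memcpy(). A transmit-path sketch using gve_buffer() from this file; the helper wrapper and its parameters are hypothetical:

    /* Hypothetical helper: stage a packet into transmit slot "index" */
    static void example_stage ( struct gve_queue *queue, unsigned int index,
                                struct io_buffer *iobuf ) {
            /* Copy packet into the per-slot queue page list buffer */
            copy_to_user ( gve_buffer ( queue, index ), 0, iobuf->data,
                           iob_len ( iobuf ) );
    }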
A network device.
Definition: netdevice.h:352
void netdev_link_err(struct net_device *netdev, int rc)
Mark network device as having a specific link state.
Definition: netdevice.c:207
#define ENODEV
No such device.
Definition: errno.h:509
char * inet_ntoa(struct in_addr in)
Convert IPv4 address to dotted-quad notation.
Definition: ipv4.c:668
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition: netdevice.h:528
#define GVE_ADMIN_DECONFIGURE
Deconfigure device resources command.
Definition: gve.h:273
A Google Virtual Ethernet NIC.
Definition: gve.h:635
struct io_buffer * tx_iobuf[GVE_TX_FILL]
Transmit I/O buffers.
Definition: gve.h:661
static void gve_watchdog(struct retry_timer *timer, int over __unused)
Reset recovery watchdog.
Definition: gve.c:1102
static void gve_close(struct net_device *netdev)
Close network device.
Definition: gve.c:1182
static size_t gve_address(struct gve_queue *queue, unsigned int index)
Get buffer address (within queue page list address space)
Definition: gve.c:790
struct gve_pages pages
Page address list.
Definition: gve.h:326
static void process_init_stopped(struct process *process, struct process_descriptor *desc, struct refcnt *refcnt)
Initialise process without adding to process list.
Definition: process.h:145
#define ETH_ALEN
Definition: if_ether.h:8
A PCI device ID list entry.
Definition: pci.h:170
#define EIO_ADMIN(status)
Definition: gve.c:116
static int gve_open(struct net_device *netdev)
Open network device.
Definition: gve.c:1142
unsigned int uint32_t
Definition: stdint.h:12
unsigned int seq
Receive sequence number.
Definition: gve.h:663
#define GVE_RX_QPL
Receive queue page list ID.
Definition: gve.h:528
#define ENETDOWN
Network is down.
Definition: errno.h:478
static struct xen_remove_from_physmap * remove
Definition: xenmem.h:39
uint8_t status
Status.
Definition: ena.h:16
Network device operations.
Definition: netdevice.h:213
uint8_t desc_len
Descriptor size.
Definition: gve.h:625
#define GVE_TX_IRQ
Transmit queue interrupt channel.
Definition: gve.h:482
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition: netdevice.c:548
struct device * dev
Underlying hardware device.
Definition: netdevice.h:364
#define GVE_QPL_MAX
Maximum number of pages per queue.
Definition: gve.h:205
void * dma_alloc(struct dma_device *dma, struct dma_mapping *map, size_t len, size_t align)
Allocate and map DMA-coherent buffer.
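A sketch of allocating the admin queue command ring with dma_alloc(); the alignment constant GVE_ADMIN_ALIGN, the helper name, and passing the DMA device explicitly (e.g. netdev->dma) are assumptions:

    /* Hypothetical helper: allocate the admin command ring */
    static int example_admin_alloc ( struct gve_nic *gve,
                                     struct dma_device *dma ) {
            struct gve_admin *admin = &gve->admin;
            size_t len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) );

            /* Allocate DMA-coherent command ring; GVE_ADMIN_ALIGN is
             * an assumed alignment constant.
             */
            admin->cmd = dma_alloc ( dma, &admin->map, len,
                                     GVE_ADMIN_ALIGN );
            if ( ! admin->cmd )
                    return -ENOMEM;
            return 0;
    }

The matching teardown would use dma_free ( &admin->map, admin->cmd, len ).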
Network device management.
void start_timer_fixed(struct retry_timer *timer, unsigned long timeout)
Start timer with a specified timeout.
Definition: retry.c:64
unsigned long physaddr_t
Definition: stdint.h:20
unsigned int id
Queue page list ID.
Definition: gve.h:468
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition: pci.h:369
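pci_set_drvdata() and pci_get_drvdata() carry the net_device pointer between probe and remove. A condensed sketch of the usual iPXE pairing (error unwinding omitted; the example_* function names are hypothetical):

    static int example_probe ( struct pci_device *pci ) {
            struct net_device *netdev;

            /* Allocate and initialise network device */
            netdev = alloc_etherdev ( sizeof ( struct gve_nic ) );
            if ( ! netdev )
                    return -ENOMEM;
            netdev_init ( netdev, &gve_operations );
            pci_set_drvdata ( pci, netdev );
            return register_netdev ( netdev );
    }

    static void example_remove ( struct pci_device *pci ) {
            struct net_device *netdev = pci_get_drvdata ( pci );

            unregister_netdev ( netdev );
            netdev_nullify ( netdev );
            netdev_put ( netdev );
    }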
void mdelay(unsigned long msecs)
Delay for a fixed number of milliseconds.
Definition: timer.c:78
#define cpu_to_be32(value)
Definition: byteswap.h:110
#define GVE_ADMIN_CONFIGURE
Configure device resources command.
Definition: gve.h:164
void stop_timer(struct retry_timer *timer)
Stop timer.
Definition: retry.c:117
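The reset recovery watchdog is an ordinary retry timer, armed with a fixed period and disarmed on shutdown. A sketch of the open/close pairing:

    /* Arm watchdog when the device is opened */
    start_timer_fixed ( &gve->watchdog, GVE_WATCHDOG_TIMEOUT );

    /* ... and disarm it again when the device is closed */
    stop_timer ( &gve->watchdog );

Within gve_watchdog() itself the timer must be restarted each time it fires, since an expired retry timer does not rearm itself.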
void dma_ufree(struct dma_mapping *map, userptr_t addr, size_t len)
Unmap and free DMA-coherent buffer from external (user) memory.
void netdev_tx_complete_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Complete network transmission.
Definition: netdevice.c:470
#define GVE_ADMIN_MAX_WAIT_MS
Maximum time to wait for admin queue commands.
Definition: gve.h:676
#define GVE_CFG_ADMIN_BASE_HI
Admin queue base address high 32 bits.
Definition: gve.h:87
static unsigned int gve_next(unsigned int seq)
Calculate next receive sequence number.
Definition: gve.c:820
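A plausible reconstruction of gve_next(), based on the mask-based wraparound implied by GVE_RX_SEQ_MASK; the skip-zero step is an assumption (a zero sequence field commonly denotes a not-yet-written completion):

    static unsigned int gve_next ( unsigned int seq ) {
            /* Advance and wrap within the masked sequence space */
            seq = ( ( seq + 1 ) & GVE_RX_SEQ_MASK );
            /* Assumed: zero is skipped, since it marks an unused slot */
            return ( seq ? seq : 1 );
    }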
unsigned int count
Number of descriptors (must be a power of two)
Definition: gve.h:581
static struct pci_device_id gve_nics[]
GVE PCI device IDs.
Definition: gve.c:1637
struct gve_events events
Event counters.
Definition: gve.h:652
#define GVE_ADMIN_COUNT
Number of admin queue commands.
Definition: gve.h:307
#define GVE_CFG_ADMIN_EVT
Admin queue event counter.
Definition: gve.h:81
#define DBGC2(...)
Definition: compiler.h:522
#define GVE_IRQ_COUNT
Number of interrupt channels.
Definition: gve.h:376
int(* probe)(struct pci_device *pci)
Probe device.
Definition: pci.h:260
userptr_t data
Page addresses.
Definition: gve.h:462
#define GVE_CFG_DRVSTAT_RUN
Run admin queue.
Definition: gve.h:69
static int gve_transmit(struct net_device *netdev, struct io_buffer *iobuf)
Transmit packet.
Definition: gve.c:1212
uint64_t addr
Address (within queue page list address space)
Definition: gve.h:487
Interrupt channel array.
Definition: gve.h:379
unsigned int retries
Startup process retry counter.
Definition: gve.h:668
void * data
Start of data.
Definition: iobuf.h:48
unsigned int count
Actual number of event counters.
Definition: gve.h:351
u32 addr
Definition: sky2.h:8
#define EIO
Input/output error.
Definition: errno.h:433
#define GVE_ADMIN_CREATE_RX
Create receive queue command.
Definition: gve.h:238
uint8_t revision
PCI revision.
Definition: gve.h:641
struct net_device * alloc_etherdev(size_t priv_size)
Allocate Ethernet device.
Definition: ethernet.c:264
u8 rx[WPA_TKIP_MIC_KEY_LEN]
MIC key for packets from the AP.
Definition: wpa.h:234
static void gve_admin_enable(struct gve_nic *gve)
Enable admin queue.
Definition: gve.c:250
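A partial sketch of what enabling the admin queue involves: programming the ring's DMA address into the split base-address registers, assuming (per the byte-swapping conventions in this index) that the configuration registers are big-endian. The ring length register (GVE_CFG_ADMIN_LEN) and run bit (GVE_CFG_DRVSTAT_RUN) listed in this index would be programmed similarly and are omitted here:

    /* Partial sketch of enabling the admin queue */
    static void example_admin_enable ( struct gve_nic *gve ) {
            struct gve_admin *admin = &gve->admin;
            uint64_t base;

            /* Program base address into split big-endian registers */
            base = dma ( &admin->map, admin->cmd );
            writel ( bswap_32 ( base >> 32 ),
                     ( gve->cfg + GVE_CFG_ADMIN_BASE_HI ) );
            writel ( bswap_32 ( base >> 0 ),
                     ( gve->cfg + GVE_CFG_ADMIN_BASE_LO ) );
    }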
static union gve_admin_command * gve_admin_command(struct gve_nic *gve)
Get next available admin queue command slot.
Definition: gve.c:280
void iounmap(volatile const void *io_addr)
Unmap I/O address.
#define cpu_to_be64(value)
Definition: byteswap.h:111
struct pci_driver gve_driver __pci_driver
GVE PCI driver.
Definition: gve.c:1642
#define PCI_REVISION
PCI revision.
Definition: pci.h:44
static int gve_unregister(struct gve_nic *gve, struct gve_qpl *qpl)
Unregister page list.
Definition: gve.c:556
static int gve_admin_wait(struct gve_nic *gve)
Wait for admin queue command to complete.
Definition: gve.c:301
static const struct gve_queue_type gve_rx_type
Receive descriptor queue type.
Definition: gve.c:1467
Page list.
Definition: gve.h:208
userptr_t dma_umalloc(struct dma_device *dma, struct dma_mapping *map, size_t len, size_t align)
Allocate and map DMA-coherent buffer from external (user) memory.
uint32_t db_idx
Interrupt doorbell index (within doorbell BAR)
Definition: gve.h:357
Scratch buffer for admin queue commands.
Definition: gve.h:320
static int gve_probe(struct pci_device *pci)
Probe PCI device.
Definition: gve.c:1524
uint16_t offset
Offset to command line.
Definition: bzimage.h:8
#define GVE_ADMIN_UNREGISTER
Unregister page list command.
Definition: gve.h:214
Fault injection.
struct net_device * netdev
Network device.
Definition: gve.h:643
typeof(acpi_find) *acpi_finder = acpi_find
ACPI table finder.
Definition: acpi.c:45
struct dma_mapping map
DMA mapping.
Definition: gve.h:383
struct gve_irq * irq
Interrupt channels.
Definition: gve.h:381
#define GVE_ADMIN_DESCRIBE_VER
Device descriptor version.
Definition: gve.h:139
Admin queue.
Definition: gve.h:310
#define GVE_BUF_SIZE
Queue data buffer size.
Definition: gve.h:421
static int gve_configure(struct gve_nic *gve)
Configure device resources.
Definition: gve.c:463
static void gve_admin_free(struct gve_nic *gve)
Free admin queue.
Definition: gve.c:232
FILE_LICENCE(GPL2_OR_LATER_OR_UBDL)
union gve_admin_command * cmd
Commands.
Definition: gve.h:312
A descriptor queue type.
Definition: gve.h:607
size_t max_pkt_len
Maximum packet length.
Definition: netdevice.h:409
uint8_t cmplt_len
Completion size.
Definition: gve.h:627
struct dma_mapping map
DMA mapping.
Definition: gve.h:316
static int gve_setup(struct gve_nic *gve)
Set up admin queue and get device description.
Definition: gve.c:1485
#define GVE_RXF_ERROR
Receive error.
Definition: gve.h:550
static __always_inline physaddr_t dma(struct dma_mapping *map, void *addr)
Get DMA address from virtual address.
Definition: dma.h:436
void * pci_ioremap(struct pci_device *pci, unsigned long bus_addr, size_t len)
Map PCI bus address as an I/O address.
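A probe-path sketch of mapping the configuration BAR; using PCI_BASE_ADDRESS_0 for the configuration BAR is an assumption (the driver defines its own BAR constants, e.g. GVE_DB_BAR for the doorbell BAR):

    unsigned long start;

    /* Map configuration registers */
    start = pci_bar_start ( pci, PCI_BASE_ADDRESS_0 );
    gve->cfg = pci_ioremap ( pci, start, GVE_CFG_SIZE );
    if ( ! gve->cfg )
            return -ENODEV;

The mapping is released with iounmap ( gve->cfg ) on the remove path.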
const struct gve_queue_type * type
Queue type.
Definition: gve.h:579
struct gve_admin admin
Admin queue.
Definition: gve.h:648
uint16_t queue
Queue ID.
Definition: ena.h:22
uint32_t prod
Producer counter.
Definition: gve.h:598
uint32_t len
Length.
Definition: ena.h:14
uint8_t hw_addr[MAX_HW_ADDR_LEN]
Hardware address.
Definition: netdevice.h:381
#define NULL
NULL pointer (VOID *)
Definition: Base.h:321
struct golan_eqe_cmd cmd
Definition: CIB_PRM.h:29
#define ETIMEDOUT
Connection timed out.
Definition: errno.h:669
String functions.
#define PCI_ROM(_vendor, _device, _name, _description, _data)
Definition: pci.h:303
#define GVE_RX_PAD
Padding at the start of all received packets.
Definition: gve.h:567
uint8_t create
Command to create queue.
Definition: gve.h:629
A transmit descriptor.
Definition: gve.h:505
#define GVE_RESET_MAX_RETRY
Maximum number of times to reattempt device reset.
Definition: gve.h:679
static void gve_poll(struct net_device *netdev)
Poll for completed and received packets.
Definition: gve.c:1423
void startup(void)
Start up iPXE.
Definition: init.c:67
struct gve_scratch scratch
Scratch buffer.
Definition: gve.h:654
static struct process_descriptor gve_startup_desc
Device startup process descriptor.
Definition: gve.c:1515
static void gve_free_shared(struct gve_nic *gve)
Free shared queue resources.
Definition: gve.c:722
u8 tx[WPA_TKIP_MIC_KEY_LEN]
MIC key for packets to the AP.
Definition: wpa.h:237
A DMA-capable device.
Definition: dma.h:47
#define GVE_CFG_SIZE
Configuration BAR size.
Definition: gve.h:61
void * memset(void *dest, int character, size_t len) __nonnull
int pci_read_config_byte(struct pci_device *pci, unsigned int where, uint8_t *value)
Read byte from PCI configuration space.
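A one-liner showing how the PCI revision recorded in struct gve_nic might be obtained, using the PCI_REVISION offset listed above:

    /* Record hardware revision */
    pci_read_config_byte ( pci, PCI_REVISION, &gve->revision );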
A persistent I/O buffer.
Definition: iobuf.h:33