iPXE
gve.c
/*
 * Copyright (C) 2024 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include <ipxe/iobuf.h>
#include <ipxe/dma.h>
#include <ipxe/pci.h>
#include <ipxe/fault.h>
#include "gve.h"

/** @file
 *
 * Google Virtual Ethernet network driver
 *
 */

/* Disambiguate the various error causes */
#define EINFO_EIO_ADMIN_UNSET \
        __einfo_uniqify ( EINFO_EIO, 0x00, "Uncompleted" )
#define EIO_ADMIN_UNSET \
        __einfo_error ( EINFO_EIO_ADMIN_UNSET )
#define EINFO_EIO_ADMIN_ABORTED \
        __einfo_uniqify ( EINFO_EIO, 0x10, "Aborted" )
#define EIO_ADMIN_ABORTED \
        __einfo_error ( EINFO_EIO_ADMIN_ABORTED )
#define EINFO_EIO_ADMIN_EXISTS \
        __einfo_uniqify ( EINFO_EIO, 0x11, "Already exists" )
#define EIO_ADMIN_EXISTS \
        __einfo_error ( EINFO_EIO_ADMIN_EXISTS )
#define EINFO_EIO_ADMIN_CANCELLED \
        __einfo_uniqify ( EINFO_EIO, 0x12, "Cancelled" )
#define EIO_ADMIN_CANCELLED \
        __einfo_error ( EINFO_EIO_ADMIN_CANCELLED )
#define EINFO_EIO_ADMIN_DATALOSS \
        __einfo_uniqify ( EINFO_EIO, 0x13, "Data loss" )
#define EIO_ADMIN_DATALOSS \
        __einfo_error ( EINFO_EIO_ADMIN_DATALOSS )
#define EINFO_EIO_ADMIN_DEADLINE \
        __einfo_uniqify ( EINFO_EIO, 0x14, "Deadline exceeded" )
#define EIO_ADMIN_DEADLINE \
        __einfo_error ( EINFO_EIO_ADMIN_DEADLINE )
#define EINFO_EIO_ADMIN_PRECONDITION \
        __einfo_uniqify ( EINFO_EIO, 0x15, "Failed precondition" )
#define EIO_ADMIN_PRECONDITION \
        __einfo_error ( EINFO_EIO_ADMIN_PRECONDITION )
#define EINFO_EIO_ADMIN_INTERNAL \
        __einfo_uniqify ( EINFO_EIO, 0x16, "Internal error" )
#define EIO_ADMIN_INTERNAL \
        __einfo_error ( EINFO_EIO_ADMIN_INTERNAL )
#define EINFO_EIO_ADMIN_INVAL \
        __einfo_uniqify ( EINFO_EIO, 0x17, "Invalid argument" )
#define EIO_ADMIN_INVAL \
        __einfo_error ( EINFO_EIO_ADMIN_INVAL )
#define EINFO_EIO_ADMIN_NOT_FOUND \
        __einfo_uniqify ( EINFO_EIO, 0x18, "Not found" )
#define EIO_ADMIN_NOT_FOUND \
        __einfo_error ( EINFO_EIO_ADMIN_NOT_FOUND )
#define EINFO_EIO_ADMIN_RANGE \
        __einfo_uniqify ( EINFO_EIO, 0x19, "Out of range" )
#define EIO_ADMIN_RANGE \
        __einfo_error ( EINFO_EIO_ADMIN_RANGE )
#define EINFO_EIO_ADMIN_PERM \
        __einfo_uniqify ( EINFO_EIO, 0x1a, "Permission denied" )
#define EIO_ADMIN_PERM \
        __einfo_error ( EINFO_EIO_ADMIN_PERM )
#define EINFO_EIO_ADMIN_UNAUTH \
        __einfo_uniqify ( EINFO_EIO, 0x1b, "Unauthenticated" )
#define EIO_ADMIN_UNAUTH \
        __einfo_error ( EINFO_EIO_ADMIN_UNAUTH )
#define EINFO_EIO_ADMIN_RESOURCE \
        __einfo_uniqify ( EINFO_EIO, 0x1c, "Resource exhausted" )
#define EIO_ADMIN_RESOURCE \
        __einfo_error ( EINFO_EIO_ADMIN_RESOURCE )
#define EINFO_EIO_ADMIN_UNAVAIL \
        __einfo_uniqify ( EINFO_EIO, 0x1d, "Unavailable" )
#define EIO_ADMIN_UNAVAIL \
        __einfo_error ( EINFO_EIO_ADMIN_UNAVAIL )
#define EINFO_EIO_ADMIN_NOTSUP \
        __einfo_uniqify ( EINFO_EIO, 0x1e, "Unimplemented" )
#define EIO_ADMIN_NOTSUP \
        __einfo_error ( EINFO_EIO_ADMIN_NOTSUP )
#define EINFO_EIO_ADMIN_UNKNOWN \
        __einfo_uniqify ( EINFO_EIO, 0x1f, "Unknown error" )
#define EIO_ADMIN_UNKNOWN \
        __einfo_error ( EINFO_EIO_ADMIN_UNKNOWN )
#define EIO_ADMIN( status ) \
        EUNIQ ( EINFO_EIO, ( (status) & 0x1f ), \
                EIO_ADMIN_UNSET, EIO_ADMIN_ABORTED, EIO_ADMIN_EXISTS, \
                EIO_ADMIN_CANCELLED, EIO_ADMIN_DATALOSS, \
                EIO_ADMIN_DEADLINE, EIO_ADMIN_PRECONDITION, \
                EIO_ADMIN_INTERNAL, EIO_ADMIN_INVAL, \
                EIO_ADMIN_NOT_FOUND, EIO_ADMIN_RANGE, EIO_ADMIN_PERM, \
                EIO_ADMIN_UNAUTH, EIO_ADMIN_RESOURCE, \
                EIO_ADMIN_UNAVAIL, EIO_ADMIN_NOTSUP, EIO_ADMIN_UNKNOWN )
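
/* Worked example (illustrative, not part of the original source): a
 * failing command's 32-bit status word is reduced to its low five
 * bits by EIO_ADMIN(), so any status whose low five bits are 0x17,
 * for example 0xfffffff7, selects EIO_ADMIN_INVAL and is reported as
 * "Invalid argument" rather than as an undifferentiated I/O error.
 */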
126
127/******************************************************************************
128 *
129 * Buffer layout
130 *
131 ******************************************************************************
132 */
133
134/**
135 * Get buffer offset (within queue page list allocation)
136 *
137 * @v queue Descriptor queue
138 * @v tag Buffer tag
139 * @ret addr Buffer address within queue page list address space
140 */
141static inline __attribute__ (( always_inline)) size_t
142gve_offset ( struct gve_queue *queue, unsigned int tag ) {
143
144 /* We allocate sufficient pages for the maximum fill level of
145 * buffers, and reuse the buffers in strict rotation as they
146 * are released by the hardware.
147 */
149 return ( tag * GVE_BUF_SIZE );
150}
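
/* Illustrative note (not part of the original source): with a fill
 * level of eight, tags 0-7 map to offsets 0, GVE_BUF_SIZE, ...,
 * 7 * GVE_BUF_SIZE within the queue page list.  A tag is returned to
 * the ring by the completion handlers and is immediately reusable, so
 * no separate free list is needed: the tag itself encodes the
 * buffer's location.
 */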

/**
 * Get buffer address (within queue page list address space)
 *
 * @v queue             Descriptor queue
 * @v tag               Buffer tag
 * @ret addr            Buffer address within queue page list address space
 */
static inline __attribute__ (( always_inline )) physaddr_t
gve_address ( struct gve_queue *queue, unsigned int tag ) {

        /* Pages are allocated as a single contiguous block */
        return ( queue->qpl.base + gve_offset ( queue, tag ) );
}

/**
 * Get buffer address
 *
 * @v queue             Descriptor queue
 * @v tag               Buffer tag
 * @ret addr            Buffer address
 */
static inline __attribute__ (( always_inline )) void *
gve_buffer ( struct gve_queue *queue, unsigned int tag ) {

        /* Pages are allocated as a single contiguous block */
        return ( queue->qpl.data + gve_offset ( queue, tag ) );
}

/******************************************************************************
 *
 * Device reset
 *
 ******************************************************************************
 */

/**
 * Reset hardware
 *
 * @v gve               GVE device
 * @ret rc              Return status code
 */
static int gve_reset ( struct gve_nic *gve ) {
        uint32_t pfn;
        unsigned int i;

        /* Skip reset if admin queue page frame number is already
         * clear.  Triggering a reset on an already-reset device seems
         * to cause a delayed reset to be scheduled.  This can cause
         * the device to end up in a reset loop, where each attempt to
         * recover from reset triggers another reset a few seconds
         * later.
         */
        pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
        if ( ! pfn ) {
                DBGC ( gve, "GVE %p skipping reset\n", gve );
                return 0;
        }

        /* Clear admin queue page frame number */
        writel ( 0, gve->cfg + GVE_CFG_ADMIN_PFN );
        wmb();

        /* Wait for device to reset */
        for ( i = 0 ; i < GVE_RESET_MAX_WAIT_MS ; i++ ) {

                /* Delay */
                mdelay ( 1 );

                /* Check for reset completion */
                pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
                if ( ! pfn )
                        return 0;
        }

        DBGC ( gve, "GVE %p reset timed out (PFN %#08x devstat %#08x)\n",
               gve, bswap_32 ( pfn ),
               bswap_32 ( readl ( gve->cfg + GVE_CFG_DEVSTAT ) ) );
        return -ETIMEDOUT;
}

/******************************************************************************
 *
 * Admin queue
 *
 ******************************************************************************
 */

/**
 * Get operating mode name (for debugging)
 *
 * @v mode              Operating mode
 * @ret name            Mode name
 */
static inline const char * gve_mode_name ( unsigned int mode ) {
        static char buf[ 8 /* "XXX-XXX" + NUL */ ];

        snprintf ( buf, sizeof ( buf ), "%s-%s",
                   ( ( mode & GVE_MODE_DQO ) ? "DQO" : "GQI" ),
                   ( ( mode & GVE_MODE_QPL ) ? "QPL" : "RDA" ) );
        return buf;
}

/**
 * Allocate admin queue
 *
 * @v gve               GVE device
 * @ret rc              Return status code
 */
static int gve_admin_alloc ( struct gve_nic *gve ) {
        struct dma_device *dma = gve->dma;
        struct gve_admin *admin = &gve->admin;
        struct gve_scratch *scratch = &gve->scratch;
        size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) );
        size_t scratch_len = sizeof ( *scratch->buf );
        int rc;

        /* Allocate admin queue */
        admin->cmd = dma_alloc ( dma, &admin->map, admin_len, GVE_ALIGN );
        if ( ! admin->cmd ) {
                rc = -ENOMEM;
                goto err_admin;
        }

        /* Allocate scratch buffer */
        scratch->buf = dma_alloc ( dma, &scratch->map, scratch_len, GVE_ALIGN );
        if ( ! scratch->buf ) {
                rc = -ENOMEM;
                goto err_scratch;
        }

        DBGC ( gve, "GVE %p AQ at [%08lx,%08lx) scratch [%08lx,%08lx)\n",
               gve, virt_to_phys ( admin->cmd ),
               ( virt_to_phys ( admin->cmd ) + admin_len ),
               virt_to_phys ( scratch->buf ),
               ( virt_to_phys ( scratch->buf ) + scratch_len ) );
        return 0;

        dma_free ( &scratch->map, scratch->buf, scratch_len );
 err_scratch:
        dma_free ( &admin->map, admin->cmd, admin_len );
 err_admin:
        return rc;
}

/**
 * Free admin queue
 *
 * @v gve               GVE device
 */
static void gve_admin_free ( struct gve_nic *gve ) {
        struct gve_admin *admin = &gve->admin;
        struct gve_scratch *scratch = &gve->scratch;
        size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) );
        size_t scratch_len = sizeof ( *scratch->buf );

        /* Free scratch buffer */
        dma_free ( &scratch->map, scratch->buf, scratch_len );

        /* Free admin queue */
        dma_free ( &admin->map, admin->cmd, admin_len );
}

/**
 * Enable admin queue
 *
 * @v gve               GVE device
 */
static void gve_admin_enable ( struct gve_nic *gve ) {
        struct gve_admin *admin = &gve->admin;
        size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) );
        physaddr_t base;

        /* Reset queue */
        admin->prod = 0;

        /* Program queue addresses and capabilities */
        base = dma ( &admin->map, admin->cmd );
        writel ( bswap_32 ( base / GVE_PAGE_SIZE ),
                 gve->cfg + GVE_CFG_ADMIN_PFN );
        writel ( bswap_32 ( base & 0xffffffffUL ),
                 gve->cfg + GVE_CFG_ADMIN_BASE_LO );
        if ( sizeof ( base ) > sizeof ( uint32_t ) ) {
                writel ( bswap_32 ( ( ( uint64_t ) base ) >> 32 ),
                         gve->cfg + GVE_CFG_ADMIN_BASE_HI );
        } else {
                writel ( 0, gve->cfg + GVE_CFG_ADMIN_BASE_HI );
        }
        writel ( bswap_16 ( admin_len ), gve->cfg + GVE_CFG_ADMIN_LEN );
        writel ( bswap_32 ( GVE_CFG_DRVSTAT_RUN ), gve->cfg + GVE_CFG_DRVSTAT );
}

/**
 * Get next available admin queue command slot
 *
 * @v gve               GVE device
 * @ret cmd             Admin queue command
 */
static union gve_admin_command * gve_admin_command ( struct gve_nic *gve ) {
        struct gve_admin *admin = &gve->admin;
        union gve_admin_command *cmd;
        unsigned int index;

        /* Get next command slot */
        index = admin->prod;
        cmd = &admin->cmd[ index % GVE_ADMIN_COUNT ];

        /* Initialise request */
        memset ( cmd, 0, sizeof ( *cmd ) );

        return cmd;
}

/**
 * Wait for admin queue command to complete
 *
 * @v gve               GVE device
 * @ret rc              Return status code
 */
static int gve_admin_wait ( struct gve_nic *gve ) {
        struct gve_admin *admin = &gve->admin;
        uint32_t evt;
        uint32_t pfn;
        unsigned int i;

        /* Wait for any outstanding commands to complete */
        for ( i = 0 ; i < GVE_ADMIN_MAX_WAIT_MS ; i++ ) {

                /* Check event counter */
                rmb();
                evt = bswap_32 ( readl ( gve->cfg + GVE_CFG_ADMIN_EVT ) );
                if ( evt == admin->prod )
                        return 0;

                /* Check for device reset */
                pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
                if ( ! pfn )
                        break;

                /* Delay */
                mdelay ( 1 );
        }

        DBGC ( gve, "GVE %p AQ %#02x %s (completed %#02x, status %#08x)\n",
               gve, admin->prod, ( pfn ? "timed out" : "saw reset" ), evt,
               bswap_32 ( readl ( gve->cfg + GVE_CFG_DEVSTAT ) ) );
        return ( pfn ? -ETIMEDOUT : -ECONNRESET );
}

/**
 * Issue admin queue command
 *
 * @v gve               GVE device
 * @ret rc              Return status code
 */
static int gve_admin ( struct gve_nic *gve ) {
        struct gve_admin *admin = &gve->admin;
        union gve_admin_command *cmd;
        unsigned int index;
        unsigned int opcode;
        uint32_t status;
        int rc;

        /* Ensure admin queue is idle */
        if ( ( rc = gve_admin_wait ( gve ) ) != 0 )
                return rc;

        /* Get next command slot */
        index = admin->prod;
        cmd = &admin->cmd[ index % GVE_ADMIN_COUNT ];
        opcode = cmd->hdr.opcode;
        DBGC2 ( gve, "GVE %p AQ %#02x command %#04x request:\n",
                gve, index, opcode );
        DBGC2_HDA ( gve, 0, cmd, sizeof ( *cmd ) );

        /* Increment producer counter */
        admin->prod++;

        /* Ring doorbell */
        wmb();
        writel ( bswap_32 ( admin->prod ), gve->cfg + GVE_CFG_ADMIN_DB );

        /* Wait for command to complete */
        if ( ( rc = gve_admin_wait ( gve ) ) != 0 )
                return rc;

        /* Check command status */
        status = be32_to_cpu ( cmd->hdr.status );
        if ( status != GVE_ADMIN_STATUS_OK ) {
                rc = -EIO_ADMIN ( status );
                DBGC ( gve, "GVE %p AQ %#02x command %#04x failed: %#08x\n",
                       gve, index, opcode, status );
                DBGC_HDA ( gve, 0, cmd, sizeof ( *cmd ) );
                DBGC ( gve, "GVE %p AQ error: %s\n", gve, strerror ( rc ) );
                return rc;
        }

        DBGC2 ( gve, "GVE %p AQ %#02x command %#04x result:\n",
                gve, index, opcode );
        DBGC2_HDA ( gve, 0, cmd, sizeof ( *cmd ) );
        return 0;
}

/**
 * Issue simple admin queue command
 *
 * @v gve               GVE device
 * @v opcode            Operation code
 * @v id                ID parameter (or zero if not applicable)
 * @ret rc              Return status code
 *
 * Several admin queue commands take either an empty parameter list or
 * a single 32-bit ID parameter.
 */
static int gve_admin_simple ( struct gve_nic *gve, unsigned int opcode,
                              unsigned int id ) {
        union gve_admin_command *cmd;
        int rc;

        /* Construct request */
        cmd = gve_admin_command ( gve );
        cmd->hdr.opcode = opcode;
        cmd->simple.id = cpu_to_be32 ( id );

        /* Issue command */
        if ( ( rc = gve_admin ( gve ) ) != 0 )
                return rc;

        return 0;
}
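
/* Usage note (illustrative, not part of the original source): several
 * commands reduce to this shape.  gve_deconfigure() below issues
 * GVE_ADMIN_DECONFIGURE with a meaningless ID of zero, while
 * gve_unregister() passes the queue page list ID as its single
 * parameter.
 */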

/**
 * Get device descriptor
 *
 * @v gve               GVE device
 * @ret rc              Return status code
 */
static int gve_describe ( struct gve_nic *gve ) {
        struct net_device *netdev = gve->netdev;
        struct gve_device_descriptor *desc = &gve->scratch.buf->desc;
        union gve_admin_command *cmd;
        struct gve_option *opt;
        unsigned int count;
        unsigned int id;
        size_t offset;
        size_t max;
        size_t len;
        int rc;

        /* Construct request */
        cmd = gve_admin_command ( gve );
        cmd->hdr.opcode = GVE_ADMIN_DESCRIBE;
        cmd->desc.addr = cpu_to_be64 ( dma ( &gve->scratch.map, desc ) );
        cmd->desc.ver = cpu_to_be32 ( GVE_ADMIN_DESCRIBE_VER );
        cmd->desc.len = cpu_to_be32 ( sizeof ( *desc ) );

        /* Issue command */
        if ( ( rc = gve_admin ( gve ) ) != 0 )
                return rc;
        DBGC2 ( gve, "GVE %p device descriptor:\n", gve );
        DBGC2_HDA ( gve, 0, desc, sizeof ( *desc ) );

        /* Extract queue parameters */
        gve->events.count = be16_to_cpu ( desc->counters );
        gve->tx.count = be16_to_cpu ( desc->tx_count );
        gve->rx.count = be16_to_cpu ( desc->rx_count );
        DBGC ( gve, "GVE %p using %d TX, %d RX, %d events\n",
               gve, gve->tx.count, gve->rx.count, gve->events.count );

        /* Extract network parameters */
        build_assert ( sizeof ( desc->mac ) == ETH_ALEN );
        memcpy ( netdev->hw_addr, &desc->mac, sizeof ( desc->mac ) );
        netdev->mtu = be16_to_cpu ( desc->mtu );
        netdev->max_pkt_len = ( netdev->mtu + ETH_HLEN );
        DBGC ( gve, "GVE %p MAC %s (\"%s\") MTU %zd\n",
               gve, eth_ntoa ( netdev->hw_addr ),
               inet_ntoa ( desc->mac.in ), netdev->mtu );

        /* Parse options */
        count = be16_to_cpu ( desc->opt_count );
        max = be16_to_cpu ( desc->len );
        gve->options = 0;
        for ( offset = offsetof ( typeof ( *desc ), opts ) ; count ;
              count--, offset += len ) {

                /* Check space for option header */
                if ( ( offset + sizeof ( *opt ) ) > max ) {
                        DBGC ( gve, "GVE %p underlength option at +%#02zx:\n",
                               gve, offset );
                        DBGC_HDA ( gve, 0, desc, sizeof ( *desc ) );
                        return -EINVAL;
                }
                opt = ( ( ( void * ) desc ) + offset );

                /* Check space for option body */
                len = ( sizeof ( *opt ) + be16_to_cpu ( opt->len ) );
                if ( ( offset + len ) > max ) {
                        DBGC ( gve, "GVE %p malformed option at +%#02zx:\n",
                               gve, offset );
                        DBGC_HDA ( gve, 0, desc, sizeof ( *desc ) );
                        return -EINVAL;
                }

                /* Record option as supported */
                id = be16_to_cpu ( opt->id );
                if ( id < ( 8 * sizeof ( gve->options ) ) )
                        gve->options |= ( 1 << id );
        }
        DBGC ( gve, "GVE %p supports options %#08x\n", gve, gve->options );

        /* Select preferred operating mode */
        if ( gve->options & ( 1 << GVE_OPT_GQI_QPL ) ) {
                /* GQI-QPL: in-order queues, queue page list addressing */
                gve->mode = GVE_MODE_QPL;
        } else if ( gve->options & ( 1 << GVE_OPT_GQI_RDA ) ) {
                /* GQI-RDA: in-order queues, raw DMA addressing */
                gve->mode = 0;
        } else if ( gve->options & ( 1 << GVE_OPT_DQO_QPL ) ) {
                /* DQO-QPL: out-of-order queues, queue page list addressing */
                gve->mode = ( GVE_MODE_DQO | GVE_MODE_QPL );
        } else if ( gve->options & ( 1 << GVE_OPT_DQO_RDA ) ) {
                /* DQO-RDA: out-of-order queues, raw DMA addressing */
                gve->mode = GVE_MODE_DQO;
        } else {
                /* No options matched: assume the original GQI-QPL mode */
                gve->mode = GVE_MODE_QPL;
        }
        DBGC ( gve, "GVE %p using %s mode\n",
               gve, gve_mode_name ( gve->mode ) );

        return 0;
}

/**
 * Configure device resources
 *
 * @v gve               GVE device
 * @ret rc              Return status code
 */
static int gve_configure ( struct gve_nic *gve ) {
        struct gve_events *events = &gve->events;
        struct gve_irqs *irqs = &gve->irqs;
        union gve_admin_command *cmd;
        uint32_t doorbell;
        unsigned int db_off;
        unsigned int i;
        int rc;

        /* Construct request */
        cmd = gve_admin_command ( gve );
        cmd->hdr.opcode = GVE_ADMIN_CONFIGURE;
        cmd->conf.events =
                cpu_to_be64 ( dma ( &events->map, events->event ) );
        cmd->conf.irqs =
                cpu_to_be64 ( dma ( &irqs->map, irqs->irq ) );
        cmd->conf.num_events = cpu_to_be32 ( events->count );
        cmd->conf.num_irqs = cpu_to_be32 ( GVE_IRQ_COUNT );
        cmd->conf.irq_stride = cpu_to_be32 ( sizeof ( irqs->irq[0] ) );
        cmd->conf.format = GVE_FORMAT ( gve->mode );

        /* Issue command */
        if ( ( rc = gve_admin ( gve ) ) != 0 )
                return rc;

        /* Disable all interrupts */
        doorbell = ( ( gve->mode & GVE_MODE_DQO ) ?
                     GVE_DQO_IRQ_DISABLE : bswap_32 ( GVE_GQI_IRQ_DISABLE ) );
        for ( i = 0 ; i < GVE_IRQ_COUNT ; i++ ) {
                db_off = ( be32_to_cpu ( irqs->irq[i].db_idx ) *
                           sizeof ( uint32_t ) );
                DBGC ( gve, "GVE %p IRQ %d doorbell +%#04x\n", gve, i, db_off );
                irqs->db[i] = ( gve->db + db_off );
                writel ( doorbell, irqs->db[i] );
        }

        return 0;
}

/**
 * Deconfigure device resources
 *
 * @v gve               GVE device
 * @ret rc              Return status code
 */
static int gve_deconfigure ( struct gve_nic *gve ) {
        int rc;

        /* Issue command (with meaningless ID) */
        if ( ( rc = gve_admin_simple ( gve, GVE_ADMIN_DECONFIGURE, 0 ) ) != 0 )
                return rc;

        return 0;
}

/**
 * Register queue page list
 *
 * @v gve               GVE device
 * @v qpl               Queue page list
 * @ret rc              Return status code
 */
static int gve_register ( struct gve_nic *gve, struct gve_qpl *qpl ) {
        struct gve_pages *pages = &gve->scratch.buf->pages;
        union gve_admin_command *cmd;
        void *addr;
        unsigned int i;
        int rc;

        /* Do nothing if using raw DMA addressing */
        if ( ! ( gve->mode & GVE_MODE_QPL ) )
                return 0;

        /* Build page address list */
        for ( i = 0 ; i < qpl->count ; i++ ) {
                addr = ( qpl->data + ( i * GVE_PAGE_SIZE ) );
                pages->addr[i] = cpu_to_be64 ( dma ( &qpl->map, addr ) );
        }

        /* Construct request */
        cmd = gve_admin_command ( gve );
        cmd->hdr.opcode = GVE_ADMIN_REGISTER;
        cmd->reg.id = cpu_to_be32 ( qpl->id );
        cmd->reg.count = cpu_to_be32 ( qpl->count );
        cmd->reg.addr = cpu_to_be64 ( dma ( &gve->scratch.map, pages ) );
        cmd->reg.size = cpu_to_be64 ( GVE_PAGE_SIZE );

        /* Issue command */
        if ( ( rc = gve_admin ( gve ) ) != 0 )
                return rc;

        return 0;
}

/**
 * Unregister page list
 *
 * @v gve               GVE device
 * @v qpl               Queue page list
 * @ret rc              Return status code
 */
static int gve_unregister ( struct gve_nic *gve, struct gve_qpl *qpl ) {
        int rc;

        /* Do nothing if using raw DMA addressing */
        if ( ! ( gve->mode & GVE_MODE_QPL ) )
                return 0;

        /* Issue command */
        if ( ( rc = gve_admin_simple ( gve, GVE_ADMIN_UNREGISTER,
                                       qpl->id ) ) != 0 ) {
                return rc;
        }

        return 0;
}

/**
 * Construct command to create transmit queue
 *
 * @v queue             Transmit queue
 * @v qpl               Queue page list ID
 * @v cmd               Admin queue command
 */
static void gve_create_tx_param ( struct gve_queue *queue, uint32_t qpl,
                                  union gve_admin_command *cmd ) {
        struct gve_admin_create_tx *create = &cmd->create_tx;
        const struct gve_queue_type *type = queue->type;

        /* Construct request parameters */
        create->res = cpu_to_be64 ( dma ( &queue->res_map, queue->res ) );
        create->desc =
                cpu_to_be64 ( dma ( &queue->desc_map, queue->desc.raw ) );
        create->qpl_id = cpu_to_be32 ( qpl );
        create->notify_id = cpu_to_be32 ( type->irq );
        create->desc_count = cpu_to_be16 ( queue->count );
        if ( queue->cmplt.raw ) {
                create->cmplt = cpu_to_be64 ( dma ( &queue->cmplt_map,
                                                    queue->cmplt.raw ) );
                create->cmplt_count = cpu_to_be16 ( queue->count );
        }
}

/**
 * Construct command to create receive queue
 *
 * @v queue             Receive queue
 * @v qpl               Queue page list ID
 * @v cmd               Admin queue command
 */
static void gve_create_rx_param ( struct gve_queue *queue, uint32_t qpl,
                                  union gve_admin_command *cmd ) {
        struct gve_admin_create_rx *create = &cmd->create_rx;
        const struct gve_queue_type *type = queue->type;

        /* Construct request parameters */
        create->notify_id = cpu_to_be32 ( type->irq );
        create->res = cpu_to_be64 ( dma ( &queue->res_map, queue->res ) );
        create->desc =
                cpu_to_be64 ( dma ( &queue->desc_map, queue->desc.raw ) );
        create->cmplt =
                cpu_to_be64 ( dma ( &queue->cmplt_map, queue->cmplt.raw ) );
        create->qpl_id = cpu_to_be32 ( qpl );
        create->desc_count = cpu_to_be16 ( queue->count );
        create->bufsz = cpu_to_be16 ( GVE_BUF_SIZE );
        create->cmplt_count = cpu_to_be16 ( queue->count );
}

/**
 * Create transmit or receive queue
 *
 * @v gve               GVE device
 * @v queue             Descriptor queue
 * @ret rc              Return status code
 */
static int gve_create_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
        const struct gve_queue_type *type = queue->type;
        const struct gve_queue_stride *stride = &queue->stride;
        union gve_admin_command *cmd;
        struct gve_buffer *buf;
        unsigned int db_off;
        unsigned int evt_idx;
        unsigned int tag;
        unsigned int i;
        uint32_t qpl;
        int rc;

        /* Reset queue */
        queue->prod = 0;
        queue->cons = 0;
        queue->done = 0;
        memset ( queue->desc.raw, 0, ( queue->count * stride->desc ) );
        memset ( queue->cmplt.raw, 0, ( queue->count * stride->cmplt ) );
        for ( i = 0 ; i < queue->fill ; i++ )
                queue->tag[i] = i;

        /* Pre-populate descriptor offsets for in-order queues */
        if ( ! ( gve->mode & GVE_MODE_DQO ) ) {
                buf = ( queue->desc.raw + stride->desc - sizeof ( *buf ) );
                for ( i = 0 ; i < queue->count ; i++ ) {
                        tag = ( i & ( queue->fill - 1 ) );
                        buf->addr = cpu_to_be64 ( gve_address ( queue, tag ) );
                        buf = ( ( ( void * ) buf ) + stride->desc );
                }
        }

        /* Construct request */
        cmd = gve_admin_command ( gve );
        cmd->hdr.opcode = type->create;
        qpl = ( ( gve->mode & GVE_MODE_QPL ) ? type->qpl : GVE_RAW_QPL );
        type->param ( queue, qpl, cmd );

        /* Issue command */
        if ( ( rc = gve_admin ( gve ) ) != 0 )
                return rc;

        /* Record indices */
        db_off = ( be32_to_cpu ( queue->res->db_idx ) * sizeof ( uint32_t ) );
        evt_idx = be32_to_cpu ( queue->res->evt_idx );
        DBGC ( gve, "GVE %p %s doorbell +%#04x event counter %d\n",
               gve, type->name, db_off, evt_idx );
        queue->db = ( gve->db + db_off );
        assert ( evt_idx < gve->events.count );
        queue->event = &gve->events.event[evt_idx];
        assert ( queue->event->count == 0 );

        /* Unmask dummy interrupt */
        pci_msix_unmask ( &gve->msix, type->irq );

        /* Rearm queue interrupt if applicable */
        if ( gve->mode & GVE_MODE_DQO )
                writel ( GVE_DQO_IRQ_REARM, gve->irqs.db[type->irq] );

        return 0;
}

/**
 * Destroy transmit or receive queue
 *
 * @v gve               GVE device
 * @v queue             Descriptor queue
 * @ret rc              Return status code
 */
static int gve_destroy_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
        const struct gve_queue_type *type = queue->type;
        int rc;

        /* Mask dummy interrupt */
        pci_msix_mask ( &gve->msix, type->irq );

        /* Issue command */
        if ( ( rc = gve_admin_simple ( gve, type->destroy, 0 ) ) != 0 )
                return rc;

        return 0;
}

/******************************************************************************
 *
 * Network device interface
 *
 ******************************************************************************
 */

/**
 * Allocate shared queue resources
 *
 * @v gve               GVE device
 * @ret rc              Return status code
 */
static int gve_alloc_shared ( struct gve_nic *gve ) {
        struct dma_device *dma = gve->dma;
        struct gve_irqs *irqs = &gve->irqs;
        struct gve_events *events = &gve->events;
        size_t irqs_len = ( GVE_IRQ_COUNT * sizeof ( irqs->irq[0] ) );
        size_t events_len = ( gve->events.count * sizeof ( events->event[0] ) );
        int rc;

        /* Allocate interrupt channels */
        irqs->irq = dma_alloc ( dma, &irqs->map, irqs_len, GVE_ALIGN );
        if ( ! irqs->irq ) {
                rc = -ENOMEM;
                goto err_irqs;
        }
        DBGC ( gve, "GVE %p IRQs at [%08lx,%08lx)\n",
               gve, virt_to_phys ( irqs->irq ),
               ( virt_to_phys ( irqs->irq ) + irqs_len ) );

        /* Allocate event counters */
        events->event = dma_alloc ( dma, &events->map, events_len, GVE_ALIGN );
        if ( ! events->event ) {
                rc = -ENOMEM;
                goto err_events;
        }
        DBGC ( gve, "GVE %p events at [%08lx,%08lx)\n",
               gve, virt_to_phys ( events->event ),
               ( virt_to_phys ( events->event ) + events_len ) );

        return 0;

        dma_free ( &events->map, events->event, events_len );
 err_events:
        dma_free ( &irqs->map, irqs->irq, irqs_len );
 err_irqs:
        return rc;
}

/**
 * Free shared queue resources
 *
 * @v gve               GVE device
 */
static void gve_free_shared ( struct gve_nic *gve ) {
        struct gve_irqs *irqs = &gve->irqs;
        struct gve_events *events = &gve->events;
        size_t irqs_len = ( GVE_IRQ_COUNT * sizeof ( irqs->irq[0] ) );
        size_t events_len = ( gve->events.count * sizeof ( events->event[0] ) );

        /* Free event counters */
        dma_free ( &events->map, events->event, events_len );

        /* Free interrupt channels */
        dma_free ( &irqs->map, irqs->irq, irqs_len );
}

/**
 * Allocate queue page list
 *
 * @v gve               GVE device
 * @v qpl               Queue page list
 * @v id                Queue page list ID
 * @v buffers           Number of data buffers
 * @ret rc              Return status code
 */
static int gve_alloc_qpl ( struct gve_nic *gve, struct gve_qpl *qpl,
                           uint32_t id, unsigned int buffers ) {
        size_t len;

        /* Record ID */
        qpl->id = id;

        /* Calculate number of pages required */
        build_assert ( GVE_BUF_SIZE <= GVE_PAGE_SIZE );
        qpl->count = ( ( buffers + GVE_BUF_PER_PAGE - 1 ) / GVE_BUF_PER_PAGE );
        assert ( qpl->count <= GVE_QPL_MAX );

        /* Allocate pages (as a single block) */
        len = ( qpl->count * GVE_PAGE_SIZE );
        qpl->data = dma_umalloc ( gve->dma, &qpl->map, len, GVE_ALIGN );
        if ( ! qpl->data )
                return -ENOMEM;
        qpl->base = ( ( gve->mode == GVE_MODE_QPL ) ?
                      0 : dma ( &qpl->map, qpl->data ) );

        DBGC ( gve, "GVE %p QPL %#08x at [%08lx,%08lx)\n",
               gve, qpl->id, virt_to_phys ( qpl->data ),
               ( virt_to_phys ( qpl->data ) + len ) );
        return 0;
}
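
/* Worked example (illustrative, with assumed values): if GVE_PAGE_SIZE
 * were 4096 and GVE_BUF_PER_PAGE were 2, a request for eight buffers
 * would round up to ( 8 + 2 - 1 ) / 2 == 4 pages, allocated as one
 * physically contiguous block so that gve_offset() arithmetic works
 * across page boundaries.
 */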

/**
 * Free queue page list
 *
 * @v gve               GVE device
 * @v qpl               Queue page list
 */
static void gve_free_qpl ( struct gve_nic *gve __unused,
                           struct gve_qpl *qpl ) {
        size_t len = ( qpl->count * GVE_PAGE_SIZE );

        /* Free pages */
        dma_ufree ( &qpl->map, qpl->data, len );
}

/**
 * Calculate next receive sequence number
 *
 * @v seq               Current sequence number, or zero to start sequence
 * @ret next            Next sequence number
 */
static inline __attribute__ (( always_inline )) unsigned int
gve_next ( unsigned int seq ) {

        /* The receive completion sequence number is a modulo 7
         * counter that cycles through the non-zero three-bit values 1
         * to 7 inclusive.
         *
         * Since 7 is coprime to 2^n, this ensures that the sequence
         * number changes each time that a new completion is written
         * to memory.
         *
         * Since the counter takes only non-zero values, this ensures
         * that the sequence number changes whenever a new completion
         * is first written to a zero-initialised completion ring.
         */
        seq = ( ( seq + 1 ) & GVE_GQI_RX_SEQ_MASK );
        return ( seq ? seq : 1 );
}
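
/* Worked example (illustrative, not part of the original source):
 * starting from gve_next(0) == 1, the expected sequence runs
 * 1,2,3,4,5,6,7,1,2,...  Because the cycle length of 7 is coprime to
 * any power-of-two ring size, consecutive passes over the same ring
 * slot expect different sequence numbers, and the value 0 left in a
 * zero-initialised completion ring is never a valid sequence number.
 */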

/**
 * Allocate descriptor queue
 *
 * @v gve               GVE device
 * @v queue             Descriptor queue
 * @ret rc              Return status code
 */
static int gve_alloc_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
        const struct gve_queue_type *type = queue->type;
        struct gve_queue_stride *stride = &queue->stride;
        struct dma_device *dma = gve->dma;
        size_t desc_len;
        size_t cmplt_len;
        size_t res_len;
        int rc;

        /* Sanity checks */
        if ( ( queue->count == 0 ) ||
             ( queue->count & ( queue->count - 1 ) ) ) {
                DBGC ( gve, "GVE %p %s invalid queue size %d\n",
                       gve, type->name, queue->count );
                rc = -EINVAL;
                goto err_sanity;
        }

        /* Set queue strides and calculate total lengths */
        *stride = ( ( gve->mode & GVE_MODE_DQO ) ?
                    type->stride.dqo : type->stride.gqi );
        desc_len = ( queue->count * stride->desc );
        cmplt_len = ( queue->count * stride->cmplt );
        res_len = sizeof ( *queue->res );

        /* Calculate maximum fill level */
        assert ( ( type->fill & ( type->fill - 1 ) ) == 0 );
        queue->fill = type->fill;
        if ( queue->fill > queue->count )
                queue->fill = queue->count;
        DBGC ( gve, "GVE %p %s using QPL %#08x with %d/%d descriptors\n",
               gve, type->name, type->qpl, queue->fill, queue->count );

        /* Allocate queue page list */
        if ( ( rc = gve_alloc_qpl ( gve, &queue->qpl, type->qpl,
                                    queue->fill ) ) != 0 )
                goto err_qpl;

        /* Allocate descriptors */
        queue->desc.raw = dma_umalloc ( dma, &queue->desc_map, desc_len,
                                        GVE_ALIGN );
        if ( ! queue->desc.raw ) {
                rc = -ENOMEM;
                goto err_desc;
        }
        DBGC ( gve, "GVE %p %s descriptors at [%08lx,%08lx)\n",
               gve, type->name, virt_to_phys ( queue->desc.raw ),
               ( virt_to_phys ( queue->desc.raw ) + desc_len ) );

        /* Allocate completions */
        if ( cmplt_len ) {
                queue->cmplt.raw = dma_umalloc ( dma, &queue->cmplt_map,
                                                 cmplt_len, GVE_ALIGN );
                if ( ! queue->cmplt.raw ) {
                        rc = -ENOMEM;
                        goto err_cmplt;
                }
                DBGC ( gve, "GVE %p %s completions at [%08lx,%08lx)\n",
                       gve, type->name, virt_to_phys ( queue->cmplt.raw ),
                       ( virt_to_phys ( queue->cmplt.raw ) + cmplt_len ) );
        }

        /* Allocate queue resources */
        queue->res = dma_alloc ( dma, &queue->res_map, res_len, GVE_ALIGN );
        if ( ! queue->res ) {
                rc = -ENOMEM;
                goto err_res;
        }
        memset ( queue->res, 0, res_len );

        return 0;

        dma_free ( &queue->res_map, queue->res, res_len );
 err_res:
        if ( cmplt_len )
                dma_ufree ( &queue->cmplt_map, queue->cmplt.raw, cmplt_len );
 err_cmplt:
        dma_ufree ( &queue->desc_map, queue->desc.raw, desc_len );
 err_desc:
        gve_free_qpl ( gve, &queue->qpl );
 err_qpl:
 err_sanity:
        return rc;
}
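
/* Illustrative note (not part of the original source): the
 * power-of-two check matters because producer and consumer indices
 * are free-running counters masked with ( count - 1 ): with a
 * 256-entry ring, index 260 maps to slot 260 & 0xff == 4.  A
 * non-power-of-two count would make that masking wrap incorrectly.
 */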

/**
 * Free descriptor queue
 *
 * @v gve               GVE device
 * @v queue             Descriptor queue
 */
static void gve_free_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
        const struct gve_queue_stride *stride = &queue->stride;
        size_t desc_len = ( queue->count * stride->desc );
        size_t cmplt_len = ( queue->count * stride->cmplt );
        size_t res_len = sizeof ( *queue->res );

        /* Free queue resources */
        dma_free ( &queue->res_map, queue->res, res_len );

        /* Free completions, if applicable */
        if ( cmplt_len )
                dma_ufree ( &queue->cmplt_map, queue->cmplt.raw, cmplt_len );

        /* Free descriptors */
        dma_ufree ( &queue->desc_map, queue->desc.raw, desc_len );

        /* Free queue page list */
        gve_free_qpl ( gve, &queue->qpl );
}

/**
 * Cancel any pending transmissions
 *
 * @v gve               GVE device
 */
static void gve_cancel_tx ( struct gve_nic *gve ) {
        struct net_device *netdev = gve->netdev;
        struct io_buffer *iobuf;
        unsigned int i;

        /* Cancel any pending transmissions */
        for ( i = 0 ; i < ( sizeof ( gve->tx_iobuf ) /
                            sizeof ( gve->tx_iobuf[0] ) ) ; i++ ) {
                iobuf = gve->tx_iobuf[i];
                gve->tx_iobuf[i] = NULL;
                if ( iobuf )
                        netdev_tx_complete_err ( netdev, iobuf, -ECANCELED );
        }
}

/**
 * Start up device
 *
 * @v gve               GVE device
 * @ret rc              Return status code
 */
static int gve_start ( struct gve_nic *gve ) {
        struct gve_queue *tx = &gve->tx;
        struct gve_queue *rx = &gve->rx;
        int rc;

        /* Cancel any pending transmissions */
        gve_cancel_tx ( gve );

        /* Reset receive sequence */
        gve->seq = gve_next ( 0 );

        /* Configure device resources */
        if ( ( rc = gve_configure ( gve ) ) != 0 )
                goto err_configure;

        /* Register transmit queue page list */
        if ( ( rc = gve_register ( gve, &tx->qpl ) ) != 0 )
                goto err_register_tx;

        /* Register receive queue page list */
        if ( ( rc = gve_register ( gve, &rx->qpl ) ) != 0 )
                goto err_register_rx;

        /* Create transmit queue */
        if ( ( rc = gve_create_queue ( gve, tx ) ) != 0 )
                goto err_create_tx;

        /* Create receive queue */
        if ( ( rc = gve_create_queue ( gve, rx ) ) != 0 )
                goto err_create_rx;

        return 0;

        gve_destroy_queue ( gve, rx );
 err_create_rx:
        gve_destroy_queue ( gve, tx );
 err_create_tx:
        gve_unregister ( gve, &rx->qpl );
 err_register_rx:
        gve_unregister ( gve, &tx->qpl );
 err_register_tx:
        gve_deconfigure ( gve );
 err_configure:
        return rc;
}

/**
 * Stop device
 *
 * @v gve               GVE device
 */
static void gve_stop ( struct gve_nic *gve ) {
        struct gve_queue *tx = &gve->tx;
        struct gve_queue *rx = &gve->rx;

        /* Destroy queues */
        gve_destroy_queue ( gve, rx );
        gve_destroy_queue ( gve, tx );

        /* Unregister page lists */
        gve_unregister ( gve, &rx->qpl );
        gve_unregister ( gve, &tx->qpl );

        /* Deconfigure device */
        gve_deconfigure ( gve );
}

/**
 * Device startup process
 *
 * @v gve               GVE device
 */
static void gve_startup ( struct gve_nic *gve ) {
        struct net_device *netdev = gve->netdev;
        int rc;

        /* Reset device */
        if ( ( rc = gve_reset ( gve ) ) != 0 )
                goto err_reset;

        /* Enable admin queue */
        gve_admin_enable ( gve );

        /* Start device */
        if ( ( rc = gve_start ( gve ) ) != 0 )
                goto err_start;

        /* Reset retry count */
        gve->retries = 0;

        /* (Ab)use link status to report startup status */
        netdev_link_up ( netdev );

        return;

        gve_stop ( gve );
 err_start:
 err_reset:
        DBGC ( gve, "GVE %p startup failed: %s\n", gve, strerror ( rc ) );
        netdev_link_err ( netdev, rc );
        if ( gve->retries++ < GVE_RESET_MAX_RETRY )
                process_add ( &gve->startup );
}

/**
 * Trigger startup process
 *
 * @v gve               GVE device
 */
static void gve_restart ( struct gve_nic *gve ) {
        struct net_device *netdev = gve->netdev;

        /* Mark link down to inhibit polling and transmit activity */
        netdev_link_down ( netdev );

        /* Schedule startup process */
        process_add ( &gve->startup );
}

/**
 * Reset recovery watchdog
 *
 * @v timer             Reset recovery watchdog timer
 * @v over              Failure indicator
 */
static void gve_watchdog ( struct retry_timer *timer, int over __unused ) {
        struct gve_nic *gve = container_of ( timer, struct gve_nic, watchdog );
        uint32_t activity;
        uint32_t pfn;
        int rc;

        /* Reschedule watchdog */
        start_timer_fixed ( &gve->watchdog, GVE_WATCHDOG_TIMEOUT );

        /* Reset device (for test purposes) if applicable */
        if ( ( rc = inject_fault ( VM_MIGRATED_RATE ) ) != 0 ) {
                DBGC ( gve, "GVE %p synthesising host reset\n", gve );
                writel ( 0, gve->cfg + GVE_CFG_ADMIN_PFN );
        }

        /* Check for activity since last timer invocation */
        activity = ( gve->tx.cons + gve->rx.cons );
        if ( activity != gve->activity ) {
                gve->activity = activity;
                return;
        }

        /* Check for reset */
        pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
        if ( pfn ) {
                DBGC2 ( gve, "GVE %p idle but not in reset\n", gve );
                return;
        }

        /* Schedule restart */
        DBGC ( gve, "GVE %p watchdog detected reset by host\n", gve );
        gve_restart ( gve );
}

/**
 * Open network device
 *
 * @v netdev            Network device
 * @ret rc              Return status code
 */
static int gve_open ( struct net_device *netdev ) {
        struct gve_nic *gve = netdev->priv;
        struct gve_queue *tx = &gve->tx;
        struct gve_queue *rx = &gve->rx;
        int rc;

        /* Allocate shared queue resources */
        if ( ( rc = gve_alloc_shared ( gve ) ) != 0 )
                goto err_alloc_shared;

        /* Allocate and prepopulate transmit queue */
        if ( ( rc = gve_alloc_queue ( gve, tx ) ) != 0 )
                goto err_alloc_tx;

        /* Allocate and prepopulate receive queue */
        if ( ( rc = gve_alloc_queue ( gve, rx ) ) != 0 )
                goto err_alloc_rx;

        /* Trigger startup */
        gve_restart ( gve );

        /* Start reset recovery watchdog timer */
        start_timer_fixed ( &gve->watchdog, GVE_WATCHDOG_TIMEOUT );

        return 0;

        gve_free_queue ( gve, rx );
 err_alloc_rx:
        gve_free_queue ( gve, tx );
 err_alloc_tx:
        gve_free_shared ( gve );
 err_alloc_shared:
        return rc;
}

/**
 * Close network device
 *
 * @v netdev            Network device
 */
static void gve_close ( struct net_device *netdev ) {
        struct gve_nic *gve = netdev->priv;
        struct gve_queue *tx = &gve->tx;
        struct gve_queue *rx = &gve->rx;

        /* Stop reset recovery timer */
        stop_timer ( &gve->watchdog );

        /* Terminate startup process */
        process_del ( &gve->startup );

        /* Stop and reset device */
        gve_stop ( gve );
        gve_reset ( gve );

        /* Cancel any pending transmissions */
        gve_cancel_tx ( gve );

        /* Free queues */
        gve_free_queue ( gve, rx );
        gve_free_queue ( gve, tx );

        /* Free shared queue resources */
        gve_free_shared ( gve );
}

/**
 * Transmit packet
 *
 * @v netdev            Network device
 * @v iobuf             I/O buffer
 * @ret rc              Return status code
 */
static int gve_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
        struct gve_nic *gve = netdev->priv;
        struct gve_queue *tx = &gve->tx;
        struct gve_gqi_tx_descriptor *gqi;
        struct gve_dqo_tx_descriptor *dqo;
        unsigned int count;
        unsigned int index;
        unsigned int tag;
        unsigned int chain;
        uint32_t doorbell;
        size_t frag_len;
        size_t offset;
        size_t next;
        size_t len;

        /* Do nothing if queues are not yet set up */
        if ( ! netdev_link_ok ( netdev ) )
                return -ENETDOWN;

        /* Defer packet if there is no space in the transmit ring */
        len = iob_len ( iobuf );
        count = ( ( len + GVE_BUF_SIZE - 1 ) / GVE_BUF_SIZE );
        if ( ( ( tx->prod - tx->cons ) + count ) > tx->fill ) {
                netdev_tx_defer ( netdev, iobuf );
                return 0;
        }

        /* Copy packet to queue pages and populate descriptors */
        for ( offset = 0, chain = 0 ; ; offset = next, chain = tag ) {

                /* Identify next available buffer */
                index = ( tx->prod++ & ( tx->count - 1 ) );
                tag = tx->tag[ index % GVE_TX_FILL ];

                /* Sanity check */
                assert ( gve->tx_iobuf[tag] == NULL );

                /* Copy packet fragment */
                frag_len = ( len - offset );
                if ( frag_len > GVE_BUF_SIZE )
                        frag_len = GVE_BUF_SIZE;
                memcpy ( gve_buffer ( tx, tag ),
                         ( iobuf->data + offset ), frag_len );
                next = ( offset + frag_len );

                /* Populate descriptor */
                if ( gve->mode & GVE_MODE_DQO ) {

                        /* Out-of-order descriptor */
                        dqo = &tx->desc.tx.dqo[index];
                        dqo->buf.addr =
                                cpu_to_le64 ( gve_address ( tx, tag ) );
                        if ( next == len ) {
                                dqo->type = ( GVE_DQO_TX_TYPE_PACKET |
                                              GVE_DQO_TX_LAST );
                                dqo->tag.id = tag;
                                dqo->tag.count = count;
                        } else {
                                dqo->type = GVE_DQO_TX_TYPE_PACKET;
                                dqo->tag.id = 0;
                                dqo->tag.count = 0;
                        }
                        dqo->len = cpu_to_le16 ( frag_len );
                        gve->tx_chain[tag] = chain;

                } else {

                        /* In-order descriptor */
                        gqi = &tx->desc.tx.gqi[index];
                        if ( offset ) {
                                gqi->type = GVE_GQI_TX_TYPE_CONT;
                                gqi->count = 0;
                                gqi->total = 0;
                        } else {
                                gqi->type = GVE_GQI_TX_TYPE_START;
                                gqi->count = count;
                                gqi->total = cpu_to_be16 ( len );
                        }
                        gqi->len = cpu_to_be16 ( frag_len );

                }
                DBGC2 ( gve, "GVE %p TXD %#04x %#02x:%#02x len %#04zx/%#04zx "
                        "at %#08lx\n", gve, index, tag, count, frag_len, len,
                        gve_address ( tx, tag ) );

                /* Record I/O buffer against final descriptor */
                if ( next == len ) {
                        gve->tx_iobuf[tag] = iobuf;
                        break;
                }
        }
        assert ( ( tx->prod - tx->cons ) <= tx->fill );

        /* Ring doorbell */
        doorbell = tx->prod;
        if ( gve->mode & GVE_MODE_DQO ) {
                doorbell &= ( tx->count - 1 );
        } else {
                doorbell = bswap_32 ( doorbell );
        }
        wmb();
        writel ( doorbell, tx->db );

        return 0;
}
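
/* Illustrative note (not part of the original source): a frame longer
 * than GVE_BUF_SIZE is copied into ceil(len/GVE_BUF_SIZE) consecutive
 * ring buffers, one descriptor per fragment.  Only the final
 * fragment's tag records the I/O buffer, so the packet is handed back
 * to the network stack exactly once, when its last fragment
 * completes.
 */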

/**
 * Poll for completed transmissions
 *
 * @v netdev            Network device
 */
static void gve_poll_tx ( struct net_device *netdev ) {
        struct gve_nic *gve = netdev->priv;
        struct gve_queue *tx = &gve->tx;
        struct gve_dqo_tx_completion *dqo;
        struct io_buffer *iobuf;
        unsigned int index;
        unsigned int gen;
        unsigned int bit;
        unsigned int tag;
        uint32_t count;

        /* Process transmit completions */
        if ( gve->mode & GVE_MODE_DQO ) {

                /* Out-of-order completions */
                while ( 1 ) {

                        /* Read next possible completion */
                        gen = ( tx->done & tx->count );
                        index = ( tx->done & ( tx->count - 1 ) );
                        dqo = &tx->cmplt.tx.dqo[index];

                        /* Check generation bit */
                        bit = ( dqo->flags & GVE_DQO_TXF_GEN );
                        if ( ( !! bit ) == ( !! gen ) )
                                break;
                        rmb();
                        tx->done++;

                        /* Ignore non-packet completions */
                        if ( ( ! ( dqo->flags & GVE_DQO_TXF_PKT ) ) ||
                             ( dqo->tag.count < 0 ) ) {
                                DBGC2 ( gve, "GVE %p TXC %#04x flags %#02x "
                                        "ignored\n", gve, index, dqo->flags );
                                continue;
                        }

                        /* Parse completion */
                        tag = dqo->tag.id;
                        count = dqo->tag.count;
                        iobuf = gve->tx_iobuf[tag];
                        gve->tx_iobuf[tag] = NULL;
                        assert ( iobuf != NULL );

                        /* Return completed descriptors to ring */
                        while ( count-- ) {
                                DBGC2 ( gve, "GVE %p TXC %#04x %#02x:%#02x "
                                        "complete\n", gve, index, tag,
                                        dqo->tag.count );
                                tx->tag[ tx->cons++ % GVE_TX_FILL ] = tag;
                                tag = gve->tx_chain[tag];
                        }

                        /* Hand off to network stack */
                        if ( iobuf )
                                netdev_tx_complete ( netdev, iobuf );
                }

        } else {

                /* Read event counter */
                count = be32_to_cpu ( tx->event->count );

                /* Process transmit completions */
                while ( count != tx->cons ) {
                        DBGC2 ( gve, "GVE %p TXC %#04x complete\n",
                                gve, tx->cons );
                        tag = ( tx->cons % GVE_TX_FILL );
                        iobuf = gve->tx_iobuf[tag];
                        gve->tx_iobuf[tag] = NULL;
                        tx->cons++;
                        if ( iobuf )
                                netdev_tx_complete ( netdev, iobuf );
                }
        }
}
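
/* Illustrative note (not part of the original source): the DQO
 * generation bit distinguishes fresh completions from stale ones.
 * The expected phase is ( tx->done & tx->count ): zero on
 * even-numbered passes over the ring, non-zero on odd-numbered
 * passes.  Polling stops as soon as an entry's GVE_DQO_TXF_GEN flag
 * matches the phase, since such an entry was written on the previous
 * pass (or never written at all).
 */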

/**
 * Poll for received packets
 *
 * @v netdev            Network device
 */
static void gve_poll_rx ( struct net_device *netdev ) {
        struct gve_nic *gve = netdev->priv;
        struct gve_queue *rx = &gve->rx;
        struct gve_gqi_rx_completion *gqi;
        struct gve_dqo_rx_completion *dqo;
        struct io_buffer *iobuf;
        unsigned int index;
        unsigned int gen;
        unsigned int bit;
        unsigned int seq;
        unsigned int tag;
        uint32_t done;
        size_t total;
        size_t len;
        int rc;

        /* Process receive completions */
        done = rx->done;
        seq = gve->seq;
        total = 0;
        while ( 1 ) {

                /* Read next possible completion */
                rc = 0;
                gen = ( done & rx->count );
                index = ( done++ & ( rx->count - 1 ) );
                if ( gve->mode & GVE_MODE_DQO ) {

                        /* Out-of-order completion */
                        dqo = &rx->cmplt.rx.dqo[index];

                        /* Check generation bit */
                        bit = ( dqo->len & cpu_to_le16 ( GVE_DQO_RXL_GEN ) );
                        if ( ( !! bit ) == ( !! gen ) )
                                break;
                        rmb();

                        /* Parse completion */
                        len = ( le16_to_cpu ( dqo->len ) &
                                ( GVE_BUF_SIZE - 1 ) );
                        tag = dqo->tag;
                        DBGC2 ( gve, "GVE %p RXC %#04x %#02x:%#02x len %#04zx "
                                "at %#08zx\n", gve, index, tag, dqo->flags,
                                len, gve_offset ( rx, tag ) );

                        /* Accumulate a complete packet */
                        if ( dqo->status & GVE_DQO_RXS_ERROR ) {
                                rc = -EIO;
                                total = 0;
                        } else {
                                total += len;
                                if ( ! ( dqo->flags & GVE_DQO_RXF_LAST ) )
                                        continue;
                        }

                } else {

                        /* In-order completion */
                        gqi = &rx->cmplt.rx.gqi[index];

                        /* Check sequence number */
                        if ( ( gqi->seq & GVE_GQI_RX_SEQ_MASK ) != seq )
                                break;
                        rmb();
                        seq = gve_next ( seq );

                        /* Parse completion */
                        len = be16_to_cpu ( gqi->len );
                        tag = ( index % GVE_RX_FILL );
                        DBGC2 ( gve, "GVE %p RXC %#04x %#02x:%#02x len %#04zx "
                                "at %#08zx\n", gve, index, gqi->seq,
                                gqi->flags, len, gve_offset ( rx, tag ) );

                        /* Accumulate a complete packet */
                        if ( gqi->flags & GVE_GQI_RXF_ERROR ) {
                                rc = -EIO;
                                total = 0;
                        } else {
                                total += len;
                                if ( gqi->flags & GVE_GQI_RXF_MORE )
                                        continue;
                        }
                        gve->seq = seq;
                }

                /* Allocate and populate I/O buffer */
                iobuf = ( total ? alloc_iob ( total ) : NULL );
                for ( ; rx->done != done ; rx->done++ ) {

                        /* Re-read completion and return tag to ring */
                        index = ( rx->done & ( rx->count - 1 ) );
                        if ( gve->mode & GVE_MODE_DQO ) {
                                dqo = &rx->cmplt.rx.dqo[index];
                                tag = dqo->tag;
                                len = ( le16_to_cpu ( dqo->len ) &
                                        ( GVE_BUF_SIZE - 1 ) );
                                rx->tag[ rx->cons++ % GVE_RX_FILL ] = tag;
                        } else {
                                gqi = &rx->cmplt.rx.gqi[index];
                                tag = ( index % GVE_RX_FILL );
                                len = be16_to_cpu ( gqi->len );
                                assert ( rx->cons == rx->done );
                                rx->cons++;
                        }

                        /* Copy data */
                        if ( iobuf ) {
                                memcpy ( iob_put ( iobuf, len ),
                                         gve_buffer ( rx, tag ), len );
                        }
                }
                assert ( ( iobuf == NULL ) || ( iob_len ( iobuf ) == total ) );
                total = 0;

                /* Hand off packet to network stack */
                if ( iobuf ) {
                        if ( ! ( gve->mode & GVE_MODE_DQO ) )
                                iob_pull ( iobuf, GVE_GQI_RX_PAD );
                        netdev_rx ( netdev, iobuf );
                } else {
                        netdev_rx_err ( netdev, NULL, ( rc ? rc : -ENOMEM ) );
                }
        }
}

/**
 * Refill receive queue
 *
 * @v netdev            Network device
 */
static void gve_refill_rx ( struct net_device *netdev ) {
        struct gve_nic *gve = netdev->priv;
        struct gve_queue *rx = &gve->rx;
        struct gve_dqo_rx_descriptor *dqo;
        unsigned int refill;
        unsigned int index;
        unsigned int tag;
        uint32_t doorbell;

        /* Calculate refill quantity */
        doorbell = ( rx->cons + rx->fill );
        refill = ( doorbell - rx->prod );
        if ( ! refill )
                return;

        /* Refill ring */
        if ( gve->mode & GVE_MODE_DQO ) {

                /* Out-of-order descriptors */
                while ( refill-- ) {

                        /* Identify next available buffer */
                        index = ( rx->prod++ & ( rx->count - 1 ) );
                        tag = rx->tag[ index % GVE_RX_FILL ];

                        /* Populate descriptor */
                        dqo = &rx->desc.rx.dqo[index];
                        dqo->tag = tag;
                        dqo->buf.addr =
                                cpu_to_le64 ( gve_address ( rx, tag ) );
                        DBGC2 ( gve, "GVE %p RXD %#04x:%#02x at %#08llx\n",
                                gve, index, dqo->tag,
                                ( ( unsigned long long )
                                  le64_to_cpu ( dqo->buf.addr ) ) );
                }
                wmb();
                assert ( rx->prod == doorbell );

        } else {

                /* The in-order receive descriptors are prepopulated
                 * at the time of creating the receive queue (pointing
                 * to the preallocated queue pages).  Refilling is
                 * therefore just a case of ringing the doorbell if
                 * the device is not yet aware of any available
                 * descriptors.
                 */
                rx->prod += refill;
                assert ( rx->prod == doorbell );
                DBGC2 ( gve, "GVE %p RXD %#04x ready\n", gve, rx->prod );

                /* Doorbell is big-endian */
                doorbell = bswap_32 ( doorbell );
        }

        /* Ring doorbell */
        writel ( doorbell, rx->db );
}
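
/* Worked example (illustrative, not part of the original source):
 * with a 32-entry ring, a fill level of 8, rx->cons == 4 and
 * rx->prod == 8, the target doorbell value is cons + fill == 12 and
 * four descriptors are (re)published before the doorbell write.  In
 * GQI mode the descriptors are already in place, so only the
 * (big-endian) doorbell write is required.
 */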

/**
 * Poll for completed and received packets
 *
 * @v netdev            Network device
 */
static void gve_poll ( struct net_device *netdev ) {
        struct gve_nic *gve = netdev->priv;

        /* Do nothing if queues are not yet set up */
        if ( ! netdev_link_ok ( netdev ) )
                return;

        /* Poll for transmit completions */
        gve_poll_tx ( netdev );

        /* Poll for receive completions */
        gve_poll_rx ( netdev );

        /* Refill receive queue */
        gve_refill_rx ( netdev );

        /* Rearm queue interrupts if applicable */
        if ( gve->mode & GVE_MODE_DQO ) {
                writel ( GVE_DQO_IRQ_REARM, gve->irqs.db[GVE_TX_IRQ] );
                writel ( GVE_DQO_IRQ_REARM, gve->irqs.db[GVE_RX_IRQ] );
        }
}

/** GVE network device operations */
static struct net_device_operations gve_operations = {
        .open = gve_open,
        .close = gve_close,
        .transmit = gve_transmit,
        .poll = gve_poll,
};

/******************************************************************************
 *
 * PCI interface
 *
 ******************************************************************************
 */

/** Transmit descriptor queue type */
static const struct gve_queue_type gve_tx_type = {
        .name = "TX",
        .param = gve_create_tx_param,
        .qpl = GVE_TX_QPL,
        .irq = GVE_TX_IRQ,
        .fill = GVE_TX_FILL,
        .stride = {
                .gqi = {
                        .desc = sizeof ( struct gve_gqi_tx_descriptor ),
                },
                .dqo = {
                        .desc = sizeof ( struct gve_dqo_tx_descriptor ),
                        .cmplt = sizeof ( struct gve_dqo_tx_completion ),
                },
        },
        .create = GVE_ADMIN_CREATE_TX,
        .destroy = GVE_ADMIN_DESTROY_TX,
};

/** Receive descriptor queue type */
static const struct gve_queue_type gve_rx_type = {
        .name = "RX",
        .param = gve_create_rx_param,
        .qpl = GVE_RX_QPL,
        .irq = GVE_RX_IRQ,
        .fill = GVE_RX_FILL,
        .stride = {
                .gqi = {
                        .desc = sizeof ( struct gve_gqi_rx_descriptor ),
                        .cmplt = sizeof ( struct gve_gqi_rx_completion ),
                },
                .dqo = {
                        .desc = sizeof ( struct gve_dqo_rx_descriptor ),
                        .cmplt = sizeof ( struct gve_dqo_rx_completion ),
                },
        },
        .create = GVE_ADMIN_CREATE_RX,
        .destroy = GVE_ADMIN_DESTROY_RX,
};

/**
 * Set up admin queue and get device description
 *
 * @v gve               GVE device
 * @ret rc              Return status code
 */
static int gve_setup ( struct gve_nic *gve ) {
        unsigned int i;
        int rc;

        /* Attempt several times, since the device may decide to add
         * in a few spurious resets.
         */
        for ( i = 0 ; i < GVE_RESET_MAX_RETRY ; i++ ) {

                /* Reset device */
                if ( ( rc = gve_reset ( gve ) ) != 0 )
                        continue;

                /* Enable admin queue */
                gve_admin_enable ( gve );

                /* Fetch MAC address */
                if ( ( rc = gve_describe ( gve ) ) != 0 )
                        continue;

                /* Success */
                return 0;
        }

        DBGC ( gve, "GVE %p failed to get device description: %s\n",
               gve, strerror ( rc ) );
        return rc;
}

/** Device startup process descriptor */
static struct process_descriptor gve_startup_desc =
        PROC_DESC_ONCE ( struct gve_nic, startup, gve_startup );

/**
 * Probe PCI device
 *
 * @v pci               PCI device
 * @ret rc              Return status code
 */
static int gve_probe ( struct pci_device *pci ) {
        struct net_device *netdev;
        struct gve_nic *gve;
        unsigned long cfg_start;
        unsigned long db_start;
        unsigned long db_size;
        int rc;

        /* Allocate and initialise net device */
        netdev = alloc_etherdev ( sizeof ( *gve ) );
        if ( ! netdev ) {
                rc = -ENOMEM;
                goto err_alloc;
        }
        netdev_init ( netdev, &gve_operations );
        gve = netdev->priv;
        pci_set_drvdata ( pci, netdev );
        netdev->dev = &pci->dev;
        memset ( gve, 0, sizeof ( *gve ) );
        gve->netdev = netdev;
        gve->tx.type = &gve_tx_type;
        gve->rx.type = &gve_rx_type;
        gve->tx.tag = gve->tx_tag;
        gve->rx.tag = gve->rx_tag;
        process_init_stopped ( &gve->startup, &gve_startup_desc,
                               &netdev->refcnt );
        timer_init ( &gve->watchdog, gve_watchdog, &netdev->refcnt );

        /* Fix up PCI device */
        adjust_pci_device ( pci );

        /* Check PCI revision */
        pci_read_config_byte ( pci, PCI_REVISION, &gve->revision );
        DBGC ( gve, "GVE %p is revision %#02x\n", gve, gve->revision );

        /* Map configuration registers */
        cfg_start = pci_bar_start ( pci, GVE_CFG_BAR );
        gve->cfg = pci_ioremap ( pci, cfg_start, GVE_CFG_SIZE );
        if ( ! gve->cfg ) {
                rc = -ENODEV;
                goto err_cfg;
        }

        /* Map doorbell registers */
        db_start = pci_bar_start ( pci, GVE_DB_BAR );
        db_size = pci_bar_size ( pci, GVE_DB_BAR );
        gve->db = pci_ioremap ( pci, db_start, db_size );
        if ( ! gve->db ) {
                rc = -ENODEV;
                goto err_db;
        }

        /* Configure DMA */
        gve->dma = &pci->dma;
        dma_set_mask_64bit ( gve->dma );
        assert ( netdev->dma == NULL );

        /* Configure dummy MSI-X interrupt */
        if ( ( rc = pci_msix_enable ( pci, &gve->msix ) ) != 0 )
                goto err_msix;

        /* Allocate admin queue */
        if ( ( rc = gve_admin_alloc ( gve ) ) != 0 )
                goto err_admin;

        /* Set up the device */
        if ( ( rc = gve_setup ( gve ) ) != 0 )
                goto err_setup;

        /* Register network device */
        if ( ( rc = register_netdev ( netdev ) ) != 0 )
                goto err_register_netdev;

        return 0;

        unregister_netdev ( netdev );
 err_register_netdev:
 err_setup:
        gve_reset ( gve );
        gve_admin_free ( gve );
 err_admin:
        pci_msix_disable ( pci, &gve->msix );
 err_msix:
        iounmap ( gve->db );
 err_db:
        iounmap ( gve->cfg );
 err_cfg:
        netdev_nullify ( netdev );
        netdev_put ( netdev );
 err_alloc:
        return rc;
}

/**
 * Remove PCI device
 *
 * @v pci               PCI device
 */
static void gve_remove ( struct pci_device *pci ) {
        struct net_device *netdev = pci_get_drvdata ( pci );
        struct gve_nic *gve = netdev->priv;

        /* Unregister network device */
        unregister_netdev ( netdev );

        /* Reset device */
        gve_reset ( gve );

        /* Free admin queue */
        gve_admin_free ( gve );

        /* Disable dummy MSI-X interrupt */
        pci_msix_disable ( pci, &gve->msix );

        /* Unmap registers */
        iounmap ( gve->db );
        iounmap ( gve->cfg );

        /* Free network device */
        netdev_nullify ( netdev );
        netdev_put ( netdev );
}
2004
2005/** GVE PCI device IDs */
2006static struct pci_device_id gve_nics[] = {
2007 PCI_ROM ( 0x1ae0, 0x0042, "gve", "gVNIC", 0 ),
2008};
2009
2010/** GVE PCI driver */
2011struct pci_driver gve_driver __pci_driver = {
2012 .ids = gve_nics,
2013 .id_count = ( sizeof ( gve_nics ) / sizeof ( gve_nics[0] ) ),
2014 .probe = gve_probe,
2015 .remove = gve_remove,
2016};
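The PCI_ROM entry matches vendor 0x1ae0 (Google) and device 0x0042 (the gVNIC function), and __pci_driver places the structure in a linker table that the PCI bus scan walks at probe time. Conceptually the match is a linear search of the ID table against each discovered device; a sketch under that assumption (example_id and example_match are hypothetical, not iPXE's actual matcher):

struct example_id {
	unsigned int vendor;
	unsigned int device;
};

/* Linear search over an ID table, as the sizeof-derived id_count suggests */
static int example_match ( const struct example_id *ids, unsigned int count,
			   unsigned int vendor, unsigned int device ) {
	unsigned int i;

	for ( i = 0 ; i < count ; i++ ) {
		if ( ( ids[i].vendor == vendor ) &&
		     ( ids[i].device == device ) )
			return 1;
	}
	return 0;
}

With the table above, example_match ( ids, 1, 0x1ae0, 0x0042 ) would report a match, after which the bus code invokes .probe (gve_probe) and, on device removal or shutdown, .remove (gve_remove).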
Definition xenmem.h:40