|
| #define | EINFO_EIO_ADMIN_UNSET __einfo_uniqify ( EINFO_EIO, 0x00, "Uncompleted" ) |
| #define | EIO_ADMIN_UNSET __einfo_error ( EINFO_EIO_ADMIN_UNSET ) |
| #define | EINFO_EIO_ADMIN_ABORTED __einfo_uniqify ( EINFO_EIO, 0x10, "Aborted" ) |
| #define | EIO_ADMIN_ABORTED __einfo_error ( EINFO_EIO_ADMIN_ABORTED ) |
| #define | EINFO_EIO_ADMIN_EXISTS __einfo_uniqify ( EINFO_EIO, 0x11, "Already exists" ) |
| #define | EIO_ADMIN_EXISTS __einfo_error ( EINFO_EIO_ADMIN_EXISTS ) |
| #define | EINFO_EIO_ADMIN_CANCELLED __einfo_uniqify ( EINFO_EIO, 0x12, "Cancelled" ) |
| #define | EIO_ADMIN_CANCELLED __einfo_error ( EINFO_EIO_ADMIN_CANCELLED ) |
| #define | EINFO_EIO_ADMIN_DATALOSS __einfo_uniqify ( EINFO_EIO, 0x13, "Data loss" ) |
| #define | EIO_ADMIN_DATALOSS __einfo_error ( EINFO_EIO_ADMIN_DATALOSS ) |
| #define | EINFO_EIO_ADMIN_DEADLINE __einfo_uniqify ( EINFO_EIO, 0x14, "Deadline exceeded" ) |
| #define | EIO_ADMIN_DEADLINE __einfo_error ( EINFO_EIO_ADMIN_DEADLINE ) |
| #define | EINFO_EIO_ADMIN_PRECONDITION __einfo_uniqify ( EINFO_EIO, 0x15, "Failed precondition" ) |
| #define | EIO_ADMIN_PRECONDITION __einfo_error ( EINFO_EIO_ADMIN_PRECONDITION ) |
| #define | EINFO_EIO_ADMIN_INTERNAL __einfo_uniqify ( EINFO_EIO, 0x16, "Internal error" ) |
| #define | EIO_ADMIN_INTERNAL __einfo_error ( EINFO_EIO_ADMIN_INTERNAL ) |
| #define | EINFO_EIO_ADMIN_INVAL __einfo_uniqify ( EINFO_EIO, 0x17, "Invalid argument" ) |
| #define | EIO_ADMIN_INVAL __einfo_error ( EINFO_EIO_ADMIN_INVAL ) |
| #define | EINFO_EIO_ADMIN_NOT_FOUND __einfo_uniqify ( EINFO_EIO, 0x18, "Not found" ) |
| #define | EIO_ADMIN_NOT_FOUND __einfo_error ( EINFO_EIO_ADMIN_NOT_FOUND ) |
| #define | EINFO_EIO_ADMIN_RANGE __einfo_uniqify ( EINFO_EIO, 0x19, "Out of range" ) |
| #define | EIO_ADMIN_RANGE __einfo_error ( EINFO_EIO_ADMIN_RANGE ) |
| #define | EINFO_EIO_ADMIN_PERM __einfo_uniqify ( EINFO_EIO, 0x1a, "Permission denied" ) |
| #define | EIO_ADMIN_PERM __einfo_error ( EINFO_EIO_ADMIN_PERM ) |
| #define | EINFO_EIO_ADMIN_UNAUTH __einfo_uniqify ( EINFO_EIO, 0x1b, "Unauthenticated" ) |
| #define | EIO_ADMIN_UNAUTH __einfo_error ( EINFO_EIO_ADMIN_UNAUTH ) |
| #define | EINFO_EIO_ADMIN_RESOURCE __einfo_uniqify ( EINFO_EIO, 0x1c, "Resource exhausted" ) |
| #define | EIO_ADMIN_RESOURCE __einfo_error ( EINFO_EIO_ADMIN_RESOURCE ) |
| #define | EINFO_EIO_ADMIN_UNAVAIL __einfo_uniqify ( EINFO_EIO, 0x1d, "Unavailable" ) |
| #define | EIO_ADMIN_UNAVAIL __einfo_error ( EINFO_EIO_ADMIN_UNAVAIL ) |
| #define | EINFO_EIO_ADMIN_NOTSUP __einfo_uniqify ( EINFO_EIO, 0x1e, "Unimplemented" ) |
| #define | EIO_ADMIN_NOTSUP __einfo_error ( EINFO_EIO_ADMIN_NOTSUP ) |
| #define | EINFO_EIO_ADMIN_UNKNOWN __einfo_uniqify ( EINFO_EIO, 0x1f, "Unknown error" ) |
| #define | EIO_ADMIN_UNKNOWN __einfo_error ( EINFO_EIO_ADMIN_UNKNOWN ) |
| #define | EIO_ADMIN(status) |
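EIO_ADMIN ( status ) combines these per-status errors so that a failed admin queue command can be reported with a distinguishable error code rather than a bare EIO. A minimal sketch of the calling pattern, assuming a helper name (check_admin_status) that is not part of the driver:

/* Illustrative only: map a completed command's status word to an iPXE
 * error.  Any status other than GVE_ADMIN_STATUS_OK is converted via
 * EIO_ADMIN() so that callers can tell the failure reasons above apart.
 */
static int check_admin_status ( struct gve_nic *gve, unsigned int opcode,
                                uint32_t status ) {
        if ( status == GVE_ADMIN_STATUS_OK )
                return 0;
        DBGC ( gve, "GVE %p command %#04x failed: %#08x\n",
               gve, opcode, status );
        return -EIO_ADMIN ( status );
}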
|
| | FILE_LICENCE (GPL2_OR_LATER_OR_UBDL) |
| static size_t | gve_offset (struct gve_queue *queue, unsigned int tag) |
| | Get buffer offset (within queue page list allocation)
|
| static physaddr_t | gve_address (struct gve_queue *queue, unsigned int tag) |
| | Get buffer address (within queue page list address space)
|
| static void * | gve_buffer (struct gve_queue *queue, unsigned int tag) |
| | Get buffer address.
|
| static int | gve_reset (struct gve_nic *gve) |
| | Reset hardware.
|
| static const char * | gve_mode_name (unsigned int mode) |
| | Get operating mode name (for debugging)
|
| static int | gve_admin_alloc (struct gve_nic *gve) |
| | Allocate admin queue.
|
| static void | gve_admin_free (struct gve_nic *gve) |
| | Free admin queue.
|
| static void | gve_admin_enable (struct gve_nic *gve) |
| | Enable admin queue.
|
| static union gve_admin_command * | gve_admin_command (struct gve_nic *gve) |
| | Get next available admin queue command slot.
|
| static int | gve_admin_wait (struct gve_nic *gve) |
| | Wait for admin queue command to complete.
|
| static int | gve_admin (struct gve_nic *gve) |
| | Issue admin queue command.
|
| static int | gve_admin_simple (struct gve_nic *gve, unsigned int opcode, unsigned int id) |
| | Issue simple admin queue command.
|
| static int | gve_describe (struct gve_nic *gve) |
| | Get device descriptor.
|
| static int | gve_configure (struct gve_nic *gve) |
| | Configure device resources.
|
| static int | gve_deconfigure (struct gve_nic *gve) |
| | Deconfigure device resources.
|
| static int | gve_register (struct gve_nic *gve, struct gve_qpl *qpl) |
| | Register queue page list.
|
| static int | gve_unregister (struct gve_nic *gve, struct gve_qpl *qpl) |
| | Unregister queue page list.
|
| static void | gve_create_tx_param (struct gve_queue *queue, uint32_t qpl, union gve_admin_command *cmd) |
| | Construct command to create transmit queue.
|
| static void | gve_create_rx_param (struct gve_queue *queue, uint32_t qpl, union gve_admin_command *cmd) |
| | Construct command to create receive queue.
|
| static int | gve_create_queue (struct gve_nic *gve, struct gve_queue *queue) |
| | Create transmit or receive queue.
|
| static int | gve_destroy_queue (struct gve_nic *gve, struct gve_queue *queue) |
| | Destroy transmit or receive queue.
|
| static int | gve_alloc_shared (struct gve_nic *gve) |
| | Allocate shared queue resources.
|
| static void | gve_free_shared (struct gve_nic *gve) |
| | Free shared queue resources.
|
| static int | gve_alloc_qpl (struct gve_nic *gve, struct gve_qpl *qpl, uint32_t id, unsigned int buffers) |
| | Allocate queue page list.
|
| static void | gve_free_qpl (struct gve_nic *nic __unused, struct gve_qpl *qpl) |
| | Free queue page list.
|
| static unsigned int | gve_next (unsigned int seq) |
| | Calculate next receive sequence number.
|
| static int | gve_alloc_queue (struct gve_nic *gve, struct gve_queue *queue) |
| | Allocate descriptor queue.
|
| static void | gve_free_queue (struct gve_nic *gve, struct gve_queue *queue) |
| | Free descriptor queue.
|
| static void | gve_cancel_tx (struct gve_nic *gve) |
| | Cancel any pending transmissions.
|
| static int | gve_start (struct gve_nic *gve) |
| | Start up device.
|
| static void | gve_stop (struct gve_nic *gve) |
| | Stop device.
|
| static void | gve_startup (struct gve_nic *gve) |
| | Device startup process.
|
| static void | gve_restart (struct gve_nic *gve) |
| | Trigger startup process.
|
| static void | gve_watchdog (struct retry_timer *timer, int over __unused) |
| | Reset recovery watchdog.
|
| static int | gve_open (struct net_device *netdev) |
| | Open network device.
|
| static void | gve_close (struct net_device *netdev) |
| | Close network device.
|
| static int | gve_transmit (struct net_device *netdev, struct io_buffer *iobuf) |
| | Transmit packet.
|
| static void | gve_poll_tx (struct net_device *netdev) |
| | Poll for completed transmissions.
|
| static void | gve_poll_rx (struct net_device *netdev) |
| | Poll for received packets.
|
| static void | gve_refill_rx (struct net_device *netdev) |
| | Refill receive queue.
|
| static void | gve_poll (struct net_device *netdev) |
| | Poll for completed and received packets.
|
| static int | gve_setup (struct gve_nic *gve) |
| | Set up admin queue and get device description.
|
| static int | gve_probe (struct pci_device *pci) |
| | Probe PCI device.
|
| static void | gve_remove (struct pci_device *pci) |
| | Remove PCI device.
|
Google Virtual Ethernet network driver.
Definition in file gve.c.
static int gve_admin ( struct gve_nic * gve )
Issue admin queue command.
- Parameters
-
| gve | GVE device |
- Return values
-
| rc | Return status code |
Definition at line 406 of file gve.c.
References gve_nic::admin, be32_to_cpu, bswap_32, gve_nic::cfg, cmd, gve_admin::cmd, DBGC, DBGC2, DBGC2_HDA, DBGC_HDA, EIO_ADMIN, GVE_ADMIN_COUNT, GVE_ADMIN_STATUS_OK, gve_admin_wait(), GVE_CFG_ADMIN_DB, index, opcode, gve_admin::prod, rc, status, strerror(), wmb, and writel.
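Together with gve_admin_command() and gve_admin_wait(), this gives the usual calling pattern: claim the next free command slot, fill it in, then issue the command and wait for the device to complete it. A hedged sketch of how a simple one-parameter command (compare gve_admin_simple()) could be built on top of these helpers; the union member cmd->simple and its id field are assumptions for illustration only:

/* Illustrative sketch of issuing a simple admin queue command */
static int example_admin_simple ( struct gve_nic *gve, unsigned int opcode,
                                  unsigned int id ) {
        union gve_admin_command *cmd;
        int rc;

        /* Construct request in the next available command slot */
        cmd = gve_admin_command ( gve );
        cmd->hdr.opcode = opcode;
        cmd->simple.id = cpu_to_be32 ( id );    /* field name assumed */

        /* Issue command (rings the doorbell and waits for completion) */
        if ( ( rc = gve_admin ( gve ) ) != 0 )
                return rc;

        return 0;
}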
static int gve_describe ( struct gve_nic * gve )
Get device descriptor.
- Parameters
-
| gve | GVE device |
- Return values
-
| rc | Return status code |
Definition at line 488 of file gve.c.
References be16_to_cpu, gve_scratch::buf, build_assert, cmd, count, gve_events::count, gve_queue::count, cpu_to_be32, cpu_to_be64, DBGC, DBGC2, DBGC2_HDA, DBGC_HDA, desc, gve_scratch::desc, dma(), EINVAL, ETH_ALEN, ETH_HLEN, eth_ntoa(), gve_nic::events, gve_admin_command(), GVE_ADMIN_DESCRIBE, GVE_ADMIN_DESCRIBE_VER, GVE_MODE_DQO, gve_mode_name(), GVE_MODE_QPL, GVE_OPT_DQO_QPL, GVE_OPT_DQO_RDA, GVE_OPT_GQI_QPL, GVE_OPT_GQI_RDA, gve_option::id, id, inet_ntoa(), gve_option::len, len, gve_scratch::map, max, memcpy(), gve_nic::mode, gve_nic::netdev, netdev, offset, offsetof, gve_nic::options, opts, rc, gve_nic::rx, gve_nic::scratch, gve_nic::tx, and typeof().
Referenced by gve_setup().
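The descriptor reports the queue and event counter sizes, the MAC address and MTU, and a list of variable-length options from which gve->options and the operating mode are derived. A standalone sketch of that kind of bounded option walk; the struct example_option layout is illustrative, not the driver's real struct gve_option:

#include <stdint.h>
#include <stddef.h>

/* Illustrative option header: identifier plus payload length
 * (excluding the header itself) */
struct example_option {
        uint16_t id;
        uint16_t len;
};

/* Walk a region of "max" bytes of options and return a bitmask of the
 * identifiers seen, skipping anything that would overrun the region,
 * in the same way that gve_describe() rejects underlength or
 * malformed options.
 */
static uint32_t example_parse_options ( const void *data, size_t max ) {
        const struct example_option *opt;
        const uint8_t *bytes = data;
        uint32_t options = 0;
        size_t offset = 0;

        while ( ( offset + sizeof ( *opt ) ) <= max ) {
                opt = ( ( const void * ) ( bytes + offset ) );
                if ( ( offset + sizeof ( *opt ) + opt->len ) > max )
                        break;          /* malformed/underlength option */
                if ( opt->id < ( 8 * sizeof ( options ) ) )
                        options |= ( 1UL << opt->id );
                offset += ( sizeof ( *opt ) + opt->len );
        }
        return options;
}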
static int gve_configure ( struct gve_nic * gve )
Configure device resources.
- Parameters
-
| gve | GVE device |
- Return values
-
| rc | Return status code |
Definition at line 590 of file gve.c.
References be32_to_cpu, bswap_32, cmd, gve_events::count, cpu_to_be32, cpu_to_be64, gve_irqs::db, gve_nic::db, gve_irq::db_idx, DBGC, dma(), doorbell, gve_events::event, gve_nic::events, gve_admin_command(), GVE_ADMIN_CONFIGURE, GVE_FORMAT, GVE_GQI_IRQ_DISABLE, GVE_IRQ_COUNT, GVE_MODE_DQO, gve_irqs::irq, gve_nic::irqs, gve_events::map, gve_irqs::map, gve_nic::mode, rc, and writel.
Referenced by gve_start().
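After the configure command completes, the driver records a CPU pointer to each interrupt channel's doorbell register (an offset within the doorbell BAR) and leaves the channels masked, since iPXE polls rather than taking interrupts. A hedged sketch of that per-channel step; the scaling of db_idx into a byte offset and the byte order of the masking write are assumptions:

/* Illustrative sketch: locate each interrupt channel's doorbell within
 * the doorbell BAR and mask the channel, since the driver operates by
 * polling.
 */
static void example_setup_irq_doorbells ( struct gve_nic *gve ) {
        struct gve_irqs *irqs = &gve->irqs;
        unsigned int db_off;
        unsigned int i;

        for ( i = 0 ; i < GVE_IRQ_COUNT ; i++ ) {
                /* Index-to-byte-offset scaling assumed */
                db_off = ( be32_to_cpu ( irqs->irq[i].db_idx ) *
                           sizeof ( uint32_t ) );
                DBGC ( gve, "GVE %p IRQ %d doorbell +%#04x\n",
                       gve, i, db_off );
                irqs->db[i] = ( gve->db + db_off );
                writel ( bswap_32 ( GVE_GQI_IRQ_DISABLE ), irqs->db[i] );
        }
}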
static int gve_create_queue ( struct gve_nic * gve, struct gve_queue * queue )
Create transmit or receive queue.
- Parameters
-
| gve | GVE device |
| queue | Descriptor queue |
- Return values
-
| rc | Return status code |
Definition at line 765 of file gve.c.
References gve_buffer::addr, assert, be32_to_cpu, cmd, cpu_to_be64, gve_irqs::db, gve_nic::db, DBGC, gve_events::event, gve_nic::events, gve_address(), gve_admin_command(), GVE_DQO_IRQ_REARM, GVE_MODE_DQO, GVE_MODE_QPL, GVE_RAW_QPL, gve_nic::irqs, memset(), gve_nic::mode, gve_nic::msix, pci_msix_unmask(), queue, rc, stride, tag, type, and writel.
Referenced by gve_start().
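In queue page list mode the driver prefills the buffer-address field at the tail of each descriptor before creating the queue, using gve_address() and the per-type descriptor stride. A hedged sketch of that prefill loop; passing the stride as a plain size_t is a simplification of the driver's per-type stride structure:

/* Illustrative sketch: write the device address of buffer "i" into the
 * gve_buffer field at the tail of descriptor "i", with descriptors
 * spaced desc_stride bytes apart.
 */
static void example_prefill_buffers ( struct gve_queue *queue,
                                      size_t desc_stride ) {
        struct gve_buffer *buf;
        unsigned int i;

        buf = ( queue->desc.raw + desc_stride - sizeof ( *buf ) );
        for ( i = 0 ; i < queue->count ; i++ ) {
                buf->addr = cpu_to_be64 ( gve_address ( queue, i ) );
                buf = ( ( ( void * ) buf ) + desc_stride );
        }
}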
static int gve_alloc_qpl ( struct gve_nic * gve, struct gve_qpl * qpl, uint32_t id, unsigned int buffers )
Allocate queue page list.
- Parameters
-
| gve | GVE device |
| qpl | Queue page list |
| id | Queue page list ID |
| buffers | Number of data buffers |
- Return values
-
| rc | Return status code |
Definition at line 924 of file gve.c.
References assert, gve_qpl::base, build_assert, gve_qpl::count, gve_qpl::data, DBGC, dma(), gve_nic::dma, dma_umalloc(), ENOMEM, GVE_ALIGN, GVE_BUF_PER_PAGE, GVE_BUF_SIZE, GVE_MODE_QPL, GVE_PAGE_SIZE, GVE_QPL_MAX, gve_qpl::id, id, len, gve_qpl::map, and gve_nic::mode.
Referenced by gve_alloc_queue().
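The page list is sized from the number of data buffers: GVE_BUF_PER_PAGE buffers fit in each GVE_PAGE_SIZE page, the resulting page count is checked against GVE_QPL_MAX, and the pages are allocated in one DMA-coherent block via dma_umalloc(). A worked sketch of that sizing arithmetic, using assumed 4096-byte pages and 2048-byte buffers rather than the driver's actual constants:

/* Illustrative sizing arithmetic (values are assumptions, not taken
 * from gve.h): with 4096-byte pages and 2048-byte buffers, each page
 * holds two data buffers.
 */
#define EXAMPLE_PAGE_SIZE 4096
#define EXAMPLE_BUF_SIZE 2048
#define EXAMPLE_BUF_PER_PAGE ( EXAMPLE_PAGE_SIZE / EXAMPLE_BUF_SIZE )

/* Number of pages needed to back "buffers" data buffers, rounded up */
static unsigned int example_qpl_pages ( unsigned int buffers ) {
        return ( ( buffers + EXAMPLE_BUF_PER_PAGE - 1 ) /
                 EXAMPLE_BUF_PER_PAGE );
}

Under these assumptions a 64-buffer ring needs 32 pages, and the whole list is allocated as a single block of ( pages * page size ) bytes.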
static int gve_alloc_queue ( struct gve_nic * gve, struct gve_queue * queue )
Allocate descriptor queue.
- Parameters
-
| gve | GVE device |
| queue | Descriptor queue |
- Return values
-
| rc | Return status code |
Definition at line 996 of file gve.c.
References assert, DBGC, dma(), gve_nic::dma, dma_alloc(), dma_free(), dma_ufree(), dma_umalloc(), EINVAL, ENOMEM, GVE_ALIGN, gve_alloc_qpl(), gve_free_qpl(), GVE_MODE_DQO, memset(), gve_nic::mode, queue, rc, stride, and type.
Referenced by gve_open().
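Allocation begins with a sanity check that the descriptor count is non-zero and a power of two, so that ring indices can later be computed by masking with ( count - 1 ). A standalone sketch of that check:

/* Illustrative: a descriptor count is usable only if it is non-zero
 * and a power of two, so that ( index & ( count - 1 ) ) wraps cleanly.
 */
static int example_count_is_valid ( unsigned int count ) {
        return ( ( count != 0 ) && ( ( count & ( count - 1 ) ) == 0 ) );
}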
static int gve_transmit ( struct net_device * netdev, struct io_buffer * iobuf )
Transmit packet.
- Parameters
-
| netdev | Network device |
| iobuf | I/O buffer |
- Return values
-
| rc | Return status code |
Definition at line 1371 of file gve.c.
References gve_buffer::addr, assert, bswap_32, gve_dqo_tx_descriptor::buf, count, gve_dqo_tx_tag::count, gve_gqi_tx_descriptor::count, cpu_to_be16, cpu_to_le16, cpu_to_le64, io_buffer::data, DBGC2, doorbell, ENETDOWN, gve_address(), GVE_BUF_SIZE, GVE_DQO_TX_TYPE_LAST, GVE_DQO_TX_TYPE_PACKET, GVE_GQI_TX_TYPE_CONT, GVE_GQI_TX_TYPE_START, GVE_MODE_DQO, GVE_TX_FILL, gve_dqo_tx_tag::id, index, iob_len(), gve_dqo_tx_descriptor::len, gve_gqi_tx_descriptor::len, len, memcpy(), gve_nic::mode, netdev, netdev_link_ok(), netdev_tx_defer(), next, NULL, offset, gve_dqo_tx_descriptor::tag, tag, gve_gqi_tx_descriptor::total, gve_nic::tx, tx, gve_nic::tx_chain, gve_nic::tx_iobuf, gve_dqo_tx_descriptor::type, gve_gqi_tx_descriptor::type, wmb, and writel.
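Transmission uses the classic producer/consumer ring arithmetic visible in the listing: the ring is full when ( prod - cons ) plus the descriptors needed would exceed the fill level, and each newly claimed slot is the producer counter masked by ( count - 1 ). A standalone sketch of those two operations, with field names mirroring gve_queue's prod, cons, count, and fill:

/* Illustrative ring bookkeeping; counters increase without bound and
 * are reduced modulo the (power-of-two) ring size only when indexing.
 */
struct example_ring {
        unsigned int prod;      /* descriptors produced so far */
        unsigned int cons;      /* descriptors consumed so far */
        unsigned int count;     /* ring size (power of two) */
        unsigned int fill;      /* maximum descriptors in flight */
};

/* Check whether "needed" more descriptors would overfill the ring */
static int example_ring_full ( struct example_ring *ring,
                               unsigned int needed ) {
        return ( ( ( ring->prod - ring->cons ) + needed ) > ring->fill );
}

/* Claim the next descriptor slot and return its ring index */
static unsigned int example_ring_next ( struct example_ring *ring ) {
        return ( ring->prod++ & ( ring->count - 1 ) );
}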
static void gve_poll_tx ( struct net_device * netdev )
Poll for completed transmissions.
- Parameters
-
| netdev | Network device |
Definition at line 1482 of file gve.c.
References assert, be32_to_cpu, bit, count, gve_dqo_tx_tag::count, DBGC2, gve_dqo_tx_completion::flags, GVE_DQO_TXF_GEN, GVE_DQO_TXF_PKT, GVE_MODE_DQO, GVE_TX_FILL, gve_dqo_tx_tag::id, index, gve_nic::mode, netdev, netdev_tx_complete(), NULL, rmb, gve_dqo_tx_completion::tag, tag, gve_nic::tx, tx, gve_nic::tx_chain, and gve_nic::tx_iobuf.
Referenced by gve_poll().
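Out-of-order completions are consumed using a generation flag: each completion entry carries a flag (GVE_DQO_TXF_GEN) that is compared against a generation value derived from the consumer's position in the ring, so an entry left over from the previous pass can be told apart from a freshly written one. A standalone sketch of the test, assuming a power-of-two ring of size count:

/* Illustrative generation test for an out-of-order completion ring.
 * "done" counts completions consumed so far; because count is a power
 * of two, ( done & count ) flips between zero and non-zero each time
 * the consumer wraps the ring.  Mirroring the listing's break test,
 * consumption continues only while the completion's generation flag
 * and ( done & count ) have different truth values.
 */
static int example_keep_consuming ( unsigned int done, unsigned int count,
                                    unsigned int gen_flag ) {
        unsigned int gen = ( done & count );

        return ( ( !! gen_flag ) != ( !! gen ) );
}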
static void gve_poll_rx ( struct net_device * netdev )
Poll for received packets.
- Parameters
-
| netdev | Network device |
Definition at line 1564 of file gve.c.
References alloc_iob(), assert, be16_to_cpu, bit, cpu_to_le16, DBGC2, done, EIO, ENOMEM, gve_dqo_rx_completion::flags, gve_gqi_rx_completion::flags, GVE_BUF_SIZE, GVE_DQO_RXF_LAST, GVE_DQO_RXL_GEN, GVE_DQO_RXS_ERROR, GVE_GQI_RX_PAD, GVE_GQI_RX_SEQ_MASK, GVE_GQI_RXF_ERROR, GVE_GQI_RXF_MORE, GVE_MODE_DQO, gve_next(), gve_offset(), GVE_RX_FILL, index, iob_len(), iob_pull, iob_put, le16_to_cpu, gve_dqo_rx_completion::len, gve_gqi_rx_completion::len, len, memcpy(), gve_nic::mode, netdev, netdev_rx(), netdev_rx_err(), NULL, rc, rmb, gve_nic::rx, rx, gve_gqi_rx_completion::seq, gve_nic::seq, seq, gve_dqo_rx_completion::status, gve_dqo_rx_completion::tag, and tag.
Referenced by gve_poll().
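A received frame may span several completions; the driver accumulates the total length, then copies each buffer's contents into a freshly allocated I/O buffer and hands the result to the network stack. A hedged sketch of the final hand-off for a single-buffer packet, built only from calls named in the reference list (multi-buffer assembly and GVE_GQI_RX_PAD stripping are omitted, and using the ring position directly as the buffer tag is a simplification):

/* Illustrative sketch: copy one completed receive buffer of "len"
 * bytes into a new I/O buffer and pass it up, or record a dropped
 * packet if allocation fails.
 */
static void example_deliver ( struct net_device *netdev,
                              struct gve_queue *rx, unsigned int tag,
                              size_t len ) {
        struct io_buffer *iobuf;

        iobuf = alloc_iob ( len );
        if ( ! iobuf ) {
                netdev_rx_err ( netdev, NULL, -ENOMEM );
                return;
        }
        memcpy ( iob_put ( iobuf, len ), gve_buffer ( rx, tag ), len );
        netdev_rx ( netdev, iobuf );
}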
static int gve_probe ( struct pci_device * pci )
Probe PCI device.
- Parameters
-
| pci | PCI device |
- Return values
-
| rc | Return status code |
Definition at line 1882 of file gve.c.
References adjust_pci_device(), alloc_etherdev(), assert, gve_nic::cfg, gve_nic::db, DBGC, pci_device::dev, gve_nic::dma, pci_device::dma, dma_set_mask_64bit(), ENODEV, ENOMEM, gve_admin_alloc(), gve_admin_free(), GVE_CFG_BAR, GVE_CFG_SIZE, GVE_DB_BAR, gve_operations, gve_reset(), gve_rx_type, gve_setup(), gve_startup_desc, gve_tx_type, gve_watchdog(), iounmap(), memset(), gve_nic::msix, gve_nic::netdev, netdev, netdev_init(), netdev_nullify(), netdev_put(), NULL, pci_bar_size(), pci_bar_start(), pci_ioremap(), pci_msix_disable(), pci_msix_enable(), pci_read_config_byte(), PCI_REVISION, pci_set_drvdata(), process_init_stopped(), rc, register_netdev(), gve_nic::revision, gve_nic::rx, gve_nic::rx_tag, gve_nic::startup, gve_queue::tag, gve_nic::tx, gve_nic::tx_tag, gve_queue::type, unregister_netdev(), and gve_nic::watchdog.
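The probe path follows the usual iPXE shape: allocate an Ethernet device, enable the PCI device, map the configuration and doorbell BARs, reset and set up the NIC, and register the network device, unwinding in reverse order on failure. A heavily abridged sketch of that skeleton built from calls named in the reference list above; several steps (doorbell mapping, MSI-X, admin queue setup, device description) are elided behind a comment and this is not the full gve_probe():

/* Illustrative, abridged probe skeleton */
static int example_probe ( struct pci_device *pci ) {
        struct net_device *netdev;
        struct gve_nic *gve;
        int rc;

        /* Allocate and initialise net device */
        netdev = alloc_etherdev ( sizeof ( *gve ) );
        if ( ! netdev )
                return -ENOMEM;
        netdev_init ( netdev, &gve_operations );
        gve = netdev->priv;
        pci_set_drvdata ( pci, netdev );
        netdev->dev = &pci->dev;
        memset ( gve, 0, sizeof ( *gve ) );
        gve->netdev = netdev;

        /* Fix up PCI device and map configuration registers */
        adjust_pci_device ( pci );
        gve->cfg = pci_ioremap ( pci, pci_bar_start ( pci, GVE_CFG_BAR ),
                                 GVE_CFG_SIZE );
        if ( ! gve->cfg ) {
                rc = -ENODEV;
                goto err_cfg;
        }

        /* ... map doorbells, reset, set up admin queue, describe device ... */

        /* Register network device */
        if ( ( rc = register_netdev ( netdev ) ) != 0 )
                goto err_register_netdev;

        return 0;

 err_register_netdev:
        iounmap ( gve->cfg );
 err_cfg:
        netdev_nullify ( netdev );
        netdev_put ( netdev );
        return rc;
}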