30 PCI_ROM( 0x14e4, 0x1604, "14e4-1604", "Broadcom BCM957454", 0 ),
31 PCI_ROM( 0x14e4, 0x1605, "14e4-1605", "Broadcom BCM957454 RDMA", 0 ),
36 PCI_ROM( 0x14e4, 0x1614, "14e4-1614", "Broadcom BCM957454", 0 ),
38 PCI_ROM( 0x14e4, 0x16c0, "14e4-16c0", "Broadcom BCM957417", 0 ),
41 PCI_ROM( 0x14e4, 0x16c8, "14e4-16c8", "Broadcom BCM957301", 0 ),
42 PCI_ROM( 0x14e4, 0x16c9, "14e4-16c9", "Broadcom BCM957302", 0 ),
43 PCI_ROM( 0x14e4, 0x16ca, "14e4-16ca", "Broadcom BCM957304", 0 ),
44 PCI_ROM( 0x14e4, 0x16cc, "14e4-16cc", "Broadcom BCM957417 MF", 0 ),
45 PCI_ROM( 0x14e4, 0x16cd, "14e4-16cd", "Broadcom BCM958700", 0 ),
46 PCI_ROM( 0x14e4, 0x16ce, "14e4-16ce", "Broadcom BCM957311", 0 ),
47 PCI_ROM( 0x14e4, 0x16cf, "14e4-16cf", "Broadcom BCM957312", 0 ),
48 PCI_ROM( 0x14e4, 0x16d0, "14e4-16d0", "Broadcom BCM957402", 0 ),
49 PCI_ROM( 0x14e4, 0x16d1, "14e4-16d1", "Broadcom BCM957404", 0 ),
50 PCI_ROM( 0x14e4, 0x16d2, "14e4-16d2", "Broadcom BCM957406", 0 ),
51 PCI_ROM( 0x14e4, 0x16d4, "14e4-16d4", "Broadcom BCM957402 MF", 0 ),
52 PCI_ROM( 0x14e4, 0x16d5, "14e4-16d5", "Broadcom BCM957407", 0 ),
53 PCI_ROM( 0x14e4, 0x16d6, "14e4-16d6", "Broadcom BCM957412", 0 ),
54 PCI_ROM( 0x14e4, 0x16d7, "14e4-16d7", "Broadcom BCM957414", 0 ),
55 PCI_ROM( 0x14e4, 0x16d8, "14e4-16d8", "Broadcom BCM957416", 0 ),
56 PCI_ROM( 0x14e4, 0x16d9, "14e4-16d9", "Broadcom BCM957417", 0 ),
57 PCI_ROM( 0x14e4, 0x16da, "14e4-16da", "Broadcom BCM957402", 0 ),
58 PCI_ROM( 0x14e4, 0x16db, "14e4-16db", "Broadcom BCM957404", 0 ),
60 PCI_ROM( 0x14e4, 0x16de, "14e4-16de", "Broadcom BCM957412 MF", 0 ),
61 PCI_ROM( 0x14e4, 0x16df, "14e4-16df", "Broadcom BCM957314", 0 ),
62 PCI_ROM( 0x14e4, 0x16e0, "14e4-16e0", "Broadcom BCM957317", 0 ),
63 PCI_ROM( 0x14e4, 0x16e2, "14e4-16e2", "Broadcom BCM957417", 0 ),
64 PCI_ROM( 0x14e4, 0x16e3, "14e4-16e3", "Broadcom BCM957416", 0 ),
65 PCI_ROM( 0x14e4, 0x16e4, "14e4-16e4", "Broadcom BCM957317", 0 ),
66 PCI_ROM( 0x14e4, 0x16e7, "14e4-16e7", "Broadcom BCM957404 MF", 0 ),
67 PCI_ROM( 0x14e4, 0x16e8, "14e4-16e8", "Broadcom BCM957406 MF", 0 ),
68 PCI_ROM( 0x14e4, 0x16e9, "14e4-16e9", "Broadcom BCM957407", 0 ),
69 PCI_ROM( 0x14e4, 0x16ea, "14e4-16ea", "Broadcom BCM957407 MF", 0 ),
70 PCI_ROM( 0x14e4, 0x16eb, "14e4-16eb", "Broadcom BCM957412 RDMA MF", 0 ),
71 PCI_ROM( 0x14e4, 0x16ec, "14e4-16ec", "Broadcom BCM957414 MF", 0 ),
72 PCI_ROM( 0x14e4, 0x16ed, "14e4-16ed", "Broadcom BCM957414 RDMA MF", 0 ),
73 PCI_ROM( 0x14e4, 0x16ee, "14e4-16ee", "Broadcom BCM957416 MF", 0 ),
74 PCI_ROM( 0x14e4, 0x16ef, "14e4-16ef", "Broadcom BCM957416 RDMA MF", 0 ),
75 PCI_ROM( 0x14e4, 0x16f0, "14e4-16f0", "Broadcom BCM957320", 0 ),
76 PCI_ROM( 0x14e4, 0x16f1, "14e4-16f1", "Broadcom BCM957320", 0 ),
77 PCI_ROM( 0x14e4, 0x1750, "14e4-1750", "Broadcom BCM957508", 0 ),
78 PCI_ROM( 0x14e4, 0x1751, "14e4-1751", "Broadcom BCM957504", 0 ),
79 PCI_ROM( 0x14e4, 0x1752, "14e4-1752", "Broadcom BCM957502", 0 ),
80 PCI_ROM( 0x14e4, 0x1760, "14e4-1760", "Broadcom BCM957608", 0 ),
81 PCI_ROM( 0x14e4, 0x1800, "14e4-1800", "Broadcom BCM957502 MF", 0 ),
82 PCI_ROM( 0x14e4, 0x1801, "14e4-1801", "Broadcom BCM957504 MF", 0 ),
83 PCI_ROM( 0x14e4, 0x1802, "14e4-1802", "Broadcom BCM957508 MF", 0 ),
84 PCI_ROM( 0x14e4, 0x1803, "14e4-1803", "Broadcom BCM957502 RDMA MF", 0 ),
85 PCI_ROM( 0x14e4, 0x1804, "14e4-1804", "Broadcom BCM957504 RDMA MF", 0 ),
86 PCI_ROM( 0x14e4, 0x1805, "14e4-1805", "Broadcom BCM957508 RDMA MF", 0 ),
108 DBGP ( "%s\n", __func__ );
125 unsigned long reg_base, reg_size;
136 DBGP ( "%s\n", __func__ );
146 &bp->subsystem_vendor );
151 &bp->subsystem_device );
174 DBGP ( "%s\n", __func__ );
177 DBGP ( "- %s ( ): Failed\n", __func__ );
212 off = ( void * ) ( bp->bar1 );
226 ( u32 )bp->nq.epoch, 0 );
252 ( u32 )bp->rx.epoch, 0 );
263 ( u32 )bp->tx.epoch, 0 );
273 char *src = ( char * )iob->data;
301 return ( avail-use );
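The fragment at line 301 returns the difference between the available and in-use TX descriptors. A self-contained sketch of the producer/consumer occupancy arithmetic behind such a helper follows; the structure and function names here are illustrative assumptions, not the driver's own.

/* Sketch of TX ring occupancy arithmetic under assumed names. */
struct ring_state {
	unsigned int prod_id;   /* next slot the driver will fill */
	unsigned int cons_id;   /* next slot hardware will complete */
	unsigned int ring_cnt;  /* total descriptors in the ring */
};

static unsigned int ring_tx_avail ( struct ring_state *r ) {
	/* Number of descriptors currently owned by hardware */
	unsigned int use = ( r->prod_id - r->cons_id + r->ring_cnt ) % r->ring_cnt;
	/* Keep one slot unused so prod_id == cons_id always means "empty" */
	unsigned int avail = r->ring_cnt - 1;
	return ( avail - use );
}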
312 else if ( len < 1024 )
314 else if ( len < 2048 )
319 prod_bd->dma = mapping;
329 iob = bp->tx.iob[hw_idx];
341 DBGP ( "%s\n", __func__ );
345 for ( i = 0; i < bp->rx.buf_cnt; i++ ) {
346 if ( bp->rx.iob[i] ) {
376 DBGP ( "- %s ( ): alloc_iob Failed\n", __func__ );
383 bp->rx.iob[iob_idx] = iob;
389 u16 cons_id = ( bp->rx.cons_id % bp->rx.ring_cnt );
392 while ( bp->rx.iob_cnt < bp->rx.buf_cnt ) {
393 iob_idx = ( cons_id % bp->rx.buf_cnt );
394 if ( !bp->rx.iob[iob_idx] ) {
400 cons_id = NEXT_IDX ( cons_id, bp->rx.ring_cnt );
402 if ( iob_idx > cons_id )
407 if ( cons_id != bp->rx.cons_id ) {
409 bp->rx.cons_id = cons_id;
425 u8 ignore_chksum_err = 0;
434 ignore_chksum_err = 1;
436 if ( err_flags && !ignore_chksum_err ) {
441 for ( i = 0; i < 6; i++ ) {
442 if ( rx_buf[6 + i] != bp->mac_addr[i] )
462 cons_id = bp->cq.cons_id + cnt;
463 if ( cons_id >= bp->cq.ring_cnt) {
465 bp->cq.completion_bit ^= 1;
467 cons_id = cons_id - bp->cq.ring_cnt;
469 bp->cq.cons_id = cons_id;
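Lines 462-469 advance the completion-queue consumer index with wraparound and toggle the expected valid ("completion") bit each time the ring wraps. A self-contained sketch of that pattern follows; the structure and function names are illustrative, only the logic mirrors the fragments above.

/* Sketch of consumer-index advance with valid-bit toggle. */
struct cq_state {
	unsigned int cons_id;        /* current consumer index */
	unsigned int ring_cnt;       /* entries in the completion ring */
	unsigned int completion_bit; /* valid-bit polarity we expect next */
};

static void cq_advance ( struct cq_state *cq, unsigned int cnt ) {
	unsigned int cons_id = cq->cons_id + cnt;
	if ( cons_id >= cq->ring_cnt ) {
		/* Ring wrapped: hardware now writes entries with the
		 * opposite valid-bit polarity, so flip our expectation. */
		cq->completion_bit ^= 1;
		cons_id -= cq->ring_cnt;
	}
	cq->cons_id = cons_id;
}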
489 bp->rx.iob[desc_idx] = NULL;
501 u8 cmpl_bit = bp->cq.completion_bit;
503 if ( bp->cq.cons_id == ( bp->cq.ring_cnt - 1 ) ) {
518 DBGP ( "%s\n", __func__ );
529 DBGP ( "%s\n", __func__ );
548 DBGP ( "%s\n", __func__ );
554 bp->nq.completion_bit = 0x1;
558 bp->cq.completion_bit = 0x1;
577 DBGP ( "%s\n", __func__ );
578 if ( bp->nq.bd_virt ) {
583 if ( bp->cq.bd_virt ) {
588 if ( bp->rx.bd_virt ) {
593 if ( bp->tx.bd_virt ) {
598 DBGP ( "- %s ( ): - Done\n", __func__ );
603 DBGP ( "%s\n", __func__ );
604 if ( bp->hwrm_addr_dma ) {
609 if ( bp->hwrm_addr_resp ) {
611 bp->hwrm_addr_resp = NULL;
614 if ( bp->hwrm_addr_req ) {
618 DBGP ( "- %s ( ): - Done\n", __func__ );
623 DBGP ( "%s\n", __func__ );
631 if ( bp->hwrm_addr_req &&
632 bp->hwrm_addr_resp &&
633 bp->hwrm_addr_dma ) {
638 DBGP ( "- %s ( ): Failed\n", __func__ );
645 DBGP ( "%s\n", __func__ );
654 if ( bp->tx.bd_virt &&
662 DBGP ( "- %s ( ): Failed\n", __func__ );
681 for ( i = 0; i < cnt; i++ ) {
707 u8 *ptr = ( u8 * )resp;
713 if ( len > bp->hwrm_max_req_len )
718 for ( idx = 0; idx < wait_cnt; idx++ ) {
740 DBGP ( "%s\n", __func__ );
756 if ( !bp->hwrm_cmd_timeout )
791 DBGP ( "%s\n", __func__ );
851 DBGP ( "%s\n", __func__ );
859 bp->num_cmpl_rings = bp->min_cp_rings;
862 bp->num_tx_rings = bp->min_tx_rings;
865 bp->num_rx_rings = bp->min_rx_rings;
868 bp->num_hw_ring_grps = bp->min_hw_ring_grps;
871 bp->num_stat_ctxs = bp->min_stat_ctxs;
887 DBGP ( "%s\n", __func__ );
907 DBGP ( "%s\n", __func__ );
917 DBGP ( "- %s ( ): Failed\n", __func__ );
925 bp->err_rcvry_supported = 1;
942 DBGP ( "%s\n", __func__ );
949 DBGP ( "- %s ( ): Failed\n", __func__ );
960 bp->ordinal_value = ( u8 )resp->pci_id & 0x0F;
983 DBGP ( "%s\n", __func__ );
990 DBGP ( "-s %s ( ): Failed\n", __func__ );
1005 DBGP ( "%s\n", __func__ );
1019 DBGP ( "%s\n", __func__ );
1035 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
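Several fragments end by returning wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); the symbol index below gives its prototype. A self-contained sketch of the poll-with-timeout idea behind such a helper follows; the ready-test callback, the step size, and the return convention are assumptions, not driver code.

/* Sketch of polling for a firmware response with a timeout. */
#include <stdint.h>

#define POLL_STEP_US 100U  /* assumed polling granularity in microseconds */

typedef int ( * ready_fn ) ( void *ctx );

static int poll_for_response ( ready_fn ready, void *ctx, uint32_t timeout_us ) {
	uint32_t waited;
	for ( waited = 0; waited < timeout_us; waited += POLL_STEP_US ) {
		if ( ready ( ctx ) )
			return 0;   /* firmware completed the command */
		/* a real driver would udelay ( POLL_STEP_US ) here */
	}
	return -1;                  /* timed out */
}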
1046 DBGP ( "%s\n", __func__ );
1048 bp->er.drv_poll_freq = 100;
1049 if ( ! ( bp->err_rcvry_supported ) ) {
1060 DBGP ( "- %s ( ): Failed\n", __func__ );
1093 DBGP ( "rst_reg = %x ", bp->er.rst_reg[i] );
1094 DBGP ( "rst_reg_val = %x ", bp->er.rst_reg_val[i] );
1095 DBGP ( "rst_after_reset = %x\n", bp->er.delay_after_rst[i] );
1107 DBGP ( "%s\n", __func__ );
1122 if ( bp->err_rcvry_supported ) {
1135 DBGP ( "- %s ( ): Failed\n", __func__ );
1149 DBGP ( "%s\n", __func__ );
1169 DBGP ( "%s\n", __func__ );
1171 idx = bp->nq_ring_id;
1173 idx = bp->cq_ring_id;
1182 req->async_event_cr = idx;
1184 req->guest_vlan = bp->vlan_id;
1185 memcpy ( ( char * )&req->dflt_mac_addr[0], bp->mac_addr,
1196 req->async_event_cr = idx;
1211 DBGP ( "%s\n", __func__ );
1223 req->enables = enables;
1224 memcpy ( ( char * )&req->l2_addr[0], ( char * )&bp->mac_addr[0],
1232 req->src_id = ( u32 )bp->port_idx;
1233 req->dst_id = bp->vnic_id;
1249 DBGP ( "%s\n", __func__ );
1256 req->l2_filter_id = bp->l2_filter_id;
1259 DBGP ( "- %s ( ): Failed\n", __func__ );
1295 req->vnic_id = bp->vnic_id;
1298 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1308 DBGP ( "%s\n", __func__ );
1314 DBGP ( "- %s ( ): Failed\n", __func__ );
1346 DBGP ( "%s\n", __func__ );
1356 cmd_len, __func__ );
1363 DBGP ( "%s\n", __func__ );
1449 DBGP ( "%s\n", __func__ );
1456 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1464 DBGP ( "%s\n", __func__ );
1483 DBGP ( "%s\n", __func__ );
1494 DBGP ( "- %s ( ): Failed\n", __func__ );
1507 DBGP ( "%s\n", __func__ );
1514 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1532 DBGP ( "%s\n", __func__ );
1653 req->port_id = bp->port_idx;
1663 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1670 DBGP ( "%s\n", __func__ );
1706 DBGP ( "%s\n", __func__ );
1708 for ( i = 0; i < ( bp->wait_link_timeout / 100 ); i++ ) {
1720 if ( !bp->er.er_rst_on ) {
1733 DBGP ( "%s\n", __func__ );
1739 DBGP ( "- %s ( ): Failed\n", __func__ );
1754 DBGP ( "%s\n", __func__ );
1760 req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
1763 DBGP ( "- %s ( ): Failed\n", __func__ );
1777 DBGP ( "%s\n", __func__ );
1783 req->ring_group_id = ( u32 )bp->ring_grp_id;
1786 DBGP ( "- %s ( ): Failed\n", __func__ );
1801 DBGP ( "%s\n", __func__ );
1808 req->cr = bp->cq_ring_id;
1809 req->rr = bp->rx_ring_id;
1812 req->sc = bp->stat_ctx_id;
1816 DBGP ( "- %s ( ): Failed\n", __func__ );
1830 DBGP ( "%s\n", __func__ );
1835 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1845 DBGP ( "%s\n", __func__ );
1849 req->ring_type = type;
1854 req->length = ( u32 )bp->nq.ring_cnt;
1855 req->logical_id = 0xFFFF;
1861 req->length = ( u32 )bp->cq.ring_cnt;
1866 req->nq_ring_id = bp->nq_ring_id;
1867 req->cq_handle = ( u64 )bp->nq_ring_id;
1872 req->length = ( u32 )bp->tx.ring_cnt;
1873 req->queue_id = ( u16 )bp->queue_id;
1874 req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
1875 req->cmpl_ring_id = bp->cq_ring_id;
1881 req->length = ( u32 )bp->rx.ring_cnt;
1883 req->cmpl_ring_id = bp->cq_ring_id;
1896 DBGP ( "- %s ( ): Failed, type = %x\n", __func__, type );
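The fragments at lines 1845-1896 fill one ring-allocation request differently depending on the ring type (NQ, completion, TX, RX). A self-contained sketch of that dispatch pattern follows; the structure and enum are simplified stand-ins built only from the fields visible above, not the HWRM wire format.

/* Sketch of per-ring-type request setup under assumed types. */
#include <stdint.h>

enum ring_kind { RING_NQ, RING_CMPL, RING_TX, RING_RX };

struct ring_alloc_req {
	uint8_t  ring_type;
	uint32_t length;        /* number of descriptors */
	uint16_t logical_id;
	uint16_t nq_ring_id;    /* completion rings hang off an NQ */
	uint64_t cq_handle;
	uint32_t stat_ctx_id;   /* TX: statistics context */
	uint16_t cmpl_ring_id;  /* TX/RX: owning completion ring */
};

static void fill_ring_alloc ( struct ring_alloc_req *req, enum ring_kind kind,
			      uint32_t ring_cnt, uint16_t nq_id,
			      uint16_t cq_id, uint32_t stat_ctx ) {
	req->ring_type = kind;
	req->length = ring_cnt;
	switch ( kind ) {
	case RING_NQ:
		req->logical_id = 0xFFFF;
		break;
	case RING_CMPL:
		req->nq_ring_id = nq_id;
		req->cq_handle = nq_id;
		break;
	case RING_TX:
		req->stat_ctx_id = stat_ctx;
		req->cmpl_ring_id = cq_id;
		break;
	case RING_RX:
		req->cmpl_ring_id = cq_id;
		break;
	}
}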
1918 DBGP ( "%s\n", __func__ );
1924 DBGP ( "%s\n", __func__ );
1930 DBGP ( "%s\n", __func__ );
1938 DBGP ( "%s\n", __func__ );
1953 DBGP ( "%s\n", __func__ );
1968 DBGP ( "%s\n", __func__ );
1993 DBGP ( "%s\n", __func__ );
2011 DBGP ( "%s\n", __func__ );
2018 DBGP ( "- %s ( ): Failed\n", __func__ );
2033 DBGP ( "%s\n", __func__ );
2039 req->vnic_id = bp->vnic_id;
2042 DBGP ( "- %s ( ): Failed\n", __func__ );
2055 DBGP ( "%s\n", __func__ );
2064 req->default_rx_ring_id = bp->rx_ring_id;
2065 req->default_cmpl_ring_id = bp->cq_ring_id;
2068 req->dflt_ring_grp = bp->ring_grp_id;
2071 req->vnic_id = bp->vnic_id;
2072 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
2089 DBGP ( "%s \n", __func__ );
2167 for ( ptr = cmds; *ptr; ++ptr ) {
2170 ret = ( *ptr ) ( bp );
2172 DBGP ( "- %s ( ): Failed\n", __func__ );
2179 #define bnxt_down_chip( bp ) bnxt_hwrm_run ( bring_down_chip, bp )
2180 #define bnxt_up_chip( bp ) bnxt_hwrm_run ( bring_up_chip, bp )
2181 #define bnxt_down_nic( bp ) bnxt_hwrm_run ( bring_down_nic, bp )
2182 #define bnxt_up_nic( bp ) bnxt_hwrm_run ( bring_up_nic, bp )
2183 #define bnxt_up_init( bp ) bnxt_hwrm_run ( bring_up_init, bp )
2189 DBGP ( "%s\n", __func__ );
2193 DBGP ( "- %s ( ): bnxt_alloc_rings_mem Failed\n", __func__ );
2200 DBGP ( "- %s ( ): bnxt_up_chip Failed\n", __func__ );
2205 DBGP ( "- %s ( ): bnxt_up_nic\n", __func__);
2224 if ( !bp->vlan_tx && bp->vlan_id )
2228 if ( iob_len ( iob ) != prev_len )
2239 if ( bp->er.er_rst_on ) {
2245 DBGP ( "- %s ( ): Failed no bd's available\n", __func__ );
2251 entry = bp->tx.prod_id;
2253 bp->tx.iob[entry] = iob;
2257 if ( bp->tx.prod_id > entry )
2262 bp->tx.prod_id = entry;
2273 cons_id = bp->nq.cons_id + cnt;
2274 if ( cons_id >= bp->nq.ring_cnt ) {
2276 bp->nq.completion_bit ^= 1;
2278 cons_id = cons_id - bp->nq.ring_cnt;
2280 bp->nq.cons_id = cons_id;
2294 #define BNXT_FW_HEALTH_WIN_OFF 0x3000
2295 #define BNXT_REG_WINDOW_BASE 0x400
2296 #define BNXT_GRC_BASE_MASK 0xfff
2297 #define BNXT_GRC_OFFSET_MASK 0xffc
2310 DBGP ( "bnxt_er_reg_write: reg_addr = %x, reg_val = %x\n", reg_addr, reg_val);
2326 DBGP ( "bnxt_er_reg_read: reg_addr = %x, reg_val = %x\n", reg_addr, reg_val);
2334 switch ( reg_type ) {
2342 reg_val = readl ( bp->bar0 + ( reg_addr & mask ) );
2345 reg_val = readl ( bp->bar1 + ( reg_addr & mask ) );
2350 DBGP ( "read_reg_val bp %p addr %x type %x : reg_val = %x\n", bp, reg_addr, reg_type, reg_val );
2359 switch ( reg_type ) {
2367 writel ( reg_val, bp->bar0 + ( reg_addr & mask ) );
2370 writel ( reg_val, bp->bar1 + ( reg_addr & mask ) );
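The fragments at lines 2294-2370 read and write error-recovery registers through a BAR window by masking the register address before indexing into the mapped BAR. A self-contained sketch of that masked window access follows; the mask value is taken from the defines above, while the helper names and the choice of a single BAR argument are illustrative assumptions.

/* Sketch of masked register access through a mapped BAR window. */
#include <stdint.h>

#define GRC_OFFSET_MASK 0xffc   /* as BNXT_GRC_OFFSET_MASK above */

static uint32_t window_read32 ( volatile uint8_t *bar, uint32_t reg_addr ) {
	/* Only the masked low bits index into the window. */
	volatile uint32_t *reg =
		( volatile uint32_t * ) ( bar + ( reg_addr & GRC_OFFSET_MASK ) );
	return *reg;
}

static void window_write32 ( volatile uint8_t *bar, uint32_t reg_addr,
			     uint32_t val ) {
	volatile uint32_t *reg =
		( volatile uint32_t * ) ( bar + ( reg_addr & GRC_OFFSET_MASK ) );
	*reg = val;
}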
2382 for ( i = 0; i < bp->er.reg_array_cnt; i++ ) {
2385 delay_time = bp->er.delay_after_rst[i];
2387 udelay ( delay_time * 100000 );
2396 unsigned short pci_command, new_command;
2399 DBGP ( "%s(hb_task: %d)\n", __func__, hb_task );
2400 if ( bp->er.er_rst_on ) {
2401 if ( timer_running ( &bp->wait_timer) ) {
2413 if ( present_hb_cnt != bp->er.last_fw_hb ) {
2414 bp->er.last_fw_hb = present_hb_cnt;
2420 DBGP ( "%s(): Trigger Error Recovery\n", __func__ );
2421 bp->er.er_rst_on = 1;
2434 udelay ( bp->er.rst_min_dsecs * 100000 );
2440 if ( bp->er.master_pf ) {
2442 udelay ( bp->er.master_wait_period * 100000 );
2448 udelay ( bp->er.master_wait_post_rst * 100000 );
2451 udelay ( bp->er.normal_wait_period * 100000 );
2455 for ( i = 0; i < bp->er.max_bailout_post_rst; i++ ) {
2457 bp->er.fw_status_reg,
2480 bp->er.er_rst_on = 0;
2489 bp->er.driver_initiated_recovery = 1;
2493 bp->er.driver_initiated_recovery = 0;
2499 bp->er.master_pf = 1;
2501 bp->er.master_pf = 0;
2505 bp->er.fw_status_reg,
2509 bp->er.last_fw_hb = 0;
2511 bp->er.fw_rst_cnt_reg,
2515 bp->er.rst_inprg_reg,
2519 bp->er.recvry_cnt_reg,
2527 DBGP ( "Reset Notify Async event" );
2531 DBGP ( " error recovery initiated\n" );
2535 if ( bp->er.rst_min_dsecs == 0 )
2538 if ( bp->er.rst_max_dsecs == 0 )
2542 bp->er.er_initiate = 1;
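The error-recovery task fragments (lines 2399-2421) compare the firmware heartbeat counter against the last value seen and trigger recovery only when the counter stops advancing. A self-contained sketch of that detection step follows; the structure and function names, and the caller's polling cadence, are assumptions.

/* Sketch of heartbeat-stall detection; returns 1 when recovery
 * should be triggered. */
#include <stdint.h>

struct er_state {
	uint32_t last_fw_hb;  /* heartbeat value seen on the previous poll */
	uint8_t  er_rst_on;   /* set once recovery has been triggered */
};

static int heartbeat_check ( struct er_state *er, uint32_t present_hb_cnt ) {
	if ( present_hb_cnt != er->last_fw_hb ) {
		/* Firmware is still alive: remember the new count. */
		er->last_fw_hb = present_hb_cnt;
		return 0;
	}
	/* Heartbeat stalled: flag error recovery. */
	er->er_rst_on = 1;
	return 1;
}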
2549 DBGP ( "bnxt_link_speed_evt: event data = %lx\n",
2596 u16 old_cid = bp->cq.cons_id;
2613 switch ( cq_type ) {
2664 if ( bp->cq.cons_id != old_cid )
2672 u16 old_cid = bp->nq.cons_id;
2684 if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit )
2691 switch ( nq_type ) {
2733 if ( bp->nq.cons_id != old_cid )
2743 if ( bp->er.driver_initiated_recovery ) {
2766 if ( bp->er.er_initiate ) {
2768 bp->er.er_initiate = 0;
2777 DBGP ( "%s\n", __func__ );
2800 DBGP ( "%s\n", __func__ );
2804 DBGP ( "- %s ( ): alloc_etherdev Failed\n", __func__ );
2828 bp->dma = &pci->dma;
2848 DBGP ( "- %s ( ): register_netdev Failed\n", __func__ );
2874 DBGP ( "%s\n", __func__ );
2889 DBGP ( "%s - Done\n", __func__ );
#define VNIC_CFG_REQ_ENABLES_MRU
#define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_FEC_CFG_CHANGE
#define dump_evt(cq, ty, id, ring)
#define RING_ALLOC_REQ_RING_TYPE_NQ
#define dbg_tx_avail(bp, a, u)
#define IPXE_VERSION_MAJOR
#define EINVAL
Invalid argument.
void bnxt_link_speed_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
static u32 bnxt_set_ring_info(struct bnxt *bp)
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_MASK
#define HWRM_STAT_CTX_FREE
#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
static void netdev_tx_complete(struct net_device *netdev, struct io_buffer *iobuf)
Complete network transmission.
static void bnxt_service_cq(struct net_device *dev)
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
#define RESP_DMA_ADDR(bp)
#define MEDIUM_SPEED_100PAM4_112GBPS
__le32 reset_inprogress_reg_mask
#define iob_put(iobuf, len)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB
struct dma_device dma
DMA device.
static int bnxt_hwrm_ver_get(struct bnxt *bp)
#define dbg_flags(func, flags)
static void bnxt_db_tx(struct bnxt *bp, u32 idx)
#define RX_PKT_V3_CMPL_TYPE_MASK
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
#define MEDIA_AUTO_DETECT_MASK
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_PCIE_CFG
void bnxt_set_txq(struct bnxt *bp, int entry, physaddr_t mapping, int len)
static int bnxt_hwrm_error_recovery_req(struct bnxt *bp)
#define HWRM_PORT_MAC_CFG
#define HWRM_CFA_L2_FILTER_FREE
#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST
#define BD_NOW(bd, entry, len)
#define IPXE_VERSION_UPDATE
#define SHORT_CMD_SUPPORTED
#define EBUSY
Device or resource busy.
#define dbg_hw_cmd(bp, func, cmd_len, resp_len, cmd_tmo, err)
static unsigned int reg
#define LINK_SPEED_FW_200G_PAM4_112
#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN
#define MEDIUM_SPEED_2500MBPS
#define dbg_alloc_rx_iob(iob, id, cid)
#define FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE
#define HWRM_CMD_DEFAULT_MULTIPLAYER(a)
#define DEFAULT_NUMBER_OF_CMPL_RINGS
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB
static int bnxt_hwrm_ring_alloc_tx(struct bnxt *bp)
#define dbg_func_resource_qcaps(bp)
int(* open)(struct net_device *netdev)
Open network device.
#define DEFAULT_NUMBER_OF_RING_GRPS
void bnxt_er_task(struct bnxt *bp, u8 hb_task)
#define NQ_CN_TOGGLE_MASK
#define BNXT_REG_WINDOW_BASE
void bnxt_free_hwrm_mem(struct bnxt *bp)
#define ASYNC_EVENT_CMPL_ER_EVENT_DATA1_MASTER_FUNC
void bnxt_port_phy_chg_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
#define SERVICE_NEXT_NQ_BD
#define CQ_DOORBELL_KEY_IDX(a)
int(* hwrm_func_t)(struct bnxt *bp)
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR0
#define FUNC_DRV_RGTR_REQ_ENABLES_VER
#define dbg_short_cmd(sreq, func, len)
__le16 vf_reservation_strategy
static int bnxt_get_device_address(struct bnxt *bp)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB
u32 bnxt_er_get_reg_val(struct bnxt *bp, u32 reg_addr, u32 reg_type, u32 mask)
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID
#define dbg_rxp(iob, rx_len, drop)
#define HWRM_CMD_FLASH_MULTIPLAYER(a)
#define RX_MASK_PROMISCUOUS_MODE
unsigned long driver_data
Arbitrary driver data.
#define LINK_SPEED_FW_10G
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB
struct pci_device_id * ids
PCI ID table.
#define ER_QCFG_RESET_INPRG_REG_ADDR_MASK
static void hwrm_init(struct bnxt *bp, struct input *req, u16 cmd, u16 len)
#define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
#define NQ_RING_BUFFER_SIZE
uint32_t type
Operating system type.
int bnxt_hwrm_run(hwrm_func_t cmds[], struct bnxt *bp)
int pci_write_config_word(struct pci_device *pci, unsigned int where, uint16_t value)
Write 16-bit word to PCI configuration space.
static int bnxt_hwrm_func_cfg_req(struct bnxt *bp)
#define PCI_COMMAND_INTX_DISABLE
Interrupt disable.
static void bnxt_db_nq(struct bnxt *bp)
uint32_t readl(volatile uint32_t *io_addr)
Read 32-bit dword from memory-mapped device.
#define dbg_tx_vlan(bp, src, plen, len)
#define HWRM_VERSION_MINOR
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY
void bnxt_rst_reg_val(struct bnxt *bp, u32 reg_addr, u32 reg_val)
#define RING_FREE(bp, rid, flag)
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK
#define MEDIUM_SPEED_200PAM4_112GBPS
#define LINK_POLL_WAIT_TIME
#define CMPL_BASE_TYPE_RX_L2_V3
unsigned long pci_bar_size(struct pci_device *pci, unsigned int reg)
Get the size of a PCI BAR.
__le32 reset_inprogress_reg
static int bnxt_hwrm_ring_free_rx(struct bnxt *bp)
static void * bnxt_pci_base(struct pci_device *pdev, unsigned int reg)
#define RING_FREE_REQ_RING_TYPE_TX
static int bnxt_hwrm_ring_alloc_grp(struct bnxt *bp)
static u32 bnxt_tx_avail(struct bnxt *bp)
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_SUPPORTED_LINK_SPEEDS_CHANGE
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR1
#define LINK_SPEED_FW_100G_PAM4_112
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE
#define BNXT_FLAG_IS_CHIP_P5
#define PCI_BASE_ADDRESS_0
struct dma_device * dma
DMA device.
#define HWRM_FUNC_BACKING_STORE_CFG
#define ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_MASK
#define D3_LINK_SPEED_FW_NUM
#define D3_SPEED_FW_SHIFT
static int bnxt_hwrm_ring_alloc(struct bnxt *bp, u8 type)
int pci_read_config_word(struct pci_device *pci, unsigned int where, uint16_t *value)
Read 16-bit word from PCI configuration space.
void bnxt_link_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
uint32_t data_len
Microcode data size (or 0 to indicate 2000 bytes)
#define BNXT_RX_STD_DMA_SZ
#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS
#define PCI_COMMAND
PCI command.
void bnxt_mm_nic(struct bnxt *bp)
static int bnxt_hwrm_cfa_l2_filter_alloc(struct bnxt *bp)
#define LINK_SPEED_DRV_NUM
hwrm_func_t bring_up_init[]
#define BNXT_FLAG_MULTI_HOST
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
#define MEDIUM_SPEED_10GBPS
int bnxt_alloc_rings_mem(struct bnxt *bp)
#define LINK_SPEED_FW_50G_PAM4
static int bnxt_hwrm_func_resource_qcaps(struct bnxt *bp)
#define GET_MEDIUM_SPEED(m)
static int bnxt_hwrm_func_reset_req(struct bnxt *bp)
#define FUNC_CFG_REQ_EVB_MODE_NO_EVB
#define PORT_PHY_QCFG_RESP_LINK_LINK
#define ASYNC_EVENT_CMPL_EVENT_DATA1_REASON_CODE_FATAL
#define LINK_SPEED_FW_25G
static int bnxt_init_one(struct pci_device *pci)
static void bnxt_adv_cq_index(struct bnxt *bp, u16 cnt)
static int bnxt_hwrm_nvm_get_variable_req(struct bnxt *bp, u16 data_len, u16 option_num, u16 dimensions, u16 index_0)
#define BNXT_GRC_OFFSET_MASK
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY
#define MEDIUM_SPEED_50GBPS
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_ILLEGAL_LINK_SPEED_CFG
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE
#define PCI_SUBSYSTEM_ID
PCI subsystem ID.
struct device dev
Generic device.
#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER
#define BNXT_FLAG_NPAR_MODE
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK
#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_FORCE
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID
void bnxt_add_vlan(struct io_buffer *iob, u16 vlan)
static void bnxt_close(struct net_device *dev)
#define MEDIUM_SPEED_200GBPS
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST
Dynamic memory allocation.
static int bnxt_hwrm_func_qcaps_req(struct bnxt *bp)
#define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT
void bnxt_rx_process(struct net_device *dev, struct bnxt *bp, struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi)
static struct pci_device_id bnxt_nics[]
#define ER_DFLT_FW_RST_MIN_DSECS
#define RX_MASK_ACCEPT_NONE
#define MEDIUM_SPEED_25GBPS
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
__le32 driver_polling_freq
#define PCI_COMMAND_MASTER
Bus master.
#define RX_PKT_V3_CMPL_TYPE_RX_L2_V3
struct ena_llq_option desc
Descriptor counts.
int bnxt_free_rx_iob(struct bnxt *bp)
#define ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_MASK
#define dbg_pci(bp, func, creg)
void dma_free(struct dma_mapping *map, void *addr, size_t len)
Unmap and free DMA-coherent buffer.
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
#define DEFAULT_NUMBER_OF_STAT_CTXS
#define STATUS_LINK_ACTIVE
#define HWRM_RING_GRP_ALLOC
#define ENOMEM
Not enough space.
#define ER_QCFG_FW_HEALTH_REG_ADDR_MASK
#define LINK_SPEED_FW_NUM
#define LINK_SPEED_FW_400G_PAM4_112
void * memcpy(void *dest, const void *src, size_t len) __nonnull
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX
#define ASYNC_EVENT_CMPL_ER_EVENT_DATA1_RECOVERY_ENABLED
#define NO_MORE_NQ_BD_TO_SERVICE
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56
__le32 master_wait_period
static int bnxt_hwrm_port_phy_qcaps_req(struct bnxt *bp)
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID
#define ER_DFLT_FW_RST_MAX_DSECS
__le32 normal_wait_period
static int bnxt_hwrm_set_rx_mask(struct bnxt *bp, u32 rx_mask)
static int wait_resp(struct bnxt *bp, u32 tmo, u16 len, const char *func)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB
#define ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_MASK
int bnxt_post_rx_buffers(struct bnxt *bp)
#define HWRM_QUEUE_QPORTCFG
#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR
#define SHORT_CMD_REQUIRED
#define RX_RING_BUFFER_SIZE
#define HWRM_CFA_L2_SET_RX_MASK
static void bnxt_er_task_timer(struct retry_timer *timer, int over __unused)
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
#define DEFAULT_NUMBER_OF_RX_RINGS
#define container_of(ptr, type, field)
Get containing structure.
#define dbg_mem(bp, func)
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
int pci_read_config_dword(struct pci_device *pci, unsigned int where, uint32_t *value)
Read 32-bit dword from PCI configuration space.
#define BNXT_FW_HEALTH_WIN_OFF
#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE
static void bnxt_tx_adjust_pkt(struct bnxt *bp, struct io_buffer *iob)
#define dbg_func_qcaps(bp)
#define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY
static int bnxt_reset_rx_mask(struct bnxt *bp)
void * priv
Driver private data.
#define LINK_SPEED_FW_AUTONEG
static int bnxt_hwrm_ring_free_nq(struct bnxt *bp)
#define __unused
Declare a variable or data structure as unused.
#define ER_QCFG_RCVRY_CNT_REG_ADDR_MASK
struct retry_timer task_timer
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID
static int bnxt_hwrm_ring_alloc_rx(struct bnxt *bp)
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP
#define PCI_BASE_ADDRESS_2
static int bnxt_hwrm_cfa_l2_filter_free(struct bnxt *bp)
void writel(uint32_t data, volatile uint32_t *io_addr)
Write 32-bit dword to memory-mapped device.
#define HWRM_CFA_L2_FILTER_ALLOC
#define LINK_SPEED_FW_40G
#define bnxt_down_nic(bp)
void udelay(unsigned long usecs)
Delay for a fixed number of microseconds.
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED
static int bnxt_hwrm_set_async_event(struct bnxt *bp)
#define BNXT_DMA_ALIGNMENT
static struct net_device * netdev
static __always_inline physaddr_t iob_dma(struct io_buffer *iobuf)
Get I/O buffer DMA address.
#define LINK_SPEED_FW_MASK
static int bnxt_hwrm_ring_alloc_cq(struct bnxt *bp)
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
void bnxt_mm_init_rings(struct bnxt *bp, const char *func)
#define ER_QCFG_RESET_REG_ADDR_SPACE_MASK
#define NQ_CN_TYPE_CQ_NOTIFICATION
static void bnxt_tx_complete(struct net_device *dev, u16 hw_idx)
#define CMPL_BASE_TYPE_RX_L2
static void bnxt_down_pci(struct bnxt *bp)
#define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56
#define HWRM_NA_SIGNATURE
#define ER_QCFG_FW_HB_REG_ADDR_MASK
#define HWRM_FUNC_DRV_RGTR
void unregister_netdev(struct net_device *netdev)
Unregister network device.
static void dev_p5_db(struct bnxt *bp, u32 idx, u32 xid, u32 flag)
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, u16 idx)
static int bnxt_hwrm_ring_free_tx(struct bnxt *bp)
static int bnxt_get_pci_info(struct bnxt *bp)
#define LINK_SPEED_FW_50G
hwrm_func_t bring_up_chip[]
static void dev_p7_db(struct bnxt *bp, u32 idx, u32 xid, u32 flag, u32 epoch, u32 toggle)
static void bnxt_remove_one(struct pci_device *pci)
#define TX_RING_BUFFER_SIZE
#define TX_BD_SHORT_FLAGS_LHINT_LT512
#define HWRM_FUNC_RESOURCE_QCAPS
#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB
#define BNXT_CQ_INTR_MODE(vf)
#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM
#define dbg_rx_cid(idx, cid)
#define TX_BD_SHORT_FLAGS_LHINT_LT1K
static int bnxt_rx_complete(struct net_device *dev, struct rx_pkt_cmpl *rx)
struct refcnt refcnt
Reference counter.
#define dbg_alloc_rx_iob_fail(iob_idx, cons_id)
struct retry_timer wait_timer
#define HWRM_STAT_CTX_ALLOC
static int bnxt_tx(struct net_device *dev, struct io_buffer *iob)
struct io_buffer * alloc_rx_iob(size_t len, struct dma_device *dma)
Allocate and map I/O buffer for receive DMA.
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE
#define ASYNC_EVENT_CMPL_EVENT_DATA1_REASON_CODE_MASK
#define CMPL_BASE_TYPE_TX_L2
#define BNXT_ER_TIMER_INTERVAL(x)
#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS
static int bnxt_get_link_speed(struct bnxt *bp)
int register_netdev(struct net_device *netdev)
Register network device.
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
#define FW_STATUS_REG_CODE_READY
__le32 fw_health_status_reg
#define BNXT_ER_WAIT_TIMER_INTERVAL(x)
#define MEDIUM_SPEED_1000MBPS
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT
static int bnxt_hwrm_vnic_free(struct bnxt *bp)
#define PORT_MAC_CFG_REQ_LPBK_NONE
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
u8 bnxt_rx_drop(struct bnxt *bp, struct io_buffer *iob, struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB
#define dump_tx_pkt(pkt, len, idx)
#define CQ_RING_BUFFER_SIZE
hwrm_func_t bring_down_nic[]
static void hwrm_write_req(struct bnxt *bp, void *req, u32 cnt)
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE
#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST
static int bnxt_hwrm_ring_free_cq(struct bnxt *bp)
#define PORT_PHY_CFG_REQ_FLAGS_FORCE
#define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT
#define ER_QCFG_FW_HB_REG_ADDR_SPACE_MASK
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE
#define RX_MASK_ACCEPT_ALL_MULTICAST
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp)
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
A PCI device ID list entry.
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112
static void bnxt_hwrm_assign_resources(struct bnxt *bp)
#define SHORT_REQ_SIGNATURE_SHORT_CMD
void * dma_alloc(struct dma_device *dma, struct dma_mapping *map, size_t len, size_t align)
Allocate and map DMA-coherent buffer.
void bnxt_mm_init_hwrm(struct bnxt *bp, const char *func)
#define TX_BD_SHORT_FLAGS_LHINT_GTE2K
static int bnxt_hwrm_port_mac_cfg(struct bnxt *bp)
#define VF_CFG_ENABLE_FLAGS
#define TX_BD_SHORT_FLAGS_LHINT_LT2K
static int bnxt_hwrm_ring_alloc_nq(struct bnxt *bp)
static int is_valid_ether_addr(const void *addr)
Check if Ethernet address is valid.
void * memmove(void *dest, const void *src, size_t len) __nonnull
static void bnxt_set_link(struct bnxt *bp)
static int bnxt_query_phy_link(struct bnxt *bp)
#define HWRM_FUNC_DRV_UNRGTR
#define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EEE_CFG_CHANGE
#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT
__le32 err_recovery_cnt_reg
#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS
#define dbg_chip_info(bp)
Network device operations.
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
static void bnxt_db_rx(struct bnxt *bp, u32 idx)
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX
struct device * dev
Underlying hardware device.
#define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT
#define HWRM_PORT_PHY_QCFG
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB
#define MEDIUM_SPEED_50PAM4GBPS
#define TX_IN_USE(a, b, c)
static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
static int bnxt_hwrm_vnic_cfg(struct bnxt *bp)
#define MEDIUM_SPEED_AUTONEG
Network device management.
void start_timer_fixed(struct retry_timer *timer, unsigned long timeout)
Start timer with a specified timeout.
#define dump_cq(cq, id, toggle)
#define CMPL_BASE_TYPE_STAT_EJECT
#define MEDIUM_SPEED_100PAM4GBPS
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
u8 bnxt_is_pci_vf(struct pci_device *pdev)
Check if Virtual Function.
void bnxt_link_speed_chg_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
#define dbg_tx_done(pkt, len, idx)
void mdelay(unsigned long msecs)
Delay for a fixed number of milliseconds.
#define PCICFG_ME_REGISTER
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_GRC
#define LINK_SPEED_FW_2_5G
#define BNXT_FLAG_LINK_SPEEDS2
void stop_timer(struct retry_timer *timer)
Stop timer.
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB
#define HWRM_NVM_GET_VARIABLE
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB
#define HWRM_VERSION_UPDATE
#define LINK_DEFAULT_TIMEOUT
#define CMPL_DOORBELL_KEY_CMPL
#define NO_MORE_CQ_BD_TO_SERVICE
#define prn_set_speed(speed)
#define RING_ALLOC_REQ_RING_TYPE_TX
static void short_hwrm_cmd_req(struct bnxt *bp, u16 len)
#define dbg_link_info(bp)
static int bnxt_hwrm_ring_free_grp(struct bnxt *bp)
#define RX_MASK_ACCEPT_MULTICAST
#define dbg_fw_ver(resp, tmo)
void free_rx_iob(struct io_buffer *iobuf)
Unmap and free I/O buffer for receive DMA.
#define ENOBUFS
No buffer space available.
#define MEDIUM_FULL_DUPLEX
#define MEDIUM_SPEED_400PAM4_112GBPS
Media Independent Interface constants.
static int bnxt_get_link_state(struct bnxt *bp)
#define FUNC_CFG_REQ_ENABLES_EVB_MODE
#define RING_ALLOC_REQ_RING_TYPE_RX
static void bnxt_adv_nq_index(struct bnxt *bp, u16 cnt)
#define dump_rx_bd(rx_cmp, rx_cmp_hi, desc_idx)
#define RING_FREE_REQ_RING_TYPE_L2_CMPL
static int bnxt_alloc_rx_iob(struct bnxt *bp, u16 cons_id, u16 iob_idx)
#define RING_FREE_REQ_RING_TYPE_RX
void * data
Start of data.
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp)
u32 bnxt_er_reg_write(struct bnxt *bp, u32 reg_addr, u32 reg_val)
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2
#define barrier()
Optimisation barrier.
static int bnxt_hwrm_backing_store_qcfg(struct bnxt *bp)
int bnxt_alloc_hwrm_mem(struct bnxt *bp)
void bnxt_process_er_event(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
#define FUNC_CFG_REQ_ENABLES_NUM_MSIX
#define HWRM_PORT_PHY_CFG
#define LINK_SPEED_FW_100G
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK
#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD
struct net_device * alloc_etherdev(size_t priv_size)
Allocate Ethernet device.
#define LINK_SPEED_FW_200G
#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS
struct pci_device_id * id
Driver device ID.
void bnxt_rst_er_registers(struct bnxt *bp)
int pci_write_config_dword(struct pci_device *pci, unsigned int where, uint32_t value)
Write 32-bit dword to PCI configuration space.
#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
static int bnxt_open(struct net_device *dev)
#define FUNC_CFG_REQ_ENABLES_NUM_VNICS
void iounmap(volatile const void *io_addr)
Unmap I/O address.
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
static void bnxt_poll(struct net_device *dev)
#define MAX_ETHERNET_PACKET_BUFFER_SIZE
#define DEFAULT_NUMBER_OF_TX_RINGS
#define RING_FREE_REQ_RING_TYPE_NQ
static int bnxt_set_rx_mask(struct bnxt *bp)
static int bnxt_hwrm_func_qcfg_req(struct bnxt *bp)
#define BNXT_FLAG_IS_CHIP_P7
#define SERVICE_NEXT_CQ_BD
#define DBC_MSG_TOGGLE(idx)
#define SET_MEDIUM_DUPLEX(bp, d)
#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_MASK
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB
static u16 bnxt_get_pkt_vlan(char *src)
void mb(void)
Memory barrier.
#define dbg_link_status(bp)
#define DBC_MSG_EPCH(idx)
#define RX_DOORBELL_KEY_RX
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL
void bnxt_process_reset_notify_event(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
static void bnxt_er_wait_timer(struct retry_timer *timer, int over __unused)
#define PCI_SUBSYSTEM_VENDOR_ID
PCI subsystem vendor ID.
#define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_SFT
#define BNXT_FLAG_HWRM_SHORT_CMD_SUPP
#define dbg_num_rings(bp)
#define MEDIUM_SPEED_40GBPS
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56
#define LINK_SPEED_FW_400G_PAM4
#define TX_DOORBELL_KEY_TX
#define dbg_link_state(bp, tmo)
uint32_t flag
Flag number.
#define RING_ALLOC_REQ_INT_MODE_POLL
#define dbg_func_qcfg(bp)
u32 set_rx_mask(u32 rx_mask)
#define PCI_BASE_ADDRESS_4
static void bnxt_service_nq(struct net_device *dev)
#define HWRM_VERSION_MAJOR
void * pci_ioremap(struct pci_device *pci, unsigned long bus_addr, size_t len)
Map PCI bus address as an I/O address.
__le32 master_wait_post_reset
#define LINK_SPEED_FW_100G_PAM4
hwrm_func_t bring_down_chip[]
#define MEDIUM_SPEED_100GBPS
hwrm_func_t bring_up_nic[]
#define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE
void bnxt_free_rings_mem(struct bnxt *bp)
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE
u32 bnxt_er_reg_read(struct bnxt *bp, u32 reg_addr)
static void bnxt_set_rx_desc(u8 *buf, struct io_buffer *iob, u16 cid, u32 idx)
#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME
#define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_PAUSE_CFG_CHANGE
static struct net_device_operations bnxt_netdev_ops
#define HWRM_FUNC_BACKING_STORE_QCFG
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB
#define HWRM_RING_GRP_FREE
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0
#define BNXT_GRC_BASE_MASK
#define BNXT_FLAG_RESOURCE_QCAPS_SUPPORT
#define NULL
NULL pointer (VOID *)
#define SET_LINK(p, m, s)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB
#define DBC_DBC_TYPE_NQ_ARM
#define PCI_ROM(_vendor, _device, _name, _description, _data)
#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE
#define CMPL_BASE_TYPE_MASK
#define SET_MEDIUM_SPEED(bp, s)
static int bnxt_hwrm_backing_store_cfg(struct bnxt *bp)
#define GRC_COM_CHAN_TRIG
#define MEDIUM_SPEED_400PAM4GBPS
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST
void writeq(uint64_t data, volatile uint64_t *io_addr)
Write 64-bit qword to memory-mapped device.
struct bofm_section_header done
#define HWRM_CMD_POLL_WAIT_TIME
#define ER_QCFG_RESET_REG_ADDR_MASK
#define MEDIA_AUTO_DETECT_SHIFT
#define IPXE_VERSION_MINOR
#define GRC_COM_CHAN_BASE
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
#define HWRM_PORT_PHY_QCAPS
#define VNIC_ALLOC_REQ_FLAGS_DEFAULT
#define HWRM_CMD_DEFAULT_TIMEOUT
static int bnxt_get_phy_link(struct bnxt *bp)
#define ER_QCFG_FW_RESET_CNT_REG_ADDR_MASK
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX
#define BNXT_FLAG_IS_CHIP_P5_PLUS
void * memset(void *dest, int character, size_t len) __nonnull
int pci_read_config_byte(struct pci_device *pci, unsigned int where, uint8_t *value)
Read byte from PCI configuration space.
#define DBC_MSG_XID(xid, flg)
struct pci_driver bnxt_pci_driver __pci_driver
int bnxt_hwrm_ring_free(struct bnxt *bp, u16 ring_id, u8 ring_type)
static void bnxt_db_cq(struct bnxt *bp)