iPXE
bnxt.c
Go to the documentation of this file.
1
2FILE_LICENCE ( GPL2_ONLY );
3
4#include <mii.h>
5#include <stdio.h>
6#include <string.h>
7#include <errno.h>
8#include <unistd.h>
9#include <byteswap.h>
10#include <ipxe/pci.h>
11#include <ipxe/iobuf.h>
12#include <ipxe/dma.h>
13#include <ipxe/timer.h>
14#include <ipxe/malloc.h>
15#include <ipxe/if_ether.h>
16#include <ipxe/ethernet.h>
17#include <ipxe/netdevice.h>
18#include "bnxt.h"
19#include "bnxt_dbg.h"
20
/* Forward declarations for ring-servicing helpers defined later in this
 * file. */
static void bnxt_service_cq ( struct net_device *dev );
static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx );
static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt );
/* NOTE(review): the line below duplicates the declaration above; the
 * upstream driver declares bnxt_adv_nq_index at this point — confirm
 * against the original source. */
static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt );
static int bnxt_rx_complete ( struct net_device *dev, struct rx_pkt_cmpl *rx );
void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt );
27
28
/** Supported PCI device IDs.
 *
 * driver_data carries BNXT_FLAG_PCI_VF for SR-IOV virtual functions,
 * which is tested by bnxt_is_pci_vf() at probe time.
 */
static struct pci_device_id bnxt_nics[] = {
	PCI_ROM( 0x14e4, 0x1604, "14e4-1604", "Broadcom BCM957454", 0 ),
	PCI_ROM( 0x14e4, 0x1605, "14e4-1605", "Broadcom BCM957454 RDMA", 0 ),
	PCI_ROM( 0x14e4, 0x1606, "14e4-1606", "Broadcom BCM957454 RDMA VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1607, "bcm957454-1607", "Broadcom BCM957454 HV VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1608, "bcm957454-1608", "Broadcom BCM957454 RDMA HV VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1609, "14e4-1609", "Broadcom BCM957454 VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1614, "14e4-1614", "Broadcom BCM957454", 0 ),
	PCI_ROM( 0x14e4, 0x16bd, "bcm95741x-16bd", "Broadcom BCM95741x RDMA_HV_VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x16c0, "14e4-16c0", "Broadcom BCM957417", 0 ),
	PCI_ROM( 0x14e4, 0x16c1, "14e4-16c1", "Broadcom BCM95741x VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x16c5, "bcm95741x-16c5", "Broadcom BCM95741x HV VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x16c8, "14e4-16c8", "Broadcom BCM957301", 0 ),
	PCI_ROM( 0x14e4, 0x16c9, "14e4-16c9", "Broadcom BCM957302", 0 ),
	PCI_ROM( 0x14e4, 0x16ca, "14e4-16ca", "Broadcom BCM957304", 0 ),
	PCI_ROM( 0x14e4, 0x16cc, "14e4-16cc", "Broadcom BCM957417 MF", 0 ),
	PCI_ROM( 0x14e4, 0x16cd, "14e4-16cd", "Broadcom BCM958700", 0 ),
	PCI_ROM( 0x14e4, 0x16ce, "14e4-16ce", "Broadcom BCM957311", 0 ),
	PCI_ROM( 0x14e4, 0x16cf, "14e4-16cf", "Broadcom BCM957312", 0 ),
	PCI_ROM( 0x14e4, 0x16d0, "14e4-16d0", "Broadcom BCM957402", 0 ),
	PCI_ROM( 0x14e4, 0x16d1, "14e4-16d1", "Broadcom BCM957404", 0 ),
	PCI_ROM( 0x14e4, 0x16d2, "14e4-16d2", "Broadcom BCM957406", 0 ),
	PCI_ROM( 0x14e4, 0x16d4, "14e4-16d4", "Broadcom BCM957402 MF", 0 ),
	PCI_ROM( 0x14e4, 0x16d5, "14e4-16d5", "Broadcom BCM957407", 0 ),
	PCI_ROM( 0x14e4, 0x16d6, "14e4-16d6", "Broadcom BCM957412", 0 ),
	PCI_ROM( 0x14e4, 0x16d7, "14e4-16d7", "Broadcom BCM957414", 0 ),
	PCI_ROM( 0x14e4, 0x16d8, "14e4-16d8", "Broadcom BCM957416", 0 ),
	PCI_ROM( 0x14e4, 0x16d9, "14e4-16d9", "Broadcom BCM957417", 0 ),
	PCI_ROM( 0x14e4, 0x16da, "14e4-16da", "Broadcom BCM957402", 0 ),
	PCI_ROM( 0x14e4, 0x16db, "14e4-16db", "Broadcom BCM957404", 0 ),
	PCI_ROM( 0x14e4, 0x16dc, "14e4-16dc", "Broadcom BCM95741x VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x16de, "14e4-16de", "Broadcom BCM957412 MF", 0 ),
	PCI_ROM( 0x14e4, 0x16df, "14e4-16df", "Broadcom BCM957314", 0 ),
	PCI_ROM( 0x14e4, 0x16e0, "14e4-16e0", "Broadcom BCM957317", 0 ),
	PCI_ROM( 0x14e4, 0x16e2, "14e4-16e2", "Broadcom BCM957417", 0 ),
	PCI_ROM( 0x14e4, 0x16e3, "14e4-16e3", "Broadcom BCM957416", 0 ),
	PCI_ROM( 0x14e4, 0x16e4, "14e4-16e4", "Broadcom BCM957317", 0 ),
	PCI_ROM( 0x14e4, 0x16e7, "14e4-16e7", "Broadcom BCM957404 MF", 0 ),
	PCI_ROM( 0x14e4, 0x16e8, "14e4-16e8", "Broadcom BCM957406 MF", 0 ),
	PCI_ROM( 0x14e4, 0x16e9, "14e4-16e9", "Broadcom BCM957407", 0 ),
	PCI_ROM( 0x14e4, 0x16ea, "14e4-16ea", "Broadcom BCM957407 MF", 0 ),
	PCI_ROM( 0x14e4, 0x16eb, "14e4-16eb", "Broadcom BCM957412 RDMA MF", 0 ),
	PCI_ROM( 0x14e4, 0x16ec, "14e4-16ec", "Broadcom BCM957414 MF", 0 ),
	PCI_ROM( 0x14e4, 0x16ed, "14e4-16ed", "Broadcom BCM957414 RDMA MF", 0 ),
	PCI_ROM( 0x14e4, 0x16ee, "14e4-16ee", "Broadcom BCM957416 MF", 0 ),
	PCI_ROM( 0x14e4, 0x16ef, "14e4-16ef", "Broadcom BCM957416 RDMA MF", 0 ),
	PCI_ROM( 0x14e4, 0x16f0, "14e4-16f0", "Broadcom BCM957320", 0 ),
	PCI_ROM( 0x14e4, 0x16f1, "14e4-16f1", "Broadcom BCM957320", 0 ),
	PCI_ROM( 0x14e4, 0x1750, "14e4-1750", "Broadcom BCM957508", 0 ),
	PCI_ROM( 0x14e4, 0x1751, "14e4-1751", "Broadcom BCM957504", 0 ),
	PCI_ROM( 0x14e4, 0x1752, "14e4-1752", "Broadcom BCM957502", 0 ),
	PCI_ROM( 0x14e4, 0x1760, "14e4-1760", "Broadcom BCM957608", 0 ),
	PCI_ROM( 0x14e4, 0x1800, "14e4-1800", "Broadcom BCM957502 MF", 0 ),
	PCI_ROM( 0x14e4, 0x1801, "14e4-1801", "Broadcom BCM957504 MF", 0 ),
	PCI_ROM( 0x14e4, 0x1802, "14e4-1802", "Broadcom BCM957508 MF", 0 ),
	PCI_ROM( 0x14e4, 0x1803, "14e4-1803", "Broadcom BCM957502 RDMA MF", 0 ),
	PCI_ROM( 0x14e4, 0x1804, "14e4-1804", "Broadcom BCM957504 RDMA MF", 0 ),
	PCI_ROM( 0x14e4, 0x1805, "14e4-1805", "Broadcom BCM957508 RDMA MF", 0 ),
	PCI_ROM( 0x14e4, 0x1806, "14e4-1806", "Broadcom BCM9575xx VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1807, "14e4-1807", "Broadcom BCM9575xx RDMA VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1808, "14e4-1808", "Broadcom BCM9575xx HV VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1809, "14e4-1809", "Broadcom BCM9575xx RDMA HV VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1819, "bcm95760x-1819", "Broadcom BCM95760x VF", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x181b, "bcm95760x-181b", "Broadcom BCM95760x HV VF", BNXT_FLAG_PCI_VF ),
};
94
/**
 * Check if Virtual Function
 *
 * Returns non-zero when the matched PCI ID carries BNXT_FLAG_PCI_VF in
 * its driver_data, i.e. the device is an SR-IOV virtual function.
 */
/* NOTE(review): the function signature line is missing from this extract;
 * expected something like: static int bnxt_is_pci_vf ( struct pci_device
 * *pdev ) — confirm against the original source. */
{
	if ( FLAG_TEST ( pdev->id->driver_data, BNXT_FLAG_PCI_VF ) ) {
		return 1;
	}
	return 0;
}
105
106static void bnxt_down_pci ( struct bnxt *bp )
107{
108 DBGP ( "%s\n", __func__ );
109 if ( bp->bar2 ) {
110 iounmap ( bp->bar2 );
111 bp->bar2 = NULL;
112 }
113 if ( bp->bar1 ) {
114 iounmap ( bp->bar1 );
115 bp->bar1 = NULL;
116 }
117 if ( bp->bar0 ) {
118 iounmap ( bp->bar0 );
119 bp->bar0 = NULL;
120 }
121}
122
/**
 * Map a PCI BAR into the driver's address space.
 *
 * @v pdev	PCI device
 * @v reg	BAR register offset (PCI_BASE_ADDRESS_x)
 * @ret ptr	Mapped virtual address, or NULL on failure
 */
static void *bnxt_pci_base ( struct pci_device *pdev, unsigned int reg )
{
	unsigned long start = pci_bar_start ( pdev, reg );
	unsigned long size = pci_bar_size ( pdev, reg );

	return pci_ioremap ( pdev, start, size );
}
131
/**
 * Cache PCI configuration details and map the device BARs.
 *
 * Disables INTx delivery (the driver polls), records subsystem IDs and
 * the function number, maps BARs 0/2/4 and notes whether this device is
 * an SR-IOV virtual function.  Always returns STATUS_SUCCESS.
 */
static int bnxt_get_pci_info ( struct bnxt *bp )
{
	u16 cmd_reg = 0;

	DBGP ( "%s\n", __func__ );
	/* Disable Interrupt */
	pci_read_config_word ( bp->pdev, PCI_COMMAND, &bp->cmd_reg );
	cmd_reg = bp->cmd_reg | PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word ( bp->pdev, PCI_COMMAND, cmd_reg );
	pci_read_config_word ( bp->pdev, PCI_COMMAND, &cmd_reg );

	/* SSVID */
	/* NOTE(review): the register-offset argument is missing from this
	 * extract (expected PCI_SUBSYSTEM_VENDOR_ID) — confirm. */
	pci_read_config_word ( bp->pdev,
		&bp->subsystem_vendor );

	/* SSDID */
	/* NOTE(review): register-offset argument missing (expected
	 * PCI_SUBSYSTEM_ID) — confirm. */
	pci_read_config_word ( bp->pdev,
		&bp->subsystem_device );

	/* Function Number */
	/* NOTE(review): register-offset argument missing from this
	 * extract — confirm against the original source. */
	pci_read_config_byte ( bp->pdev,
		&bp->pf_num );

	/* Get Bar Address */
	bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 );
	bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 );
	bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 );

	/* Virtual function */
	bp->vf = bnxt_is_pci_vf ( bp->pdev );

	dbg_pci ( bp, __func__, cmd_reg );
	return STATUS_SUCCESS;
}
169
170static int bnxt_get_device_address ( struct bnxt *bp )
171{
172 struct net_device *dev = bp->dev;
173
174 DBGP ( "%s\n", __func__ );
175 memcpy ( &dev->hw_addr[0], ( char * )&bp->mac_addr[0], ETH_ALEN );
176 if ( !is_valid_ether_addr ( &dev->hw_addr[0] ) ) {
177 DBGP ( "- %s ( ): Failed\n", __func__ );
178 return -EINVAL;
179 }
180
181 return STATUS_SUCCESS;
182}
183
184static void bnxt_set_link ( struct bnxt *bp )
185{
186 if ( bp->link_status == STATUS_LINK_ACTIVE )
187 netdev_link_up ( bp->dev );
188 else
189 netdev_link_down ( bp->dev );
190}
191
192static void dev_p5_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag )
193{
194 void *off;
195 u64 val;
196
197 if ( bp->vf )
198 off = ( void * ) ( bp->bar1 + DB_OFFSET_VF );
199 else
200 off = ( void * ) ( bp->bar1 + DB_OFFSET_PF );
201
202 val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) |
203 ( u64 )DBC_MSG_IDX ( idx );
204 writeq ( val, off );
205}
206
207static void dev_p7_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag, u32 epoch, u32 toggle )
208{
209 void *off;
210 u64 val;
211
212 off = ( void * ) ( bp->bar1 );
213
214 val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) |
215 ( u64 )DBC_MSG_IDX ( idx ) |
216 ( u64 )DBC_MSG_EPCH ( epoch ) |
217 ( u64 )DBC_MSG_TOGGLE ( toggle );
218 writeq ( val, off );
219}
220
221static void bnxt_db_nq ( struct bnxt *bp )
222{
223 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
224 dev_p7_db ( bp, ( u32 )bp->nq.cons_id,
225 ( u32 )bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM,
226 ( u32 )bp->nq.epoch, 0 );
227 else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
228 dev_p5_db ( bp, ( u32 )bp->nq.cons_id,
229 ( u32 )bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM );
230 else
231 writel ( CMPL_DOORBELL_KEY_CMPL, ( bp->bar1 + 0 ) );
232}
233
234static void bnxt_db_cq ( struct bnxt *bp )
235{
236 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
237 dev_p7_db ( bp, ( u32 )bp->cq.cons_id,
238 ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ,
239 ( u32 )bp->cq.epoch, ( u32 )bp->nq.toggle );
240 else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
241 dev_p5_db ( bp, ( u32 )bp->cq.cons_id,
242 ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ);
243 else
244 writel ( CQ_DOORBELL_KEY_IDX ( bp->cq.cons_id ),
245 ( bp->bar1 + 0 ) );
246}
247
248static void bnxt_db_rx ( struct bnxt *bp, u32 idx )
249{
250 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
251 dev_p7_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ,
252 ( u32 )bp->rx.epoch, 0 );
253 else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
254 dev_p5_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ );
255 else
256 writel ( RX_DOORBELL_KEY_RX | idx, ( bp->bar1 + 0 ) );
257}
258
259static void bnxt_db_tx ( struct bnxt *bp, u32 idx )
260{
261 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
262 dev_p7_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ,
263 ( u32 )bp->tx.epoch, 0 );
264 else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
265 dev_p5_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ );
266 else
267 writel ( ( u32 ) ( TX_DOORBELL_KEY_TX | idx ),
268 ( bp->bar1 + 0 ) );
269}
270
271void bnxt_add_vlan ( struct io_buffer *iob, u16 vlan )
272{
273 char *src = ( char * )iob->data;
274 u16 len = iob_len ( iob );
275
276 memmove ( ( char * )&src[MAC_HDR_SIZE + VLAN_HDR_SIZE],
277 ( char * )&src[MAC_HDR_SIZE],
278 ( len - MAC_HDR_SIZE ) );
279
280 * ( u16 * ) ( &src[MAC_HDR_SIZE] ) = BYTE_SWAP_S ( ETHERTYPE_VLAN );
281 * ( u16 * ) ( &src[MAC_HDR_SIZE + 2] ) = BYTE_SWAP_S ( vlan );
282 iob_put ( iob, VLAN_HDR_SIZE );
283}
284
285static u16 bnxt_get_pkt_vlan ( char *src )
286{
287 if ( * ( ( u16 * )&src[MAC_HDR_SIZE] ) == BYTE_SWAP_S ( ETHERTYPE_VLAN ) )
288 return BYTE_SWAP_S ( * ( ( u16 * )&src[MAC_HDR_SIZE + 2] ) );
289 return 0;
290}
291
292static inline u32 bnxt_tx_avail ( struct bnxt *bp )
293{
294 u32 avail;
295 u32 use;
296
297 barrier ( );
298 avail = TX_AVAIL ( bp->tx.ring_cnt );
299 use = TX_IN_USE ( bp->tx.prod_id, bp->tx.cons_id, bp->tx.ring_cnt );
300 dbg_tx_avail ( bp, avail, use );
301 return ( avail-use );
302}
303
/**
 * Fill in a TX buffer descriptor for the given ring entry.
 *
 * @v entry	TX ring slot index (also stored as the BD opaque value)
 * @v mapping	DMA address of the packet data
 * @v len	Packet length in bytes
 */
void bnxt_set_txq ( struct bnxt *bp, int entry, physaddr_t mapping, int len )
{
	struct tx_bd_short *prod_bd;

	prod_bd = ( struct tx_bd_short * )BD_NOW ( bp->tx.bd_virt,
		entry, sizeof ( struct tx_bd_short ) );
	/* NOTE(review): the bodies of the length-bucket branches below are
	 * missing from this extract (they select the per-size BD length
	 * hint flags) — confirm against the original source. */
	if ( len < 512 )
	else if ( len < 1024 )
	else if ( len < 2048 )
	else
	prod_bd->flags_type |= TX_BD_FLAGS;
	prod_bd->dma = mapping;
	prod_bd->len = len;
	prod_bd->opaque = ( u32 )entry;
}
323
324static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx )
325{
326 struct bnxt *bp = dev->priv;
327 struct io_buffer *iob;
328
329 iob = bp->tx.iob[hw_idx];
330 dbg_tx_done ( iob->data, iob_len ( iob ), hw_idx );
331 netdev_tx_complete ( dev, iob );
332 bp->tx.cons_id = NEXT_IDX ( hw_idx, bp->tx.ring_cnt );
333 bp->tx.cnt++;
334 dump_tx_stat ( bp );
335}
336
337int bnxt_free_rx_iob ( struct bnxt *bp )
338{
339 unsigned int i;
340
341 DBGP ( "%s\n", __func__ );
342 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RX_IOB ) ) )
343 return STATUS_SUCCESS;
344
345 for ( i = 0; i < bp->rx.buf_cnt; i++ ) {
346 if ( bp->rx.iob[i] ) {
347 free_rx_iob ( bp->rx.iob[i] );
348 bp->rx.iob[i] = NULL;
349 }
350 }
351 bp->rx.iob_cnt = 0;
352
353 FLAG_RESET ( bp->flag_hwrm, VALID_RX_IOB );
354 return STATUS_SUCCESS;
355}
356
/**
 * Fill in an RX producer buffer descriptor for the given ring slot.
 *
 * @v buf	Base of the RX BD ring
 * @v iob	I/O buffer to attach
 * @v cid	Ring slot (selects the descriptor offset)
 * @v idx	Buffer-table index, stored as the BD opaque value
 */
static void bnxt_set_rx_desc ( u8 *buf, struct io_buffer *iob,
		u16 cid, u32 idx )
{
	struct rx_prod_pkt_bd *desc;
	u16 off = cid * sizeof ( struct rx_prod_pkt_bd );

	desc = ( struct rx_prod_pkt_bd * )&buf[off];
	/* NOTE(review): the descriptor flags_type/len assignments are
	 * missing from this extract — confirm against the original
	 * source. */
	desc->opaque = idx;
	desc->dma = iob_dma ( iob );
}
369
370static int bnxt_alloc_rx_iob ( struct bnxt *bp, u16 cons_id, u16 iob_idx )
371{
372 struct io_buffer *iob;
373
374 iob = alloc_rx_iob ( BNXT_RX_STD_DMA_SZ, bp->dma );
375 if ( !iob ) {
376 DBGP ( "- %s ( ): alloc_iob Failed\n", __func__ );
377 return -ENOMEM;
378 }
379
380 dbg_alloc_rx_iob ( iob, iob_idx, cons_id );
381 bnxt_set_rx_desc ( ( u8 * )bp->rx.bd_virt, iob, cons_id,
382 ( u32 ) iob_idx );
383 bp->rx.iob[iob_idx] = iob;
384 return 0;
385}
386
/* NOTE(review): the function signature line is missing from this extract;
 * expected something like: int bnxt_post_rx_buffers ( struct bnxt *bp ) —
 * confirm against the original source.  The body tops up the RX ring with
 * freshly allocated buffers and rings the RX doorbell if any were
 * posted. */
{
	u16 cons_id = ( bp->rx.cons_id % bp->rx.ring_cnt );
	u16 iob_idx;

	/* Fill every empty buffer slot, stopping early on alloc failure. */
	while ( bp->rx.iob_cnt < bp->rx.buf_cnt ) {
		iob_idx = ( cons_id % bp->rx.buf_cnt );
		if ( !bp->rx.iob[iob_idx] ) {
			if ( bnxt_alloc_rx_iob ( bp, cons_id, iob_idx ) < 0 ) {
				dbg_alloc_rx_iob_fail ( iob_idx, cons_id );
				break;
			}
		}
		cons_id = NEXT_IDX ( cons_id, bp->rx.ring_cnt );
		/* If the ring has wrapped, flip the epoch bit */
		if ( iob_idx > cons_id )
			bp->rx.epoch ^= 1;
		bp->rx.iob_cnt++;
	}

	/* Only ring the doorbell when new buffers were actually posted. */
	if ( cons_id != bp->rx.cons_id ) {
		dbg_rx_cid ( bp->rx.cons_id, cons_id );
		bp->rx.cons_id = cons_id;
		bnxt_db_rx ( bp, ( u32 )cons_id );
	}

	FLAG_SET ( bp->flag_hwrm, VALID_RX_IOB );
	return STATUS_SUCCESS;
}
416
/**
 * Decide whether a received packet should be dropped.
 *
 * @ret drop	0 to accept (buffer length is grown to rx_len),
 *		1 on hardware-reported buffer error,
 *		2 for a looped-back frame (source MAC equals our own)
 */
u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob,
		struct rx_pkt_cmpl *rx_cmp,
		struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len )
{
	struct rx_pkt_v3_cmpl *rx_cmp_v3 = ( struct rx_pkt_v3_cmpl * )rx_cmp;
	struct rx_pkt_v3_cmpl_hi *rx_cmp_hi_v3 = ( struct rx_pkt_v3_cmpl_hi * )rx_cmp_hi;
	u8 *rx_buf = ( u8 * )iob->data;
	u16 err_flags;
	u8 ignore_chksum_err = 0;
	int i;

	/* NOTE(review): the right-hand side of this comparison is missing
	 * from this extract (expected the RX_PKT_V3 L2 completion type
	 * constant) — confirm against the original source. */
	if ( ( rx_cmp_v3->flags_type & RX_PKT_V3_CMPL_TYPE_MASK ) ==
		err_flags = rx_cmp_hi_v3->errors_v2 >> RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_SFT;
	} else
		err_flags = rx_cmp_hi->errors_v2 >> RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT;
	/* 0x20/0x21 are checksum-only errors; tolerate them. */
	if ( rx_cmp_hi->errors_v2 == 0x20 || rx_cmp_hi->errors_v2 == 0x21 )
		ignore_chksum_err = 1;

	if ( err_flags && !ignore_chksum_err ) {
		bp->rx.drop_err++;
		return 1;
	}

	/* Compare the source MAC (bytes 6..11) against our own address. */
	for ( i = 0; i < 6; i++ ) {
		if ( rx_buf[6 + i] != bp->mac_addr[i] )
			break;
	}

	/* Drop the loopback packets */
	if ( i == 6 ) {
		bp->rx.drop_lb++;
		return 2;
	}

	iob_put ( iob, rx_len );

	bp->rx.good++;
	return 0;
}
457
458static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt )
459{
460 u16 cons_id;
461
462 cons_id = bp->cq.cons_id + cnt;
463 if ( cons_id >= bp->cq.ring_cnt) {
464 /* Toggle completion bit when the ring wraps. */
465 bp->cq.completion_bit ^= 1;
466 bp->cq.epoch ^= 1;
467 cons_id = cons_id - bp->cq.ring_cnt;
468 }
469 bp->cq.cons_id = cons_id;
470}
471
/**
 * Hand a completed RX packet to the network stack.
 *
 * Looks up the I/O buffer via the completion's opaque index, applies the
 * drop filter, delivers (or error-reports) the packet, releases the ring
 * slot and advances the CQ consumer index past the two-entry completion.
 */
void bnxt_rx_process ( struct net_device *dev, struct bnxt *bp,
		struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi )
{
	u32 desc_idx = rx_cmp->opaque;
	struct io_buffer *iob = bp->rx.iob[desc_idx];
	u8 drop;

	dump_rx_bd ( rx_cmp, rx_cmp_hi, desc_idx );
	assert ( iob );
	drop = bnxt_rx_drop ( bp, iob, rx_cmp, rx_cmp_hi, rx_cmp->len );
	dbg_rxp ( iob->data, rx_cmp->len, drop );
	if ( drop )
		netdev_rx_err ( dev, iob, -EINVAL );
	else
		netdev_rx ( dev, iob );

	bp->rx.cnt++;
	bp->rx.iob[desc_idx] = NULL;
	bp->rx.iob_cnt--;
	/* NOTE(review): one line is missing from this extract here
	 * (upstream reposts RX buffers at this point) — confirm against
	 * the original source. */
	bnxt_adv_cq_index ( bp, 2 ); /* Rx completion is 2 entries. */
	dbg_rx_stat ( bp );
}
495
/**
 * Check an RX completion's valid bit and, if set, process the packet.
 *
 * The second (high) half of the completion is the next CQ entry; when
 * the low half sits on the last ring slot the high half wraps to the
 * start of the ring and the expected valid bit is inverted.
 */
static int bnxt_rx_complete ( struct net_device *dev,
		struct rx_pkt_cmpl *rx_cmp )
{
	struct bnxt *bp = dev->priv;
	struct rx_pkt_cmpl_hi *rx_cmp_hi;
	u8 cmpl_bit = bp->cq.completion_bit;

	if ( bp->cq.cons_id == ( bp->cq.ring_cnt - 1 ) ) {
		rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) CQ_DMA_ADDR ( bp );
		cmpl_bit ^= 0x1; /* Ring has wrapped. */
	} else
		rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) ( rx_cmp+1 );

	if ( ! ( ( rx_cmp_hi->errors_v2 & RX_PKT_CMPL_V2 ) ^ cmpl_bit ) ) {
		bnxt_rx_process ( dev, bp, rx_cmp, rx_cmp_hi );
		return SERVICE_NEXT_CQ_BD;
	} else
	/* NOTE(review): the else-branch return statement is missing from
	 * this extract (a "no more completions" status is expected) —
	 * confirm against the original source. */
}
515
516void bnxt_mm_init_hwrm ( struct bnxt *bp, const char *func )
517{
518 DBGP ( "%s\n", __func__ );
519 memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE );
520 memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE );
521 memset ( bp->hwrm_addr_dma, 0, DMA_BUFFER_SIZE );
522 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
523 bp->hwrm_cmd_timeout = HWRM_CMD_DEFAULT_TIMEOUT;
524 dbg_mem ( bp, func );
525}
526
/**
 * Zero all descriptor rings and set default ring sizes and link state.
 *
 * @v func	Caller name, passed through to the debug dump
 */
void bnxt_mm_init_rings ( struct bnxt *bp, const char *func )
{
	DBGP ( "%s\n", __func__ );
	memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE );
	memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE );
	memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE );
	memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE );

	bp->link_status = STATUS_LINK_DOWN;
	bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
	/* NOTE(review): one line is missing from this extract here —
	 * confirm against the original source. */
	bp->nq.ring_cnt = MAX_NQ_DESC_CNT;
	bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
	bp->tx.ring_cnt = MAX_TX_DESC_CNT;
	bp->rx.ring_cnt = MAX_RX_DESC_CNT;
	bp->rx.buf_cnt = NUM_RX_BUFFERS;
	dbg_mem ( bp, func );
}
545
/**
 * Reset all ring state for (re)starting the NIC.
 *
 * Zeroes every descriptor ring and resets producer/consumer indices,
 * completion bits, epoch/toggle bits and ring sizes to their defaults.
 */
void bnxt_mm_nic ( struct bnxt *bp )
{
	DBGP ( "%s\n", __func__ );
	memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE );
	memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE );
	memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE );
	memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE );
	bp->nq.cons_id = 0;
	bp->nq.completion_bit = 0x1;
	bp->nq.epoch = 0;
	bp->nq.toggle = 0;
	bp->cq.cons_id = 0;
	bp->cq.completion_bit = 0x1;
	bp->cq.epoch = 0;
	bp->tx.prod_id = 0;
	bp->tx.cons_id = 0;
	bp->tx.epoch = 0;
	bp->rx.cons_id = 0;
	bp->rx.iob_cnt = 0;
	bp->rx.epoch = 0;

	/* NOTE(review): one line is missing from this extract here —
	 * confirm against the original source. */
	bp->nq.ring_cnt = MAX_NQ_DESC_CNT;
	bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
	bp->tx.ring_cnt = MAX_TX_DESC_CNT;
	bp->rx.ring_cnt = MAX_RX_DESC_CNT;
	bp->rx.buf_cnt = NUM_RX_BUFFERS;
}
574
/* NOTE(review): the function signature line is missing from this extract;
 * expected something like: void bnxt_free_mem ( struct bnxt *bp ) —
 * confirm against the original source.  The body frees every descriptor
 * ring DMA buffer, in reverse order of allocation, nulling each pointer
 * so the function is safe to call more than once. */
{
	DBGP ( "%s\n", __func__ );
	if ( bp->nq.bd_virt ) {
		dma_free ( &bp->nq_mapping, bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
		bp->nq.bd_virt = NULL;
	}

	if ( bp->cq.bd_virt ) {
		dma_free ( &bp->cq_mapping, bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
		bp->cq.bd_virt = NULL;
	}

	if ( bp->rx.bd_virt ) {
		dma_free ( &bp->rx_mapping, bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
		bp->rx.bd_virt = NULL;
	}

	if ( bp->tx.bd_virt ) {
		dma_free ( &bp->tx_mapping, bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
		bp->tx.bd_virt = NULL;
	}

	DBGP ( "- %s ( ): - Done\n", __func__ );
}
600
601void bnxt_free_hwrm_mem ( struct bnxt *bp )
602{
603 DBGP ( "%s\n", __func__ );
604 if ( bp->hwrm_addr_dma ) {
605 dma_free ( &bp->dma_mapped, bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
606 bp->hwrm_addr_dma = NULL;
607 }
608
609 if ( bp->hwrm_addr_resp ) {
610 dma_free ( &bp->resp_mapping, bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
611 bp->hwrm_addr_resp = NULL;
612 }
613
614 if ( bp->hwrm_addr_req ) {
615 dma_free ( &bp->req_mapping, bp->hwrm_addr_req, REQ_BUFFER_SIZE );
616 bp->hwrm_addr_req = NULL;
617 }
618 DBGP ( "- %s ( ): - Done\n", __func__ );
619}
620
/* NOTE(review): the function signature line is missing from this extract;
 * expected something like: int bnxt_alloc_hwrm_mem ( struct bnxt *bp ) —
 * confirm against the original source.  The dma_alloc size/alignment
 * argument lines are also missing from each call below. */
{
	DBGP ( "%s\n", __func__ );
	bp->hwrm_addr_req = dma_alloc ( bp->dma, &bp->req_mapping,
	bp->hwrm_addr_resp = dma_alloc ( bp->dma, &bp->resp_mapping,
	bp->hwrm_addr_dma = dma_alloc ( bp->dma, &bp->dma_mapped,

	/* All three buffers must be present for HWRM to function. */
	if ( bp->hwrm_addr_req &&
		bp->hwrm_addr_resp &&
		bp->hwrm_addr_dma ) {
		bnxt_mm_init_hwrm ( bp, __func__ );
		return STATUS_SUCCESS;
	}

	DBGP ( "- %s ( ): Failed\n", __func__ );
	/* NOTE(review): a cleanup call (freeing any partial allocations)
	 * appears to be missing from this extract before the return —
	 * confirm against the original source. */
	return -ENOMEM;
}
642
/* NOTE(review): the function signature line is missing from this extract;
 * expected something like: int bnxt_alloc_mem ( struct bnxt *bp ) —
 * confirm against the original source.  The dma_alloc size/alignment
 * argument lines are also missing from each call below. */
{
	DBGP ( "%s\n", __func__ );
	bp->tx.bd_virt = dma_alloc ( bp->dma, &bp->tx_mapping,
	bp->rx.bd_virt = dma_alloc ( bp->dma, &bp->rx_mapping,
	bp->cq.bd_virt = dma_alloc ( bp->dma, &bp->cq_mapping,
	bp->nq.bd_virt = dma_alloc ( bp->dma, &bp->nq_mapping,
	/* All four descriptor rings must be present. */
	if ( bp->tx.bd_virt &&
		bp->rx.bd_virt &&
		bp->nq.bd_virt &&
		bp->cq.bd_virt ) {
		bnxt_mm_init_rings ( bp, __func__ );
		return STATUS_SUCCESS;
	}

	DBGP ( "- %s ( ): Failed\n", __func__ );
	/* NOTE(review): a cleanup call (freeing any partial allocations)
	 * appears to be missing from this extract before the return —
	 * confirm against the original source. */
	return -ENOMEM;
}
666
/**
 * Initialise a zeroed HWRM request header.
 *
 * @v req	Request buffer to initialise
 * @v cmd	HWRM command code
 * @v len	Request length in bytes
 */
static void hwrm_init ( struct bnxt *bp, struct input *req, u16 cmd, u16 len )
{
	memset ( req, 0, len );
	req->req_type = cmd;
	/* NOTE(review): two header-field assignment lines are missing from
	 * this extract (cmpl_ring/target_id are expected here) — confirm
	 * against the original source. */
	req->resp_addr = RESP_DMA_ADDR ( bp );
	req->seq_id = bp->seq_id++;
}
676
677static void hwrm_write_req ( struct bnxt *bp, void *req, u32 cnt )
678{
679 u32 i = 0;
680
681 for ( i = 0; i < cnt; i++ ) {
682 writel ( ( ( u32 * )req )[i],
683 ( bp->bar0 + GRC_COM_CHAN_BASE + ( i * 4 ) ) );
684 }
685 writel ( 0x1, ( bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG ) );
686}
687
/**
 * Issue an HWRM request via the short-command format.
 *
 * Wraps the already-prepared request (in the request DMA buffer) in a
 * hwrm_short_input descriptor and writes that to the GRC channel.
 *
 * @v len	Length of the underlying request in bytes
 */
static void short_hwrm_cmd_req ( struct bnxt *bp, u16 len )
{
	struct hwrm_short_input sreq;

	memset ( &sreq, 0, sizeof ( struct hwrm_short_input ) );
	sreq.req_type = ( u16 ) ( ( struct input * ) REQ_DMA_ADDR (bp ) )->req_type;
	/* NOTE(review): one field assignment is missing from this extract
	 * (the short-command signature is expected here) — confirm against
	 * the original source. */
	sreq.size = len;
	sreq.req_addr = REQ_DMA_ADDR ( bp );
	mdelay ( 100 );
	dbg_short_cmd ( ( u8 * )&sreq, __func__,
		sizeof ( struct hwrm_short_input ) );
	hwrm_write_req ( bp, &sreq, sizeof ( struct hwrm_short_input ) / 4 );
}
702
/**
 * Send the prepared HWRM request and poll for its response.
 *
 * @v tmo	Command timeout, converted to a poll-iteration count
 * @v len	Request length in bytes
 * @v func	Caller name for the debug trace
 * @ret rc	Firmware error code, or STATUS_TIMEOUT on no response
 */
static int wait_resp ( struct bnxt *bp, u32 tmo, u16 len, const char *func )
{
	struct input *req = ( struct input * ) REQ_DMA_ADDR ( bp );
	struct output *resp = ( struct output * ) RESP_DMA_ADDR ( bp );
	u8 *ptr = ( u8 * )resp;
	u32 idx;
	u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER ( ( u32 )tmo );
	u16 resp_len = 0;
	u16 ret = STATUS_TIMEOUT;

	/* NOTE(review): the oversized-request branch body is missing from
	 * this extract (a short-format command is expected here) — confirm
	 * against the original source. */
	if ( len > bp->hwrm_max_req_len )
	else
		hwrm_write_req ( bp, req, ( u32 ) ( len / 4 ) );

	for ( idx = 0; idx < wait_cnt; idx++ ) {
		resp_len = resp->resp_len;
		/* A response is complete once seq/type match the request
		 * and the final byte (valid marker) reads 1. */
		if ( resp->seq_id == req->seq_id &&
			resp->req_type == req->req_type &&
			ptr[resp_len - 1] == 1 ) {
			bp->last_resp_code = resp->error_code;
			ret = resp->error_code;
			break;
		}
		/* NOTE(review): the per-iteration delay line is missing
		 * from this extract — confirm against the original
		 * source. */
	}
	dbg_hw_cmd ( bp, func, len, resp_len, tmo, ret );
	return ( int )ret;
}
732
/**
 * Query firmware version and capabilities (HWRM_VER_GET).
 *
 * Records interface version, command timeout, maximum request length,
 * chip identification, short-command support and chip-generation flags
 * (P5/P7) in the driver context.
 */
static int bnxt_hwrm_ver_get ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_ver_get_input );
	struct hwrm_ver_get_input *req;
	struct hwrm_ver_get_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_ver_get_input * ) REQ_DMA_ADDR ( bp );
	resp = ( struct hwrm_ver_get_output * ) RESP_DMA_ADDR ( bp );
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VER_GET, cmd_len );
	/* NOTE(review): the request-field lines advertising the driver's
	 * HWRM interface version are missing from this extract — confirm
	 * against the original source. */
	rc = wait_resp ( bp, HWRM_CMD_DEFAULT_TIMEOUT, cmd_len, __func__ );
	if ( rc )
		return STATUS_FAILURE;

	bp->hwrm_spec_code =
		resp->hwrm_intf_maj_8b << 16 |
		resp->hwrm_intf_min_8b << 8 |
		resp->hwrm_intf_upd_8b;
	bp->hwrm_cmd_timeout = ( u32 )resp->def_req_timeout;
	if ( !bp->hwrm_cmd_timeout )
		bp->hwrm_cmd_timeout = ( u32 )HWRM_CMD_DEFAULT_TIMEOUT;
	if ( resp->hwrm_intf_maj_8b >= 1 )
		bp->hwrm_max_req_len = resp->max_req_win_len;
	bp->chip_id =
		resp->chip_rev << 24 |
		resp->chip_metal << 16 |
		resp->chip_bond_id << 8 |
		resp->chip_platform_type;
	bp->chip_num = resp->chip_num;
	/* NOTE(review): the short-command FLAG_SET line is missing from
	 * this extract between the condition and the assignment below —
	 * confirm against the original source. */
	if ( ( resp->dev_caps_cfg & SHORT_CMD_SUPPORTED ) &&
		( resp->dev_caps_cfg & SHORT_CMD_REQUIRED ) )
	bp->hwrm_max_ext_req_len = resp->max_ext_req_len;
	/* NOTE(review): additional FLAG_SET lines (P5-plus family) are
	 * missing inside the two chip-generation branches below. */
	if ( ( bp->chip_num == CHIP_NUM_57508 ) ||
		( bp->chip_num == CHIP_NUM_57504 ) ||
		( bp->chip_num == CHIP_NUM_57502 ) ) {
		FLAG_SET ( bp->flags, BNXT_FLAG_IS_CHIP_P5 );
	}
	if ( bp->chip_num == CHIP_NUM_57608 ) {
		FLAG_SET ( bp->flags, BNXT_FLAG_IS_CHIP_P7 );
	}
	dbg_fw_ver ( resp, bp->hwrm_cmd_timeout );
	return STATUS_SUCCESS;
}
783
/* NOTE(review): the function signature and the req/resp local variable
 * declarations are missing from this extract; expected something like:
 * static int bnxt_hwrm_func_resource_qcaps ( struct bnxt *bp ) with
 * hwrm_func_resource_qcaps_input/_output pointers — confirm against the
 * original source.  Queries min/max resource counts (rings, vNICs,
 * contexts) for this function; a failure is treated as "not supported"
 * and still returns STATUS_SUCCESS. */
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_resource_qcaps_input );
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_func_resource_qcaps_input * ) REQ_DMA_ADDR ( bp );
	resp = ( struct hwrm_func_resource_qcaps_output * ) RESP_DMA_ADDR ( bp );
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESOURCE_QCAPS,
		cmd_len );
	req->fid = ( u16 )HWRM_NA_SIGNATURE;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	/* Unsupported on older firmware: not an error for the caller. */
	if ( rc != STATUS_SUCCESS )
		return STATUS_SUCCESS;

	/* NOTE(review): one line is missing from this extract here
	 * (a flag recording resource-qcaps support is expected) —
	 * confirm against the original source. */

	// VFs
	if ( !bp->vf ) {
		bp->max_vfs = resp->max_vfs;
		bp->vf_res_strategy = resp->vf_reservation_strategy;
	}

	// vNICs
	bp->min_vnics = resp->min_vnics;
	bp->max_vnics = resp->max_vnics;

	// MSI-X
	bp->max_msix = resp->max_msix;

	// Ring Groups
	bp->min_hw_ring_grps = resp->min_hw_ring_grps;
	bp->max_hw_ring_grps = resp->max_hw_ring_grps;

	// TX Rings
	bp->min_tx_rings = resp->min_tx_rings;
	bp->max_tx_rings = resp->max_tx_rings;

	// RX Rings
	bp->min_rx_rings = resp->min_rx_rings;
	bp->max_rx_rings = resp->max_rx_rings;

	// Completion Rings
	bp->min_cp_rings = resp->min_cmpl_rings;
	bp->max_cp_rings = resp->max_cmpl_rings;

	// RSS Contexts
	bp->min_rsscos_ctxs = resp->min_rsscos_ctx;
	bp->max_rsscos_ctxs = resp->max_rsscos_ctx;

	// L2 Contexts
	bp->min_l2_ctxs = resp->min_l2_ctxs;
	bp->max_l2_ctxs = resp->max_l2_ctxs;

	// Statistic Contexts
	bp->min_stat_ctxs = resp->min_stat_ctx;
	bp->max_stat_ctxs = resp->max_stat_ctx;
	return STATUS_SUCCESS;
}
846
/**
 * Choose ring/context counts for FUNC_CFG.
 *
 * Starts from driver defaults and clamps each count down to the
 * firmware-reported minimum when that minimum is lower.
 *
 * @ret enables	FUNC_CFG enables bitmap for the chosen fields
 */
static u32 bnxt_set_ring_info ( struct bnxt *bp )
{
	u32 enables = 0;

	DBGP ( "%s\n", __func__ );
	bp->num_cmpl_rings = DEFAULT_NUMBER_OF_CMPL_RINGS;
	bp->num_tx_rings = DEFAULT_NUMBER_OF_TX_RINGS;
	bp->num_rx_rings = DEFAULT_NUMBER_OF_RX_RINGS;
	bp->num_hw_ring_grps = DEFAULT_NUMBER_OF_RING_GRPS;
	bp->num_stat_ctxs = DEFAULT_NUMBER_OF_STAT_CTXS;

	if ( bp->min_cp_rings <= DEFAULT_NUMBER_OF_CMPL_RINGS )
		bp->num_cmpl_rings = bp->min_cp_rings;

	if ( bp->min_tx_rings <= DEFAULT_NUMBER_OF_TX_RINGS )
		bp->num_tx_rings = bp->min_tx_rings;

	if ( bp->min_rx_rings <= DEFAULT_NUMBER_OF_RX_RINGS )
		bp->num_rx_rings = bp->min_rx_rings;

	if ( bp->min_hw_ring_grps <= DEFAULT_NUMBER_OF_RING_GRPS )
		bp->num_hw_ring_grps = bp->min_hw_ring_grps;

	if ( bp->min_stat_ctxs <= DEFAULT_NUMBER_OF_STAT_CTXS )
		bp->num_stat_ctxs = bp->min_stat_ctxs;

	dbg_num_rings ( bp );
	/* NOTE(review): the lines building the enables bitmap
	 * (FUNC_CFG_REQ_ENABLES_* ORs) are missing from this extract, so
	 * enables is returned as 0 here — confirm against the original
	 * source. */
	return enables;
}
881
/**
 * Populate a FUNC_CFG request in the HWRM request buffer with the ring
 * and context counts chosen for this function.  The request is sent by
 * the caller, not here.
 */
static void bnxt_hwrm_assign_resources ( struct bnxt *bp )
{
	struct hwrm_func_cfg_input *req;
	u32 enables = 0;

	DBGP ( "%s\n", __func__ );
	/* NOTE(review): lines are missing from this extract here (the
	 * conditional call to bnxt_set_ring_info() that computes enables
	 * is expected) — confirm against the original source. */

	req = ( struct hwrm_func_cfg_input * ) REQ_DMA_ADDR ( bp );
	req->num_cmpl_rings = bp->num_cmpl_rings;
	req->num_tx_rings = bp->num_tx_rings;
	req->num_rx_rings = bp->num_rx_rings;
	req->num_stat_ctxs = bp->num_stat_ctxs;
	req->num_hw_ring_grps = bp->num_hw_ring_grps;
	req->enables = enables;
}
899
/**
 * Query PF capabilities (HWRM_FUNC_QCAPS).
 *
 * Skipped for VFs (they use FUNC_QCFG instead).  Records the function
 * ID, port index and the PF MAC address.
 */
static int bnxt_hwrm_func_qcaps_req ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcaps_input );
	struct hwrm_func_qcaps_input *req;
	struct hwrm_func_qcaps_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	if ( bp->vf )
		return STATUS_SUCCESS;

	req = ( struct hwrm_func_qcaps_input * ) REQ_DMA_ADDR ( bp );
	resp = ( struct hwrm_func_qcaps_output * ) RESP_DMA_ADDR ( bp );
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCAPS, cmd_len );
	req->fid = ( u16 )HWRM_NA_SIGNATURE;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc ) {
		DBGP ( "- %s ( ): Failed\n", __func__ );
		return STATUS_FAILURE;
	}

	bp->fid = resp->fid;
	bp->port_idx = ( u8 )resp->port_id;

	/* NOTE(review): the opening condition of this block is missing
	 * from this extract (an error-recovery capability test on
	 * resp->flags is expected) — confirm against the original
	 * source. */
		bp->err_rcvry_supported = 1;
	}

	/* Get MAC address for this PF */
	memcpy ( &bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN );
	dbg_func_qcaps ( bp );

	return STATUS_SUCCESS;
}
934
/**
 * Query function configuration (HWRM_FUNC_QCFG).
 *
 * Records multi-host/NPAR mode flags, ordinal value and stat context ID;
 * for VFs it additionally supplies the fid, port index, VLAN and MAC
 * address that PFs obtain from FUNC_QCAPS.
 */
static int bnxt_hwrm_func_qcfg_req ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcfg_input );
	struct hwrm_func_qcfg_input *req;
	struct hwrm_func_qcfg_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_func_qcfg_input * ) REQ_DMA_ADDR ( bp );
	resp = ( struct hwrm_func_qcfg_output * ) RESP_DMA_ADDR ( bp );
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCFG, cmd_len );
	req->fid = ( u16 )HWRM_NA_SIGNATURE;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc ) {
		DBGP ( "- %s ( ): Failed\n", __func__ );
		return STATUS_FAILURE;
	}

	/* NOTE(review): the condition line for this FLAG_SET is missing
	 * from this extract (a multi-host test on resp->flags is
	 * expected) — confirm against the original source. */
	FLAG_SET ( bp->flags, BNXT_FLAG_MULTI_HOST );

	/* NOTE(review): the right-hand side of this condition is missing
	 * from this extract (an NPAR partition-type constant is
	 * expected). */
	if ( resp->port_partition_type &
		FLAG_SET ( bp->flags, BNXT_FLAG_NPAR_MODE );

	bp->ordinal_value = ( u8 )resp->pci_id & 0x0F;
	bp->stat_ctx_id = resp->stat_ctx_id;

	/* If VF is set to TRUE, then use some data from func_qcfg ( ). */
	if ( bp->vf ) {
		bp->fid = resp->fid;
		bp->port_idx = ( u8 )resp->port_id;
		bp->vlan_id = resp->vlan;

		/* Get MAC address for this VF */
		memcpy ( bp->mac_addr, resp->mac_address, ETH_ALEN );
	}
	dbg_func_qcfg ( bp );
	return STATUS_SUCCESS;
}
975
/* NOTE(review): the function signature line is missing from this extract;
 * expected something like: static int bnxt_hwrm_port_phy_qcaps_req
 * ( struct bnxt *bp ) — confirm against the original source.  Queries
 * PHY capabilities via HWRM_PORT_PHY_QCAPS. */
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_qcaps_input );
	struct hwrm_port_phy_qcaps_input *req;
	struct hwrm_port_phy_qcaps_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );

	req = ( struct hwrm_port_phy_qcaps_input * ) REQ_DMA_ADDR ( bp );
	resp = ( struct hwrm_port_phy_qcaps_output * ) RESP_DMA_ADDR ( bp );
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCAPS, cmd_len );
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc ) {
		DBGP ( "-s %s ( ): Failed\n", __func__ );
		return STATUS_FAILURE;
	}

	/* NOTE(review): lines consuming the response (capability flag
	 * tests) are missing from this extract here — confirm against the
	 * original source. */

	return STATUS_SUCCESS;
}
999
/**
 * Reset this function via HWRM_FUNC_RESET.
 *
 * @ret rc	Result of wait_resp() (firmware error code or timeout)
 */
static int bnxt_hwrm_func_reset_req ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_reset_input );
	struct hwrm_func_reset_input *req;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_func_reset_input * ) REQ_DMA_ADDR ( bp );
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESET, cmd_len );
	/* NOTE(review): the PF-only branch body is missing from this
	 * extract (a func_reset_level assignment is expected) — confirm
	 * against the original source. */
	if ( !bp->vf )

	return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
}
1013
/**
 * Configure this function via HWRM_FUNC_CFG (PF only; VFs skip this).
 *
 * Fills ring/context counts via bnxt_hwrm_assign_resources() and, on
 * P5-plus chips, additional per-function settings before sending.
 */
static int bnxt_hwrm_func_cfg_req ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input );
	struct hwrm_func_cfg_input *req;

	DBGP ( "%s\n", __func__ );
	if ( bp->vf )
		return STATUS_SUCCESS;

	req = ( struct hwrm_func_cfg_input * ) REQ_DMA_ADDR ( bp );
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
	req->fid = ( u16 )HWRM_NA_SIGNATURE;
	/* NOTE(review): lines are missing from this extract before and
	 * inside the branch below (the bnxt_hwrm_assign_resources() call
	 * and additional P5-plus request fields are expected) — confirm
	 * against the original source. */
	if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) {
		req->num_msix = 1;
		req->num_vnics = 1;
	}
	return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
}
1037
/* Query firmware error-recovery configuration ( HWRM_ER_QCFG ) and cache
 * the health/heartbeat register addresses and wait periods in bp->er.
 * A default heartbeat poll interval (100 x 100ms units) is set even when
 * error recovery is unsupported.
 * NOTE(review): the function signature (orig line 1038) and the req/resp
 * pointer declarations (orig lines 1040-1041) are elided in this extract.
 */
1039{
1042 int rc = 0;
1043 u8 i = 0;
1044 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_error_recovery_qcfg_input );
1045
1046 DBGP ( "%s\n", __func__ );
1047 /* Set default error recovery heartbeat polling value (in 100ms)*/
1048 bp->er.drv_poll_freq = 100;
1049 if ( ! ( bp->err_rcvry_supported ) ) {
1050 return STATUS_SUCCESS;
1051 }
1052
1053 req = ( struct hwrm_error_recovery_qcfg_input * ) REQ_DMA_ADDR ( bp );
1054 resp = ( struct hwrm_error_recovery_qcfg_output * ) RESP_DMA_ADDR ( bp );
1055
1056 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_ER_QCFG, cmd_len );
1057
1058 rc = wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
1059 if ( rc ) {
1060 DBGP ( "- %s ( ): Failed\n", __func__ );
1061 return STATUS_FAILURE;
1062 }
1063
1064 bp->er.flags = resp->flags;
1065 bp->er.drv_poll_freq = resp->driver_polling_freq;
1066 bp->er.master_wait_period = resp->master_wait_period;
1067 bp->er.normal_wait_period = resp->normal_wait_period;
1068 bp->er.master_wait_post_rst = resp->master_wait_post_reset;
1069 bp->er.max_bailout_post_rst = resp->max_bailout_time;
1070
1071 bp->er.fw_status_reg = resp->fw_health_status_reg;
1072 bp->er.fw_hb_reg = resp->fw_heartbeat_reg;
1073 bp->er.fw_rst_cnt_reg = resp->fw_reset_cnt_reg;
1074 bp->er.recvry_cnt_reg = resp->err_recovery_cnt_reg;
1075 bp->er.rst_inprg_reg = resp->reset_inprogress_reg;
1076
1077 bp->er.rst_inprg_reg_mask = resp->reset_inprogress_reg_mask;
1078 bp->er.reg_array_cnt = resp->reg_array_cnt;
1079
1080 DBGP ( "flags = 0x%x\n", resp->flags );
1081 DBGP ( "driver_polling_freq = 0x%x\n", resp->driver_polling_freq );
1082 DBGP ( "master_wait_period = 0x%x\n", resp->master_wait_period );
1083 DBGP ( "normal_wait_period = 0x%x\n", resp->normal_wait_period );
1084 DBGP ( "wait_post_reset = 0x%x\n", resp->master_wait_post_reset );
1085 DBGP ( "bailout_post_reset = 0x%x\n", resp->max_bailout_time );
1086 DBGP ( "reg_array_cnt = %x\n", resp->reg_array_cnt );
1087
/* Copy the per-step reset register/value/delay tables */
1088 for ( i = 0; i < resp->reg_array_cnt; i++ ) {
1089 bp->er.rst_reg[i] = resp->reset_reg[i];
1090 bp->er.rst_reg_val[i] = resp->reset_reg_val[i];
1091 bp->er.delay_after_rst[i] = resp->delay_after_reset[i];
1092
1093 DBGP ( "rst_reg = %x ", bp->er.rst_reg[i] );
1094 DBGP ( "rst_reg_val = %x ", bp->er.rst_reg_val[i] );
1095 DBGP ( "rst_after_reset = %x\n", bp->er.delay_after_rst[i] );
1096 }
1097
1098 return STATUS_SUCCESS;
1099}
1100
/* Register this driver with firmware ( HWRM_FUNC_DRV_RGTR ) and record
 * success in flag_hwrm so unregistration happens on teardown.
 * NOTE(review): several request-field assignments (orig lines 1112-1132,
 * presumably enables, OS type, async-event forwarding masks and error
 * recovery capability bits) are elided in this extract.
 */
1101static int bnxt_hwrm_func_drv_rgtr ( struct bnxt *bp )
1102{
1103 u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_rgtr_input );
1104 struct hwrm_func_drv_rgtr_input *req;
1105 int rc;
1106
1107 DBGP ( "%s\n", __func__ );
1108 req = ( struct hwrm_func_drv_rgtr_input * ) REQ_DMA_ADDR ( bp );
1109 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_RGTR, cmd_len );
1110
1111 /* Register with HWRM */
1116
1121
1122 if ( bp->err_rcvry_supported ) {
1127 }
1128
1133 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1134 if ( rc ) {
1135 DBGP ( "- %s ( ): Failed\n", __func__ );
1136 return STATUS_FAILURE;
1137 }
1138
1139 FLAG_SET ( bp->flag_hwrm, VALID_DRIVER_REG );
1140 return STATUS_SUCCESS;
1141}
1142
/* Unregister the driver from firmware ( HWRM_FUNC_DRV_UNRGTR ).
 * No-op unless a prior registration succeeded (VALID_DRIVER_REG).
 * NOTE(review): original line 1155 (a request-field assignment) is
 * elided in this extract.
 */
1143static int bnxt_hwrm_func_drv_unrgtr ( struct bnxt *bp )
1144{
1145 u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_unrgtr_input );
1146 struct hwrm_func_drv_unrgtr_input *req;
1147 int rc;
1148
1149 DBGP ( "%s\n", __func__ );
1150 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_DRIVER_REG ) ) )
1151 return STATUS_SUCCESS;
1152
1153 req = ( struct hwrm_func_drv_unrgtr_input * ) REQ_DMA_ADDR ( bp );
1154 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_UNRGTR, cmd_len );
1156 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1157 if ( rc )
1158 return STATUS_FAILURE;
1159
1160 FLAG_RESET ( bp->flag_hwrm, VALID_DRIVER_REG );
1161 return STATUS_SUCCESS;
1162}
1163
/* Direct firmware async event completions at our NQ (P5+) or CQ ring.
 * VFs use HWRM_FUNC_VF_CFG (which also programs MTU, VLAN and MAC);
 * PFs use HWRM_FUNC_CFG.
 * NOTE(review): original lines 1181 and 1195 (the enables-bit
 * assignments for async_event_cr) are elided in this extract.
 */
1164static int bnxt_hwrm_set_async_event ( struct bnxt *bp )
1165{
1166 int rc;
1167 u16 idx;
1168
1169 DBGP ( "%s\n", __func__ );
1170 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) )
1171 idx = bp->nq_ring_id;
1172 else
1173 idx = bp->cq_ring_id;
1174 if ( bp->vf ) {
1175 u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_vf_cfg_input );
1176 struct hwrm_func_vf_cfg_input *req;
1177
1178 req = ( struct hwrm_func_vf_cfg_input * ) REQ_DMA_ADDR ( bp );
1179 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_VF_CFG,
1180 cmd_len );
1182 req->async_event_cr = idx;
1183 req->mtu = bp->mtu;
1184 req->guest_vlan = bp->vlan_id;
1185 memcpy ( ( char * )&req->dflt_mac_addr[0], bp->mac_addr,
1186 ETH_ALEN );
1187 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1188 } else {
1189 u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input );
1190 struct hwrm_func_cfg_input *req;
1191
1192 req = ( struct hwrm_func_cfg_input * ) REQ_DMA_ADDR ( bp );
1193 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
1194 req->fid = ( u16 )HWRM_NA_SIGNATURE;
1196 req->async_event_cr = idx;
1197 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1198 }
1199 return rc;
1200}
1201
/* Allocate an L2 receive filter for our MAC ( HWRM_CFA_L2_FILTER_ALLOC )
 * and remember the returned filter id for later freeing.
 * NOTE(review): the signature (orig line 1202), req/resp declarations
 * (orig 1205-1206, 1208), the flags/enables initialisers (orig 1215-1218)
 * and one enables bit (orig 1231) are elided in this extract.
 */
1203{
1204 u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_alloc_input );
1207 int rc;
1209 u32 enables;
1210
1211 DBGP ( "%s\n", __func__ );
1212 req = ( struct hwrm_cfa_l2_filter_alloc_input * ) REQ_DMA_ADDR ( bp );
1213 resp = ( struct hwrm_cfa_l2_filter_alloc_output * ) RESP_DMA_ADDR ( bp );
1214 if ( bp->vf )
1219
1220 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_ALLOC,
1221 cmd_len );
1222 req->flags = flags;
1223 req->enables = enables;
1224 memcpy ( ( char * )&req->l2_addr[0], ( char * )&bp->mac_addr[0],
1225 ETH_ALEN );
1226 memset ( ( char * )&req->l2_addr_mask[0], 0xff, ETH_ALEN );
1227 if ( !bp->vf ) {
1228 memcpy ( ( char * )&req->t_l2_addr[0], bp->mac_addr, ETH_ALEN );
1229 memset ( ( char * )&req->t_l2_addr_mask[0], 0xff, ETH_ALEN );
1230 }
1232 req->src_id = ( u32 )bp->port_idx;
1233 req->dst_id = bp->vnic_id;
1234 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1235 if ( rc )
1236 return STATUS_FAILURE;
1237
1238 FLAG_SET ( bp->flag_hwrm, VALID_L2_FILTER );
1239 bp->l2_filter_id = resp->l2_filter_id;
1240 return STATUS_SUCCESS;
1241}
1242
/* Free the previously-allocated L2 receive filter
 * ( HWRM_CFA_L2_FILTER_FREE ).  No-op unless VALID_L2_FILTER is set.
 * NOTE(review): the signature (orig line 1243) and req declaration
 * (orig 1246) are elided in this extract.
 */
1244{
1245 u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_free_input );
1247 int rc;
1248
1249 DBGP ( "%s\n", __func__ );
1250 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_L2_FILTER ) ) )
1251 return STATUS_SUCCESS;
1252
1253 req = ( struct hwrm_cfa_l2_filter_free_input * ) REQ_DMA_ADDR ( bp );
1254 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_FREE,
1255 cmd_len );
1256 req->l2_filter_id = bp->l2_filter_id;
1257 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1258 if ( rc ) {
1259 DBGP ( "- %s ( ): Failed\n", __func__ );
1260 return STATUS_FAILURE;
1261 }
1262
1263 FLAG_RESET ( bp->flag_hwrm, VALID_L2_FILTER );
1264 return STATUS_SUCCESS;
1265}
1266
/* Translate driver RX_MASK_* request bits into the HWRM rx-mask word.
 * Returns 0 for an empty request.
 * NOTE(review): the signature (orig line 1267) and the mask |= lines
 * (orig 1274, 1277, 1279, 1281 - unicast plus the MCAST/ALL_MCAST/PROMISC
 * HWRM bits) are elided in this extract.
 */
1268{
1269 u32 mask = 0;
1270
1271 if ( !rx_mask )
1272 return mask;
1273
1275 if ( rx_mask != RX_MASK_ACCEPT_NONE ) {
1276 if ( rx_mask & RX_MASK_ACCEPT_MULTICAST )
1278 if ( rx_mask & RX_MASK_ACCEPT_ALL_MULTICAST )
1280 if ( rx_mask & RX_MASK_PROMISCUOUS_MODE )
1282 }
1283 return mask;
1284}
1285
/* Program the VNIC receive mask ( HWRM_CFA_L2_SET_RX_MASK ) from the
 * driver-level rx_mask bits.
 * NOTE(review): the req pointer declaration (orig line 1289) is elided
 * in this extract.
 */
1286static int bnxt_hwrm_set_rx_mask ( struct bnxt *bp, u32 rx_mask )
1287{
1288 u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_set_rx_mask_input );
1290 u32 mask = set_rx_mask ( rx_mask );
1291
1292 req = ( struct hwrm_cfa_l2_set_rx_mask_input * ) REQ_DMA_ADDR ( bp );
1293 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_SET_RX_MASK,
1294 cmd_len );
1295 req->vnic_id = bp->vnic_id;
1296 req->mask = mask;
1297
1298 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1299}
1300
1301static int bnxt_hwrm_port_phy_qcfg ( struct bnxt *bp, u16 idx )
1302{
1303 u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_qcfg_input );
1304 struct hwrm_port_phy_qcfg_input *req;
1305 struct hwrm_port_phy_qcfg_output *resp;
1306 int rc;
1307
1308 DBGP ( "%s\n", __func__ );
1309 req = ( struct hwrm_port_phy_qcfg_input * ) REQ_DMA_ADDR ( bp );
1310 resp = ( struct hwrm_port_phy_qcfg_output * ) RESP_DMA_ADDR ( bp );
1311 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCFG, cmd_len );
1312 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1313 if ( rc ) {
1314 DBGP ( "- %s ( ): Failed\n", __func__ );
1315 return STATUS_FAILURE;
1316 }
1317
1318 if ( idx & SUPPORT_SPEEDS )
1319 bp->support_speeds = resp->support_speeds;
1320
1321 if ( idx & SUPPORT_SPEEDS2 )
1322 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) )
1323 bp->auto_link_speeds2_mask = resp->auto_link_speeds2;
1324
1325 if ( idx & DETECT_MEDIA )
1326 bp->media_detect = resp->module_status;
1327
1328 if ( idx & PHY_SPEED )
1329 bp->current_link_speed = resp->link_speed;
1330
1331 if ( idx & PHY_STATUS ) {
1332 if ( resp->link == PORT_PHY_QCFG_RESP_LINK_LINK )
1333 bp->link_status = STATUS_LINK_ACTIVE;
1334 else
1335 bp->link_status = STATUS_LINK_DOWN;
1336 }
1337 return STATUS_SUCCESS;
1338}
1339
/* Read an NVM configuration variable ( HWRM_NVM_GET_VARIABLE ) into the
 * shared DMA buffer; flash access uses an extended timeout multiplier.
 * NOTE(review): the first line of the signature (orig line 1340, with
 * the function name and bp parameter) is elided in this extract.
 */
1341 u16 data_len, u16 option_num, u16 dimensions, u16 index_0 )
1342{
1343 u16 cmd_len = ( u16 )sizeof ( struct hwrm_nvm_get_variable_input );
1344 struct hwrm_nvm_get_variable_input *req;
1345
1346 DBGP ( "%s\n", __func__ );
1347 req = ( struct hwrm_nvm_get_variable_input * ) REQ_DMA_ADDR ( bp );
1348 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_NVM_GET_VARIABLE, cmd_len );
1349 req->dest_data_addr = DMA_DMA_ADDR ( bp );
1350 req->data_len = data_len;
1351 req->option_num = option_num;
1352 req->dimensions = dimensions;
1353 req->index_0 = index_0;
1354 return wait_resp ( bp,
1355 HWRM_CMD_FLASH_MULTIPLAYER ( bp->hwrm_cmd_timeout ),
1356 cmd_len, __func__ );
1357}
1358
/* Build bp->link_set from NVM speed variables read into the shared DMA
 * buffer, then map the firmware speed code onto a medium setting.
 * NOTE(review): many lines are elided in this extract - the
 * bnxt_hwrm_nvm_get_variable_req() call sites (option numbers), several
 * SET_LINK shifts, and most switch-case bodies/labels.  Do not infer the
 * missing option numbers or speed settings; consult the full source.
 */
1359static int bnxt_get_link_speed ( struct bnxt *bp )
1360{
1361 u32 *ptr32 = ( u32 * ) DMA_DMA_ADDR ( bp );
1362
1363 DBGP ( "%s\n", __func__ );
1364 if ( ! ( FLAG_TEST (bp->flags, BNXT_FLAG_IS_CHIP_P7 ) ) ) {
1367 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1368 return STATUS_FAILURE;
1369 bp->link_set = SET_LINK ( *ptr32, SPEED_DRV_MASK, SPEED_DRV_SHIFT );
1372 ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1373 return STATUS_FAILURE;
1374 bp->link_set |= SET_LINK ( *ptr32, D3_SPEED_FW_MASK,
1376 }
1379 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1380 return STATUS_FAILURE;
1381 bp->link_set |= SET_LINK ( *ptr32, SPEED_FW_MASK, SPEED_FW_SHIFT );
1384 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1385 return STATUS_FAILURE;
1386 bp->link_set |= SET_LINK ( *ptr32,
1388
1389 /* Use LINK_SPEED_FW_xxx which is valid for CHIP_P7 and earlier devices */
1390 switch ( bp->link_set & LINK_SPEED_FW_MASK ) {
1391 case LINK_SPEED_FW_1G:
1393 break;
1394 case LINK_SPEED_FW_2_5G:
1396 break;
1397 case LINK_SPEED_FW_10G:
1399 break;
1400 case LINK_SPEED_FW_25G:
1402 break;
1403 case LINK_SPEED_FW_40G:
1405 break;
1406 case LINK_SPEED_FW_50G:
1408 break;
1411 break;
1412 case LINK_SPEED_FW_100G:
1414 break;
1417 break;
1420 break;
1421 case LINK_SPEED_FW_200G:
1423 break;
1426 break;
1429 break;
1432 break;
1435 break;
1436 default:
1438 break;
1439 }
1440 prn_set_speed ( bp->link_set );
1441 return STATUS_SUCCESS;
1442}
1443
/* Query the backing-store context configuration
 * ( HWRM_FUNC_BACKING_STORE_QCFG ); P5+ chips only.
 * NOTE(review): the signature (orig line 1444) and req declaration
 * (orig 1447) are elided in this extract.
 */
1445{
1446 u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_qcfg_input );
1448
1449 DBGP ( "%s\n", __func__ );
1450 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
1451 return STATUS_SUCCESS;
1452
1453 req = ( struct hwrm_func_backing_store_qcfg_input * ) REQ_DMA_ADDR ( bp );
1454 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_QCFG,
1455 cmd_len );
1456 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1457}
1458
/* Configure the backing store ( HWRM_FUNC_BACKING_STORE_CFG ) with no
 * enables (firmware defaults); P5+ chips only, long 6x timeout.
 * NOTE(review): the signature (orig line 1459), req declaration (orig
 * 1462) and the flags assignment (orig 1471) are elided in this extract.
 */
1461 u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_cfg_input );
1463
1464 DBGP ( "%s\n", __func__ );
1465 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
1466 return STATUS_SUCCESS;
1467
1468 req = ( struct hwrm_func_backing_store_cfg_input * ) REQ_DMA_ADDR ( bp );
1469 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_CFG,
1470 cmd_len );
1472 req->enables = 0;
1473 return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
1474}
1475
1476static int bnxt_hwrm_queue_qportcfg ( struct bnxt *bp )
1477{
1478 u16 cmd_len = ( u16 )sizeof ( struct hwrm_queue_qportcfg_input );
1479 struct hwrm_queue_qportcfg_input *req;
1480 struct hwrm_queue_qportcfg_output *resp;
1481 int rc;
1482
1483 DBGP ( "%s\n", __func__ );
1484 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
1485 return STATUS_SUCCESS;
1486
1487 req = ( struct hwrm_queue_qportcfg_input * ) REQ_DMA_ADDR ( bp );
1488 resp = ( struct hwrm_queue_qportcfg_output * ) RESP_DMA_ADDR ( bp );
1489 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_QUEUE_QPORTCFG, cmd_len );
1490 req->flags = 0;
1491 req->port_id = 0;
1492 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1493 if ( rc ) {
1494 DBGP ( "- %s ( ): Failed\n", __func__ );
1495 return STATUS_FAILURE;
1496 }
1497
1498 bp->queue_id = resp->queue_id0;
1499 return STATUS_SUCCESS;
1500}
1501
/* Configure port MAC settings ( HWRM_PORT_MAC_CFG ); PF only.
 * NOTE(review): original line 1513 (a request-field assignment, likely
 * loopback mode) is elided in this extract.
 */
1502static int bnxt_hwrm_port_mac_cfg ( struct bnxt *bp )
1503{
1504 u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_mac_cfg_input );
1505 struct hwrm_port_mac_cfg_input *req;
1506
1507 DBGP ( "%s\n", __func__ );
1508 if ( bp->vf )
1509 return STATUS_SUCCESS;
1510
1511 req = ( struct hwrm_port_mac_cfg_input * ) REQ_DMA_ADDR ( bp );
1512 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_MAC_CFG, cmd_len );
1514 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1515}
1516
/* Bring up / force the link ( HWRM_PORT_PHY_CFG ).  Maps the configured
 * medium speed onto forced or autonegotiated PHY settings, with separate
 * encodings for chips supporting the LINK_SPEEDS2 scheme.
 * NOTE(review): this extract elides the flags/enables initialisers, the
 * switch-case labels (medium speed values) and almost every assignment
 * inside the cases, plus the force/auto speed request fields near the
 * end - treat the visible skeleton as structure only and consult the
 * full source before modifying.
 */
1517static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp )
1518{
1519 u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_cfg_input );
1520 struct hwrm_port_phy_cfg_input *req;
1521 u32 flags;
1522 u32 enables = 0;
1528 u8 auto_mode = 0;
1529 u8 auto_pause = 0;
1530 u8 auto_duplex = 0;
1531
1532 DBGP ( "%s\n", __func__ );
1533 req = ( struct hwrm_port_phy_cfg_input * ) REQ_DMA_ADDR ( bp );
1536
1537 switch ( GET_MEDIUM_SPEED ( bp->medium ) ) {
1540 break;
1542 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1545 } else {
1547 }
1548 break;
1550 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1553 } else {
1555 }
1556 break;
1558 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1561 } else {
1563 }
1564 break;
1566 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1569 } else {
1571 }
1572 break;
1574 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1577 } else {
1580 }
1581 break;
1583 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1586 } else {
1588 }
1589 break;
1591 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1594 } else {
1597 }
1598 break;
1600 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1603 }
1604 break;
1606 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1609 } else {
1612 }
1613 break;
1615 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1618 }
1619 break;
1621 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1624 }
1625 break;
1627 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
1630 }
1631 break;
1632 default:
1638 if ( FLAG_TEST (bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) )
1640 else
1645 auto_link_speed_mask = bp->support_speeds;
1646 auto_link_speeds2_mask = bp->auto_link_speeds2_mask;
1647 break;
1648 }
1649
1650 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_CFG, cmd_len );
1651 req->flags = flags;
1652 req->enables = enables;
1653 req->port_id = bp->port_idx;
1657 req->auto_mode = auto_mode;
1658 req->auto_duplex = auto_duplex;
1659 req->auto_pause = auto_pause;
1662
1663 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1664}
1665
/* Query link state; if the link is down on a PF (and not multi-host /
 * NPAR), read the configured speed from NVM, bring the link up and
 * re-query.  Returns the final qcfg status.
 * NOTE(review): the flag declaration (orig line 1668), the initial
 * bnxt_hwrm_port_phy_qcfg call (orig 1672) and the link bring-up call
 * (orig 1695, presumably bnxt_hwrm_port_phy_cfg) are elided in this
 * extract.
 */
1666static int bnxt_query_phy_link ( struct bnxt *bp )
1667{
1669
1670 DBGP ( "%s\n", __func__ );
1671 /* Query Link Status */
1673 return STATUS_FAILURE;
1674 }
1675
1676 if ( bp->link_status == STATUS_LINK_ACTIVE )
1677 return STATUS_SUCCESS;
1678
1679 /* If VF is set to TRUE, Do not issue the following commands */
1680 if ( bp->vf )
1681 return STATUS_SUCCESS;
1682
1683 /* If multi_host or NPAR, Do not issue bnxt_get_link_speed */
1684 if ( FLAG_TEST ( bp->flags, PORT_PHY_FLAGS ) ) {
1685 dbg_flags ( __func__, bp->flags );
1686 return STATUS_SUCCESS;
1687 }
1688
1689 /* HWRM_NVM_GET_VARIABLE - speed */
1690 if ( bnxt_get_link_speed ( bp ) != STATUS_SUCCESS ) {
1691 return STATUS_FAILURE;
1692 }
1693
1694 /* Configure link if it is not up */
1696
1697 /* refresh link speed values after bringing link up */
1698 return bnxt_hwrm_port_phy_qcfg ( bp, flag );
1699}
1700
/* Poll for link-up in 100ms steps up to bp->wait_link_timeout, then
 * report the final state to the net device (unless error recovery is in
 * progress).  Always returns STATUS_SUCCESS.
 * NOTE(review): the flag declaration (orig line 1704), the per-iteration
 * bnxt_query_phy_link call (orig 1709) and the mdelay (orig 1717) are
 * elided in this extract.
 */
1701static int bnxt_get_phy_link ( struct bnxt *bp )
1702{
1703 u16 i;
1705
1706 DBGP ( "%s\n", __func__ );
1707 dbg_chip_info ( bp );
1708 for ( i = 0; i < ( bp->wait_link_timeout / 100 ); i++ ) {
1710 break;
1711
1712 if ( bp->link_status == STATUS_LINK_ACTIVE )
1713 break;
1714
1715// if ( bp->media_detect )
1716// break;
1718 }
1719 dbg_link_state ( bp, ( u32 ) ( ( i + 1 ) * 100 ) );
1720 if ( !bp->er.er_rst_on ) {
1721 bnxt_set_link ( bp );
1722 }
1723 return STATUS_SUCCESS;
1724}
1725
1726static int bnxt_hwrm_stat_ctx_alloc ( struct bnxt *bp )
1727{
1728 u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_alloc_input );
1729 struct hwrm_stat_ctx_alloc_input *req;
1730 struct hwrm_stat_ctx_alloc_output *resp;
1731 int rc;
1732
1733 DBGP ( "%s\n", __func__ );
1734 req = ( struct hwrm_stat_ctx_alloc_input * ) REQ_DMA_ADDR ( bp );
1735 resp = ( struct hwrm_stat_ctx_alloc_output * ) RESP_DMA_ADDR ( bp );
1736 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_ALLOC, cmd_len );
1737 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1738 if ( rc ) {
1739 DBGP ( "- %s ( ): Failed\n", __func__ );
1740 return STATUS_FAILURE;
1741 }
1742
1743 FLAG_SET ( bp->flag_hwrm, VALID_STAT_CTX );
1744 bp->stat_ctx_id = ( u16 )resp->stat_ctx_id;
1745 return STATUS_SUCCESS;
1746}
1747
1748static int bnxt_hwrm_stat_ctx_free ( struct bnxt *bp )
1749{
1750 u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_free_input );
1751 struct hwrm_stat_ctx_free_input *req;
1752 int rc;
1753
1754 DBGP ( "%s\n", __func__ );
1755 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_STAT_CTX ) ) )
1756 return STATUS_SUCCESS;
1757
1758 req = ( struct hwrm_stat_ctx_free_input * ) REQ_DMA_ADDR ( bp );
1759 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_FREE, cmd_len );
1760 req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
1761 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1762 if ( rc ) {
1763 DBGP ( "- %s ( ): Failed\n", __func__ );
1764 return STATUS_FAILURE;
1765 }
1766
1767 FLAG_RESET ( bp->flag_hwrm, VALID_STAT_CTX );
1768 return STATUS_SUCCESS;
1769}
1770
1771static int bnxt_hwrm_ring_free_grp ( struct bnxt *bp )
1772{
1773 u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_free_input );
1774 struct hwrm_ring_grp_free_input *req;
1775 int rc;
1776
1777 DBGP ( "%s\n", __func__ );
1778 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_GRP ) ) )
1779 return STATUS_SUCCESS;
1780
1781 req = ( struct hwrm_ring_grp_free_input * ) REQ_DMA_ADDR ( bp );
1782 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_FREE, cmd_len );
1783 req->ring_group_id = ( u32 )bp->ring_grp_id;
1784 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1785 if ( rc ) {
1786 DBGP ( "- %s ( ): Failed\n", __func__ );
1787 return STATUS_FAILURE;
1788 }
1789
1790 FLAG_RESET ( bp->flag_hwrm, VALID_RING_GRP );
1791 return STATUS_SUCCESS;
1792}
1793
1794static int bnxt_hwrm_ring_alloc_grp ( struct bnxt *bp )
1795{
1796 u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_alloc_input );
1797 struct hwrm_ring_grp_alloc_input *req;
1798 struct hwrm_ring_grp_alloc_output *resp;
1799 int rc;
1800
1801 DBGP ( "%s\n", __func__ );
1802 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) )
1803 return STATUS_SUCCESS;
1804
1805 req = ( struct hwrm_ring_grp_alloc_input * ) REQ_DMA_ADDR ( bp );
1806 resp = ( struct hwrm_ring_grp_alloc_output * ) RESP_DMA_ADDR ( bp );
1807 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_ALLOC, cmd_len );
1808 req->cr = bp->cq_ring_id;
1809 req->rr = bp->rx_ring_id;
1810 req->ar = ( u16 )HWRM_NA_SIGNATURE;
1811 if ( bp->vf )
1812 req->sc = bp->stat_ctx_id;
1813
1814 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1815 if ( rc ) {
1816 DBGP ( "- %s ( ): Failed\n", __func__ );
1817 return STATUS_FAILURE;
1818 }
1819
1820 FLAG_SET ( bp->flag_hwrm, VALID_RING_GRP );
1821 bp->ring_grp_id = ( u16 )resp->ring_group_id;
1822 return STATUS_SUCCESS;
1823}
1824
1825int bnxt_hwrm_ring_free ( struct bnxt *bp, u16 ring_id, u8 ring_type )
1826{
1827 u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_free_input );
1828 struct hwrm_ring_free_input *req;
1829
1830 DBGP ( "%s\n", __func__ );
1831 req = ( struct hwrm_ring_free_input * ) REQ_DMA_ADDR ( bp );
1832 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_FREE, cmd_len );
1833 req->ring_type = ring_type;
1834 req->ring_id = ring_id;
1835 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1836}
1837
/* Allocate a hardware ring ( HWRM_RING_ALLOC ) of the requested type
 * (NQ, CQ, TX or RX), then record the returned ring id and set the
 * matching VALID_RING_* flag.
 * NOTE(review): the case labels for each ring type (orig lines 1851,
 * 1858, 1869, 1878), some per-type assignments (orig 1865, 1871, 1880,
 * 1888-1889) and the first branch of the trailing if/else chain (orig
 * 1900, the CQ type test) are elided in this extract.
 */
1838static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type )
1839{
1840 u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_alloc_input );
1841 struct hwrm_ring_alloc_input *req;
1842 struct hwrm_ring_alloc_output *resp;
1843 int rc;
1844
1845 DBGP ( "%s\n", __func__ );
1846 req = ( struct hwrm_ring_alloc_input * ) REQ_DMA_ADDR ( bp );
1847 resp = ( struct hwrm_ring_alloc_output * ) RESP_DMA_ADDR ( bp );
1848 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_ALLOC, cmd_len );
1849 req->ring_type = type;
1850 switch ( type ) {
1852 req->page_size = LM_PAGE_BITS ( 12 );
1853 req->int_mode = BNXT_CQ_INTR_MODE ( ( (FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7) ) || bp->vf ) );
1854 req->length = ( u32 )bp->nq.ring_cnt;
1855 req->logical_id = 0xFFFF; // Required value for Thor FW?
1856 req->page_tbl_addr = NQ_DMA_ADDR ( bp );
1857 break;
1859 req->page_size = LM_PAGE_BITS ( 8 );
1860 req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf );
1861 req->length = ( u32 )bp->cq.ring_cnt;
1862 req->page_tbl_addr = CQ_DMA_ADDR ( bp );
1863 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
1864 break;
1866 req->nq_ring_id = bp->nq_ring_id;
1867 req->cq_handle = ( u64 )bp->nq_ring_id;
1868 break;
1870 req->page_size = LM_PAGE_BITS ( 8 );
1872 req->length = ( u32 )bp->tx.ring_cnt;
1873 req->queue_id = ( u16 )bp->queue_id;
1874 req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
1875 req->cmpl_ring_id = bp->cq_ring_id;
1876 req->page_tbl_addr = TX_DMA_ADDR ( bp );
1877 break;
1879 req->page_size = LM_PAGE_BITS ( 8 );
1881 req->length = ( u32 )bp->rx.ring_cnt;
1882 req->stat_ctx_id = ( u32 )STAT_CTX_ID;
1883 req->cmpl_ring_id = bp->cq_ring_id;
1884 req->page_tbl_addr = RX_DMA_ADDR ( bp );
1885 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
1886 break;
1887 req->queue_id = ( u16 )RX_RING_QID;
1890 break;
1891 default:
1892 return STATUS_SUCCESS;
1893 }
1894 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1895 if ( rc ) {
1896 DBGP ( "- %s ( ): Failed, type = %x\n", __func__, type );
1897 return STATUS_FAILURE;
1898 }
1899
1901 FLAG_SET ( bp->flag_hwrm, VALID_RING_CQ );
1902 bp->cq_ring_id = resp->ring_id;
1903 } else if ( type == RING_ALLOC_REQ_RING_TYPE_TX ) {
1904 FLAG_SET ( bp->flag_hwrm, VALID_RING_TX );
1905 bp->tx_ring_id = resp->ring_id;
1906 } else if ( type == RING_ALLOC_REQ_RING_TYPE_RX ) {
1907 FLAG_SET ( bp->flag_hwrm, VALID_RING_RX );
1908 bp->rx_ring_id = resp->ring_id;
1909 } else if ( type == RING_ALLOC_REQ_RING_TYPE_NQ ) {
1910 FLAG_SET ( bp->flag_hwrm, VALID_RING_NQ );
1911 bp->nq_ring_id = resp->ring_id;
1912 }
1913 return STATUS_SUCCESS;
1914}
1915
/* Allocate the completion queue ring.
 * NOTE(review): the return statement (orig line 1919, a
 * bnxt_hwrm_ring_alloc call with the CQ ring type) is elided here.
 */
1916static int bnxt_hwrm_ring_alloc_cq ( struct bnxt *bp )
1917{
1918 DBGP ( "%s\n", __func__ );
1920}
1921
/* Allocate the transmit ring.
 * NOTE(review): the return statement (orig line 1925, a
 * bnxt_hwrm_ring_alloc call with the TX ring type) is elided here.
 */
1922static int bnxt_hwrm_ring_alloc_tx ( struct bnxt *bp )
1923{
1924 DBGP ( "%s\n", __func__ );
1926}
1927
/* Allocate the receive ring.
 * NOTE(review): the return statement (orig line 1931, a
 * bnxt_hwrm_ring_alloc call with the RX ring type) is elided here.
 */
1928static int bnxt_hwrm_ring_alloc_rx ( struct bnxt *bp )
1929{
1930 DBGP ( "%s\n", __func__ );
1932}
1933
1934static int bnxt_hwrm_ring_free_cq ( struct bnxt *bp )
1935{
1936 int ret = STATUS_SUCCESS;
1937
1938 DBGP ( "%s\n", __func__ );
1939 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_CQ ) ) )
1940 return ret;
1941
1942 ret = RING_FREE ( bp, bp->cq_ring_id, RING_FREE_REQ_RING_TYPE_L2_CMPL );
1943 if ( ret == STATUS_SUCCESS )
1944 FLAG_RESET ( bp->flag_hwrm, VALID_RING_CQ );
1945
1946 return ret;
1947}
1948
1949static int bnxt_hwrm_ring_free_tx ( struct bnxt *bp )
1950{
1951 int ret = STATUS_SUCCESS;
1952
1953 DBGP ( "%s\n", __func__ );
1954 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_TX ) ) )
1955 return ret;
1956
1957 ret = RING_FREE ( bp, bp->tx_ring_id, RING_FREE_REQ_RING_TYPE_TX );
1958 if ( ret == STATUS_SUCCESS )
1959 FLAG_RESET ( bp->flag_hwrm, VALID_RING_TX );
1960
1961 return ret;
1962}
1963
1964static int bnxt_hwrm_ring_free_rx ( struct bnxt *bp )
1965{
1966 int ret = STATUS_SUCCESS;
1967
1968 DBGP ( "%s\n", __func__ );
1969 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_RX ) ) )
1970 return ret;
1971
1972 ret = RING_FREE ( bp, bp->rx_ring_id, RING_FREE_REQ_RING_TYPE_RX );
1973 if ( ret == STATUS_SUCCESS )
1974 FLAG_RESET ( bp->flag_hwrm, VALID_RING_RX );
1975
1976 return ret;
1977}
1978
/* Allocate the notification queue ring; only needed on P5+ devices.
 * NOTE(review): the return statement (orig line 1983, a
 * bnxt_hwrm_ring_alloc call with the NQ ring type) is elided here.
 */
1979static int bnxt_hwrm_ring_alloc_nq ( struct bnxt *bp )
1980{
1981 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
1982 return STATUS_SUCCESS;
1984}
1985
1986static int bnxt_hwrm_ring_free_nq ( struct bnxt *bp )
1987{
1988 int ret = STATUS_SUCCESS;
1989
1990 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
1991 return STATUS_SUCCESS;
1992
1993 DBGP ( "%s\n", __func__ );
1994 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_NQ ) ) )
1995 return ret;
1996
1997 ret = RING_FREE ( bp, bp->nq_ring_id, RING_FREE_REQ_RING_TYPE_NQ );
1998 if ( ret == STATUS_SUCCESS )
1999 FLAG_RESET ( bp->flag_hwrm, VALID_RING_NQ );
2000
2001 return ret;
2002}
2003
/* Allocate a VNIC ( HWRM_VNIC_ALLOC ) and record its id; marks
 * VALID_VNIC_ID for teardown.
 * NOTE(review): original line 2015 (a request-field assignment,
 * presumably the default-VNIC flag) is elided in this extract.
 */
2004static int bnxt_hwrm_vnic_alloc ( struct bnxt *bp )
2005{
2006 u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_alloc_input );
2007 struct hwrm_vnic_alloc_input *req;
2008 struct hwrm_vnic_alloc_output *resp;
2009 int rc;
2010
2011 DBGP ( "%s\n", __func__ );
2012 req = ( struct hwrm_vnic_alloc_input * ) REQ_DMA_ADDR ( bp );
2013 resp = ( struct hwrm_vnic_alloc_output * ) RESP_DMA_ADDR ( bp );
2014 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_ALLOC, cmd_len );
2016 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
2017 if ( rc ) {
2018 DBGP ( "- %s ( ): Failed\n", __func__ );
2019 return STATUS_FAILURE;
2020 }
2021
2022 FLAG_SET ( bp->flag_hwrm, VALID_VNIC_ID );
2023 bp->vnic_id = resp->vnic_id;
2024 return STATUS_SUCCESS;
2025}
2026
2027static int bnxt_hwrm_vnic_free ( struct bnxt *bp )
2028{
2029 u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_free_input );
2030 struct hwrm_vnic_free_input *req;
2031 int rc;
2032
2033 DBGP ( "%s\n", __func__ );
2034 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_VNIC_ID ) ) )
2035 return STATUS_SUCCESS;
2036
2037 req = ( struct hwrm_vnic_free_input * ) REQ_DMA_ADDR ( bp );
2038 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_FREE, cmd_len );
2039 req->vnic_id = bp->vnic_id;
2040 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
2041 if ( rc ) {
2042 DBGP ( "- %s ( ): Failed\n", __func__ );
2043 return STATUS_FAILURE;
2044 }
2045
2046 FLAG_RESET ( bp->flag_hwrm, VALID_VNIC_ID );
2047 return STATUS_SUCCESS;
2048}
2049
/* Configure the VNIC ( HWRM_VNIC_CFG ): MRU, default rings (P5+) or ring
 * group (older chips).
 * NOTE(review): the enables assignments (orig lines 2058, 2062-2063,
 * 2067) are elided in this extract.
 */
2050static int bnxt_hwrm_vnic_cfg ( struct bnxt *bp )
2051{
2052 u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_cfg_input );
2053 struct hwrm_vnic_cfg_input *req;
2054
2055 DBGP ( "%s\n", __func__ );
2056 req = ( struct hwrm_vnic_cfg_input * ) REQ_DMA_ADDR ( bp );
2057 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_CFG, cmd_len );
2059 req->mru = bp->mtu;
2060
2061 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) {
2064 req->default_rx_ring_id = bp->rx_ring_id;
2065 req->default_cmpl_ring_id = bp->cq_ring_id;
2066 } else {
2068 req->dflt_ring_grp = bp->ring_grp_id;
2069 }
2070
2071 req->vnic_id = bp->vnic_id;
2072 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
2073}
2074
/* Enable the driver's default receive filter set ( RX_MASK ). */
2075static int bnxt_set_rx_mask ( struct bnxt *bp )
2076{
2077 return bnxt_hwrm_set_rx_mask ( bp, RX_MASK );
2078}
2079
/* Clear the receive filter (accept nothing) during teardown. */
2080static int bnxt_reset_rx_mask ( struct bnxt *bp )
2081{
2082 return bnxt_hwrm_set_rx_mask ( bp, 0 );
2083}
2084
/* Refresh link state from firmware.
 * NOTE(review): original line 2090 (presumably a bnxt_hwrm_port_phy_qcfg
 * call whose result is assigned to rc) is elided in this extract; as
 * shown, rc would always be 0.
 */
2085static int bnxt_get_link_state ( struct bnxt *bp )
2086{
2087 int rc = 0;
2088
2089 DBGP ( "%s \n", __func__ );
2091
2092 return rc;
2093}
2094
2095typedef int ( *hwrm_func_t ) ( struct bnxt *bp );
2096
/* NULL-terminated HWRM command sequences executed by bnxt_hwrm_run().
 * NOTE(review): the array declaration lines (orig 2097, 2106, 2111,
 * 2126, 2144 - e.g. "hwrm_func_t bring_up_init[] = {") and a few entries
 * (orig 2102, 2113) are elided in this extract; names below are taken
 * from the bnxt_up_init/bnxt_down_chip/bnxt_down_nic/bnxt_up_chip/
 * bnxt_up_nic macros that reference them.
 */
/* bring_up_init: minimal probe-time sequence */
2098 bnxt_hwrm_ver_get, /* HWRM_VER_GET */
2099 bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */
2100 bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */
2101 bnxt_get_device_address, /* HW MAC address */
2103 NULL
2104};
/* bring_down_chip: probe-failure / remove teardown */
2107 bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */
2108 NULL,
2109};
/* bring_down_nic: full close-time teardown, reverse of bring-up order */
2112 bnxt_hwrm_cfa_l2_filter_free, /* HWRM_CFA_L2_FILTER_FREE */
2114 bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */
2115 bnxt_free_rx_iob, /* HWRM_FREE_IOB */
2116 bnxt_hwrm_vnic_free, /* HWRM_VNIC_FREE */
2117 bnxt_hwrm_ring_free_grp, /* HWRM_RING_GRP_FREE */
2118 bnxt_hwrm_ring_free_rx, /* HWRM_RING_FREE - RX Ring */
2119 bnxt_hwrm_ring_free_tx, /* HWRM_RING_FREE - TX Ring */
2120 bnxt_hwrm_stat_ctx_free, /* HWRM_STAT_CTX_FREE */
2121 bnxt_hwrm_ring_free_cq, /* HWRM_RING_FREE - CQ Ring */
2122 bnxt_hwrm_ring_free_nq, /* HWRM_RING_FREE - NQ Ring */
2123 bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */
2124 NULL,
2125};
/* bring_up_chip: open-time firmware/function initialisation */
2127 bnxt_hwrm_ver_get, /* HWRM_VER_GET */
2128 bnxt_hwrm_func_reset_req, /* HWRM_FUNC_RESET */
2129 bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */
2130 bnxt_hwrm_func_drv_rgtr, /* HWRM_FUNC_DRV_RGTR */
2131 bnxt_hwrm_error_recovery_req, /* HWRM_ERROR_RECOVERY_REQ */
2132 bnxt_hwrm_backing_store_cfg, /* HWRM_FUNC_BACKING_STORE_CFG */
2133 bnxt_hwrm_backing_store_qcfg, /* HWRM_FUNC_BACKING_STORE_QCFG */
2134 bnxt_hwrm_func_resource_qcaps, /* HWRM_FUNC_RESOURCE_QCAPS */
2135 bnxt_hwrm_port_phy_qcaps_req, /* HWRM_PORT_PHY_QCAPS */
2136 bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */
2137 bnxt_hwrm_port_mac_cfg, /* HWRM_PORT_MAC_CFG */
2138 bnxt_hwrm_func_cfg_req, /* HWRM_FUNC_CFG */
2139 bnxt_query_phy_link, /* HWRM_PORT_PHY_QCFG */
2140 bnxt_get_device_address, /* HW MAC address */
2141 NULL,
2142};
/* bring_up_nic: open-time datapath (rings/VNIC/filters) initialisation */
2145 bnxt_hwrm_stat_ctx_alloc, /* HWRM_STAT_CTX_ALLOC */
2146 bnxt_hwrm_queue_qportcfg, /* HWRM_QUEUE_QPORTCFG */
2147 bnxt_hwrm_ring_alloc_nq, /* HWRM_RING_ALLOC - NQ Ring */
2148 bnxt_hwrm_ring_alloc_cq, /* HWRM_RING_ALLOC - CQ Ring */
2149 bnxt_hwrm_ring_alloc_tx, /* HWRM_RING_ALLOC - TX Ring */
2150 bnxt_hwrm_ring_alloc_rx, /* HWRM_RING_ALLOC - RX Ring */
2151 bnxt_hwrm_ring_alloc_grp, /* HWRM_RING_GRP_ALLOC - Group */
2152 bnxt_hwrm_vnic_alloc, /* HWRM_VNIC_ALLOC */
2153 bnxt_post_rx_buffers, /* Post RX buffers */
2154 bnxt_hwrm_set_async_event, /* ENABLES_ASYNC_EVENT_CR */
2155 bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */
2156 bnxt_hwrm_cfa_l2_filter_alloc, /* HWRM_CFA_L2_FILTER_ALLOC */
2157 bnxt_get_phy_link, /* HWRM_PORT_PHY_QCFG - PhyLink */
2158 bnxt_set_rx_mask, /* HWRM_CFA_L2_SET_RX_MASK */
2159 NULL,
2160};
2161
2162int bnxt_hwrm_run ( hwrm_func_t cmds[], struct bnxt *bp )
2163{
2164 hwrm_func_t *ptr;
2165 int ret;
2166
2167 for ( ptr = cmds; *ptr; ++ptr ) {
2168 memset ( ( void * ) REQ_DMA_ADDR ( bp ), 0, REQ_BUFFER_SIZE );
2169 memset ( ( void * ) RESP_DMA_ADDR ( bp ), 0, RESP_BUFFER_SIZE );
2170 ret = ( *ptr ) ( bp );
2171 if ( ret ) {
2172 DBGP ( "- %s ( ): Failed\n", __func__ );
2173 return STATUS_FAILURE;
2174 }
2175 }
2176 return STATUS_SUCCESS;
2177}
2178
/* Convenience wrappers binding each HWRM command table to bnxt_hwrm_run() */
2179#define bnxt_down_chip( bp ) bnxt_hwrm_run ( bring_down_chip, bp )
2180#define bnxt_up_chip( bp ) bnxt_hwrm_run ( bring_up_chip, bp )
2181#define bnxt_down_nic( bp ) bnxt_hwrm_run ( bring_down_nic, bp )
2182#define bnxt_up_nic( bp ) bnxt_hwrm_run ( bring_up_nic, bp )
2183#define bnxt_up_init( bp ) bnxt_hwrm_run ( bring_up_init, bp )
2184
/* iPXE net_device open(): allocate ring memory, initialise NIC state,
 * then bring up the chip and the datapath; unwinds on failure.
 * NOTE(review): original line 2214 (presumably bnxt_down_chip ( bp ) in
 * the error path) is elided in this extract.
 */
2185static int bnxt_open ( struct net_device *dev )
2186{
2187 struct bnxt *bp = dev->priv;
2188
2189 DBGP ( "%s\n", __func__ );
2190
2191 /* Allocate and Initialise device specific parameters */
2192 if ( bnxt_alloc_rings_mem ( bp ) != 0 ) {
2193 DBGP ( "- %s ( ): bnxt_alloc_rings_mem Failed\n", __func__ );
2194 return -ENOMEM;
2195 }
2196
2197 bnxt_mm_nic ( bp );
2198
2199 if ( bnxt_up_chip ( bp ) != 0 ) {
2200 DBGP ( "- %s ( ): bnxt_up_chip Failed\n", __func__ );
2201 goto err_bnxt_open;
2202 }
2203
2204 if ( bnxt_up_nic ( bp ) != 0 ) {
2205 DBGP ( "- %s ( ): bnxt_up_nic\n", __func__);
2206 goto err_bnxt_open;
2207 }
2208
2209 return 0;
2210
2211err_bnxt_open:
2212 bnxt_down_nic ( bp );
2213
2215
2216 return -1;
2217}
2218
2219static void bnxt_tx_adjust_pkt ( struct bnxt *bp, struct io_buffer *iob )
2220{
2221 u16 prev_len = iob_len ( iob );
2222
2223 bp->vlan_tx = bnxt_get_pkt_vlan ( ( char * )iob->data );
2224 if ( !bp->vlan_tx && bp->vlan_id )
2225 bnxt_add_vlan ( iob, bp->vlan_id );
2226
2227 dbg_tx_vlan ( bp, ( char * )iob->data, prev_len, iob_len ( iob ) );
2228 if ( iob_len ( iob ) != prev_len )
2229 prev_len = iob_len ( iob );
2230
2231}
2232
2233static int bnxt_tx ( struct net_device *dev, struct io_buffer *iob )
2234{
2235 struct bnxt *bp = dev->priv;
2236 u16 len, entry;
2237 physaddr_t mapping;
2238
2239 if ( bp->er.er_rst_on ) {
2240 /* Error recovery has been initiated */
2241 return -EBUSY;
2242 }
2243
2244 if ( bnxt_tx_avail ( bp ) < 1 ) {
2245 DBGP ( "- %s ( ): Failed no bd's available\n", __func__ );
2246 return -ENOBUFS;
2247 }
2248
2249 mapping = iob_dma ( iob );
2250 bnxt_tx_adjust_pkt ( bp, iob );
2251 entry = bp->tx.prod_id;
2252 len = iob_len ( iob );
2253 bp->tx.iob[entry] = iob;
2254 bnxt_set_txq ( bp, entry, mapping, len );
2255 entry = NEXT_IDX ( entry, bp->tx.ring_cnt );
2256 /* If the ring has wrapped, toggle the epoch bit */
2257 if ( bp->tx.prod_id > entry )
2258 bp->tx.epoch ^= 1;
2259 dump_tx_pkt ( ( u8 * )iob->data, len, bp->tx.prod_id );
2260 /* Packets are ready, update Tx producer idx local and on card. */
2261 bnxt_db_tx ( bp, ( u32 )entry );
2262 bp->tx.prod_id = entry;
2263 bp->tx.cnt_req++;
2264 /* memory barrier */
2265 mb ( );
2266 return 0;
2267}
2268
2269static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt )
2270{
2271 u16 cons_id;
2272
2273 cons_id = bp->nq.cons_id + cnt;
2274 if ( cons_id >= bp->nq.ring_cnt ) {
2275 /* Toggle completion bit when the ring wraps. */
2276 bp->nq.completion_bit ^= 1;
2277 bp->nq.epoch ^= 1;
2278 cons_id = cons_id - bp->nq.ring_cnt;
2279 }
2280 bp->nq.cons_id = cons_id;
2281}
2282
2283void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt )
2284{
2285 if ( evt->event_data1 & 0x01 )
2286 bp->link_status = STATUS_LINK_ACTIVE;
2287 else
2288 bp->link_status = STATUS_LINK_DOWN;
2289
2290 bnxt_set_link ( bp );
2291 dbg_link_status ( bp );
2292}
2293
/* GRC window used to reach firmware health/recovery registers:
 * a 4KB-aligned GRC base is mapped into the window at
 * BNXT_FW_HEALTH_WIN_OFF, and individual dword registers are then
 * addressed through their masked offsets.
 */
#define BNXT_FW_HEALTH_WIN_OFF 0x3000
#define BNXT_REG_WINDOW_BASE 0x400
#define BNXT_GRC_BASE_MASK 0xfff
#define BNXT_GRC_OFFSET_MASK 0xffc
2298
2299u32 bnxt_er_reg_write ( struct bnxt *bp, u32 reg_addr, u32 reg_val)
2300{
2301 u32 reg_base = 0;
2302
2303 reg_base = reg_addr & ~BNXT_GRC_BASE_MASK;
2304
2305 writel ( reg_base, bp->bar0 + BNXT_REG_WINDOW_BASE + 8 );
2306
2307 writel ( reg_val, bp->bar0 + ( BNXT_FW_HEALTH_WIN_OFF +
2308 ( reg_addr & BNXT_GRC_OFFSET_MASK ) ) );
2309
2310 DBGP ("bnxt_er_reg_write: reg_addr = %x, reg_val = %x\n", reg_addr, reg_val);
2311 return reg_val;
2312}
2313
2314u32 bnxt_er_reg_read ( struct bnxt *bp, u32 reg_addr)
2315{
2316 u32 reg_val = 0;
2317 u32 reg_base = 0;
2318
2319 reg_base = reg_addr & ~BNXT_GRC_BASE_MASK;
2320
2321 writel ( reg_base, bp->bar0 + BNXT_REG_WINDOW_BASE + 8 );
2322
2323 reg_val = readl ( bp->bar0 + ( BNXT_FW_HEALTH_WIN_OFF +
2324 ( reg_addr & BNXT_GRC_OFFSET_MASK ) ) );
2325
2326 DBGP ("bnxt_er_reg_read: reg_addr = %x, reg_val = %x\n", reg_addr, reg_val);
2327 return reg_val;
2328}
2329
/**
 * Read an error-recovery register value.
 *
 * Dispatches on the register address-space type: PCI config space,
 * GRC window, BAR0 or BAR1 direct access.
 *
 * NOTE(review): the switch 'case' labels (original lines 2335, 2338,
 * 2341 and 2344) have been lost from this copy of the file — restore
 * them from upstream iPXE bnxt.c before building.
 *
 * @v bp		Driver private data
 * @v reg_addr		Register address (includes address-space bits)
 * @v reg_type		Address-space selector
 * @v mask		Mask applied to the address before the access
 * @ret reg_val		Value read (0 for an unknown address space)
 */
u32 bnxt_er_get_reg_val ( struct bnxt *bp, u32 reg_addr, u32 reg_type, u32 mask )
{
	u32 reg_val = 0;

	switch ( reg_type ) {
	/* NOTE(review): case label lost (PCI config space) */
		pci_read_config_dword ( bp->pdev, reg_addr & mask, &reg_val );
		break;
	/* NOTE(review): case label lost (GRC window) */
		reg_val = bnxt_er_reg_read ( bp, reg_addr );
		break;
	/* NOTE(review): case label lost (BAR0) */
		reg_val = readl ( bp->bar0 + ( reg_addr & mask ) );
		break;
	/* NOTE(review): case label lost (BAR1) */
		reg_val = readl ( bp->bar1 + ( reg_addr & mask ) );
		break;
	default:
		break;
	}
	DBGP ( "read_reg_val bp %p addr %x type %x : reg_val = %x\n", bp, reg_addr, reg_type, reg_val );
	return reg_val;
}
2353
/**
 * Write one reset-register value during error recovery.
 *
 * Dispatches on the register address-space type encoded in the
 * address: PCI config space, GRC window, BAR0 or BAR1.
 *
 * NOTE(review): this copy has lost the declaration of the local
 * 'mask' variable (original line 2356) and the switch 'case' labels
 * (lines 2360, 2363, 2366 and 2369) — restore from upstream iPXE
 * bnxt.c before building.
 *
 * @v bp		Driver private data
 * @v reg_addr		Register address (includes address-space bits)
 * @v reg_val		Value to write
 */
void bnxt_rst_reg_val ( struct bnxt *bp, u32 reg_addr, u32 reg_val )
{
	u32 reg_type = reg_addr & ER_QCFG_RESET_REG_ADDR_SPACE_MASK;

	switch ( reg_type ) {
	/* NOTE(review): case label lost (PCI config space) */
		pci_write_config_dword ( bp->pdev, reg_addr & mask, reg_val );
		break;
	/* NOTE(review): case label lost (GRC window) */
		bnxt_er_reg_write ( bp, reg_addr, reg_val );
		break;
	/* NOTE(review): case label lost (BAR0) */
		writel ( reg_val, bp->bar0 + ( reg_addr & mask ) );
		break;
	/* NOTE(review): case label lost (BAR1) */
		writel ( reg_val, bp->bar1 + ( reg_addr & mask ) );
		break;
	default:
		break;
	}
}
2376
2378{
2379 u32 delay_time = 0;
2380 u8 i;
2381
2382 for ( i = 0; i < bp->er.reg_array_cnt; i++ ) {
2383 bnxt_rst_reg_val ( bp, bp->er.rst_reg[i], bp->er.rst_reg_val[i] );
2384
2385 delay_time = bp->er.delay_after_rst[i];
2386 if ( delay_time ) {
2387 udelay ( delay_time * 100000 );
2388 }
2389 }
2390
2391}
2392
/**
 * Run the firmware error-recovery task.
 *
 * When invoked from the heartbeat path ( hb_task set ), first checks
 * whether the firmware heartbeat counter is still advancing; if so,
 * no recovery is needed.  Otherwise disables bus mastering, frees RX
 * buffers, waits for firmware to become operational, and rebuilds the
 * chip state and queues once the firmware health register reports
 * ready.
 *
 * NOTE(review): several lines were lost from this copy of the file
 * (original lines 2411, 2445 and 2459): the closing arguments of two
 * bnxt_er_get_reg_val() calls and — judging by the surviving comment —
 * the call that resets the recovery registers.  Restore from upstream
 * iPXE bnxt.c before building.
 *
 * @v bp		Driver private data
 * @v hb_task		Non-zero when called from the heartbeat monitor
 */
void bnxt_er_task ( struct bnxt* bp, u8 hb_task )
{
	u32 present_hb_cnt;
	unsigned short pci_command, new_command;
	u8 i;

	DBGP ( "%s(hb_task: %d)\n", __func__, hb_task );
	if ( bp->er.er_rst_on ) {
		if ( timer_running ( &bp->wait_timer) ) {
			/* Reset already in progress */
			return;
		}
	}

	if ( hb_task ) {
		present_hb_cnt = bnxt_er_get_reg_val ( bp,
			bp->er.fw_hb_reg,
			bp->er.fw_hb_reg & ER_QCFG_FW_HB_REG_ADDR_SPACE_MASK,
			/* NOTE(review): final argument lost (line 2411) */

		/* Heartbeat still advancing — firmware is alive */
		if ( present_hb_cnt != bp->er.last_fw_hb ) {
			bp->er.last_fw_hb = present_hb_cnt;
			return;
		}
	}

	/* Heartbeat not incrementing, trigger error recovery */
	DBGP ( "%s(): Trigger Error Recovery\n", __func__ );
	bp->er.er_rst_on = 1;
	/* Set a recovery phase wait timer */
	start_timer_fixed ( &bp->wait_timer, BNXT_ER_WAIT_TIMER_INTERVAL ( bp ) );

	/* Disable bus master */
	pci_read_config_word ( bp->pdev, PCI_COMMAND, &pci_command );
	new_command = pci_command & ~PCI_COMMAND_MASTER;
	pci_write_config_word ( bp->pdev, PCI_COMMAND, new_command );

	/* Free up resources */
	bnxt_free_rx_iob ( bp );

	/* wait for firmware to be operational */
	udelay ( bp->er.rst_min_dsecs * 100000 );

	/* Reconfigure the PCI attributes */
	pci_write_config_word ( bp->pdev, PCI_COMMAND, pci_command );

	if ( hb_task ) {
		if ( bp->er.master_pf ) {
			/* wait for master func wait period */
			udelay ( bp->er.master_wait_period * 100000 );

			/* Reset register values */
			/* NOTE(review): reset call lost (line 2445),
			 * presumably bnxt_rst_er_registers() */

			/* wait for master wait post reset */
			udelay ( bp->er.master_wait_post_rst * 100000 );
		} else {
			/* wait for normal func wait period */
			udelay ( bp->er.normal_wait_period * 100000 );
		}
	}

	/* Poll (bounded) until firmware reports ready */
	for ( i = 0; i < bp->er.max_bailout_post_rst; i++ ) {
		bp->er.fw_health_status = bnxt_er_get_reg_val ( bp,
			bp->er.fw_status_reg,
			bp->er.fw_status_reg & ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_MASK,
			/* NOTE(review): final argument lost (line 2459) */

		if ( bp->er.fw_health_status == FW_STATUS_REG_CODE_READY )
			break;

		/* wait for 1 second */
		udelay ( 1000000 );
	}

	if ( bp->er.fw_health_status == FW_STATUS_REG_CODE_READY ) {
		/* Initialize resources */
		bnxt_mm_nic ( bp );

		/* Get device specific information */
		bnxt_up_chip ( bp );

		/* Allocate queues */
		bnxt_up_nic ( bp );
	}

	/* Clear Reset in progress flag */
	bp->er.er_rst_on = 0;
	stop_timer ( &bp->wait_timer );
}
2483
/**
 * Process an error-recovery capability async event.
 *
 * Starts or stops the driver-initiated recovery timer according to
 * the event flags, records whether this function is the recovery
 * master, and snapshots the firmware health/reset/recovery counters.
 *
 * NOTE(review): this copy has lost the function signature line
 * (original 2484, "void bnxt_process_er_event ( struct bnxt *bp,")
 * plus the flag constants of both if-conditions (lines 2488, 2498)
 * and the final mask arguments of the four bnxt_er_get_reg_val()
 * calls (lines 2507, 2513, 2517, 2521).  Restore from upstream.
 */
	struct hwrm_async_event_cmpl *evt )
{
	if ( evt->event_data1 &
		/* NOTE(review): recovery-enabled flag constant lost */
		bp->er.driver_initiated_recovery = 1;
		start_timer_fixed ( &bp->task_timer, BNXT_ER_TIMER_INTERVAL ( bp ) );

	} else {
		bp->er.driver_initiated_recovery = 0;
		stop_timer ( &bp->task_timer );
	}

	if ( evt->event_data1 &
		/* NOTE(review): master-function flag constant lost */
		bp->er.master_pf = 1;
	} else {
		bp->er.master_pf = 0;
	}

	bp->er.fw_health_status = bnxt_er_get_reg_val ( bp,
		bp->er.fw_status_reg,
		bp->er.fw_status_reg & ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_MASK,
	/* Initialize the last fw heart beat count */
	bp->er.last_fw_hb = 0;
	bp->er.last_fw_rst_cnt = bnxt_er_get_reg_val ( bp,
		bp->er.fw_rst_cnt_reg,
		bp->er.fw_rst_cnt_reg & ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_MASK,
	bp->er.rst_in_progress = bnxt_er_get_reg_val ( bp,
		bp->er.rst_inprg_reg,
		bp->er.rst_inprg_reg & ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_MASK,
	bp->er.err_recovery_cnt = bnxt_er_get_reg_val ( bp,
		bp->er.recvry_cnt_reg,
		bp->er.recvry_cnt_reg & ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_MASK,
}
2523
/**
 * Process a firmware RESET_NOTIFY async event.
 *
 * Records the firmware's minimum/maximum reset times (deciseconds,
 * carried in the completion timestamps, defaulted when zero) and
 * flags that error recovery should be initiated from the poll loop.
 *
 * NOTE(review): this copy has lost the function signature line
 * (original 2524, "void bnxt_process_reset_notify_event ( struct
 * bnxt *bp,") and the condition constants of the if-statement
 * (lines 2529-2530).  Restore from upstream iPXE bnxt.c.
 */
	struct hwrm_async_event_cmpl *evt )
{
	DBGP ( "Reset Notify Async event" );
	if ( ( ( evt->event_data1 ) &
		/* NOTE(review): condition constants lost (2529-2530) */
		DBGP ( " error recovery initiated\n" );
		bp->er.rst_min_dsecs = evt->timestamp_lo;
		bp->er.rst_max_dsecs = evt->timestamp_hi;

		/* Fall back to defaults when firmware reports zero */
		if ( bp->er.rst_min_dsecs == 0 )
			bp->er.rst_min_dsecs = ER_DFLT_FW_RST_MIN_DSECS;

		if ( bp->er.rst_max_dsecs == 0 )
			bp->er.rst_max_dsecs = ER_DFLT_FW_RST_MAX_DSECS;

		// Trigger Error recovery
		bp->er.er_initiate = 1;
	}
}
2545
/**
 * Handle a link-speed async event.
 *
 * NOTE(review): this function is heavily truncated in this copy — the
 * signature (original line 2546, "void bnxt_link_speed_evt ( struct
 * bnxt *bp, struct hwrm_async_event_cmpl *evt )" per the
 * cross-reference) and most of the event-data handling (lines 2548,
 * 2550, 2553) are missing.  Only the trailing link-update calls
 * survive.  Restore from upstream iPXE bnxt.c before building.
 */
{
	DBGP ("bnxt_link_speed_evt: event data = %lx\n",
	}

	return;
	}

	bnxt_set_link ( bp );
	dbg_link_info ( bp );
	dbg_link_status ( bp );
}
2561
2575
2590
/**
 * Service the completion queue.
 *
 * Walks the CQ from the current consumer index, dispatching TX
 * completions, RX completions and async events, until an entry whose
 * valid bit does not match the expected completion phase is found.
 * Rings the CQ doorbell if any entries were consumed.
 *
 * NOTE(review): this copy has lost the declaration of the local
 * 'done' flag (original line 2597, presumably initialised to
 * SERVICE_NEXT_CQ_BD), all switch 'case' labels, several async-event
 * handler call lines, and the default-branch statement (line 2659).
 * Restore from upstream iPXE bnxt.c before building.
 *
 * @v dev		Network device
 */
static void bnxt_service_cq ( struct net_device *dev )
{
	struct bnxt *bp = dev->priv;
	struct cmpl_base *cmp;
	struct tx_cmpl *tx;
	u16 old_cid = bp->cq.cons_id;
	/* NOTE(review): 'done' declaration lost (line 2597) */
	u32 cq_type;
	struct hwrm_async_event_cmpl *evt;

	while ( done == SERVICE_NEXT_CQ_BD ) {
		/* Locate the next completion descriptor */
		cmp = ( struct cmpl_base * )BD_NOW ( CQ_DMA_ADDR ( bp ),
			bp->cq.cons_id,
			sizeof ( struct cmpl_base ) );

		/* Stop at the first entry not in the current phase */
		if ( ( cmp->info3_v & CMPL_BASE_V ) ^ bp->cq.completion_bit )
			break;

		cq_type = cmp->type & CMPL_BASE_TYPE_MASK;
		dump_evt ( ( u8 * )cmp, cq_type, bp->cq.cons_id, 0 );
		dump_cq ( cmp, bp->cq.cons_id, bp->nq.toggle );

		switch ( cq_type ) {
		/* NOTE(review): case label lost (TX completion) */
			tx = ( struct tx_cmpl * )cmp;
			bnxt_tx_complete ( dev, ( u16 )tx->opaque );
		/* Fall through */
		/* NOTE(review): case label lost */
			bnxt_adv_cq_index ( bp, 1 );
			break;
		/* NOTE(review): case labels lost (RX completion) */
			done = bnxt_rx_complete ( dev,
				( struct rx_pkt_cmpl * )cmp );
			break;
		/* NOTE(review): case label lost (async event) */
			evt = ( struct hwrm_async_event_cmpl * )cmp;
			switch ( evt->event_id ) {
			/* NOTE(review): case label lost (link status) */
				bnxt_link_evt ( bp,
					( struct hwrm_async_event_cmpl * )cmp );
				break;
			/* NOTE(review): case label + handler call lost */
				( struct hwrm_async_event_cmpl * )cmp );
				break;
			/* NOTE(review): case label + handler call lost */
				( struct hwrm_async_event_cmpl * )cmp );
				break;
			/* NOTE(review): case label + handler call lost */
				( struct hwrm_async_event_cmpl * )cmp );
				break;
			/* NOTE(review): case label + handler call lost */
				( struct hwrm_async_event_cmpl * )cmp );
				break;
			/* NOTE(review): case label + handler call lost */
				( struct hwrm_async_event_cmpl * )cmp );
				break;
			default:
				break;
			}
			bnxt_adv_cq_index ( bp, 1 );
			break;
		default:
			/* NOTE(review): statement lost (line 2659) */
			break;
		}
	}

	/* Ring the CQ doorbell if any entries were consumed */
	if ( bp->cq.cons_id != old_cid )
		bnxt_db_cq ( bp );
}
2667
/**
 * Service the notification queue (P5-plus chips only).
 *
 * Walks the NQ from the current consumer index, recording the toggle
 * bits and dispatching async events, until an entry whose valid bit
 * does not match the expected phase is found.  Rings the NQ doorbell
 * if any entries were consumed.
 *
 * NOTE(review): this copy has lost the declaration of the local
 * 'done' flag (original line 2673, presumably initialised to
 * SERVICE_NEXT_NQ_BD), all switch 'case' labels, several async-event
 * handler call lines, and the default-branch statement (line 2728).
 * Restore from upstream iPXE bnxt.c before building.
 *
 * @v dev		Network device
 */
static void bnxt_service_nq ( struct net_device *dev )
{
	struct bnxt *bp = dev->priv;
	struct nq_base *nqp;
	u16 old_cid = bp->nq.cons_id;
	/* NOTE(review): 'done' declaration lost (line 2673) */
	u32 nq_type;
	struct hwrm_async_event_cmpl *evt;

	/* The notification queue exists only on P5-plus chips */
	if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
		return;

	while ( done == SERVICE_NEXT_NQ_BD ) {
		nqp = ( struct nq_base * )BD_NOW ( NQ_DMA_ADDR ( bp ),
			bp->nq.cons_id,
			sizeof ( struct nq_base ) );
		/* Stop at the first entry not in the current phase */
		if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit )
			break;
		nq_type = ( nqp->type & NQ_CN_TYPE_MASK );
		bp->nq.toggle = ( ( nqp->type & NQ_CN_TOGGLE_MASK ) >> NQ_CN_TOGGLE_SFT );
		dump_evt ( ( u8 * )nqp, nq_type, bp->nq.cons_id, 1 );
		dump_nq ( nqp, bp->nq.cons_id );

		switch ( nq_type ) {
		/* NOTE(review): case label lost (async event) */
			evt = ( struct hwrm_async_event_cmpl * )nqp;
			switch ( evt->event_id ) {
			/* NOTE(review): case label lost (link status) */
				bnxt_link_evt ( bp,
					( struct hwrm_async_event_cmpl * )nqp );
				break;
			/* NOTE(review): case label + handler call lost */
				( struct hwrm_async_event_cmpl * )nqp );
				break;
			/* NOTE(review): case label + handler call lost */
				( struct hwrm_async_event_cmpl * )nqp );
				break;
			/* NOTE(review): case label + handler call lost */
				( struct hwrm_async_event_cmpl * )nqp );
				break;
			/* NOTE(review): case label + handler call lost */
				( struct hwrm_async_event_cmpl * )nqp );
				break;
			/* NOTE(review): case label + handler call lost */
				( struct hwrm_async_event_cmpl * )nqp );
				break;
			default:
				break;
			}
			bnxt_adv_nq_index ( bp, 1 );
			break;
		/* NOTE(review): case label lost (CQ notification) */
			bnxt_adv_nq_index ( bp, 1 );
			break;
		default:
			/* NOTE(review): statement lost (line 2728) */
			break;
		}
	}

	/* Ring the NQ doorbell if any entries were consumed */
	if ( bp->nq.cons_id != old_cid )
		bnxt_db_nq ( bp );
}
2736
/**
 * Periodic error-recovery heartbeat timer callback.
 *
 * Runs the heartbeat-monitoring recovery task whenever firmware has
 * requested driver-initiated recovery.
 *
 * NOTE(review): the timer restart call (original line 2742) is
 * missing from this copy; only its comment survives.  Restore from
 * upstream iPXE bnxt.c.
 *
 * @v timer		Retry timer
 * @v over		Failure indicator (unused)
 */
static void bnxt_er_task_timer ( struct retry_timer *timer, int over __unused )
{
	struct bnxt *bp = container_of (timer, struct bnxt, task_timer );

	/* Restart timer */
	/* NOTE(review): start_timer call lost (line 2742) */
	if ( bp->er.driver_initiated_recovery ) {
		bnxt_er_task ( bp, 1 );
	}
}
2747
2748static void bnxt_er_wait_timer ( struct retry_timer *timer, int over __unused )
2749{
2750 struct bnxt *bp = container_of (timer, struct bnxt, wait_timer );
2751 /* The sole function of this timer is to wait for the specified
2752 * amount of time to complete error recovery phase
2753 */
2754 stop_timer ( &bp->wait_timer );
2755 return;
2756}
2757
2758static void bnxt_poll ( struct net_device *dev )
2759{
2760 struct bnxt *bp = dev->priv;
2761
2762 mb ( );
2763 bnxt_service_nq ( dev );
2764 bnxt_service_cq ( dev );
2765
2766 if ( bp->er.er_initiate ) {
2767 bnxt_er_task ( bp, 0 );
2768 bp->er.er_initiate = 0;
2769 }
2770
2771}
2772
/**
 * Close network device.
 *
 * Stops the error-recovery timers and brings down the NIC queues.
 *
 * NOTE(review): a teardown call (original line 2783) is missing from
 * this copy — likely bnxt_down_chip()/memory free; verify against
 * upstream iPXE bnxt.c.
 *
 * @v dev		Network device
 */
static void bnxt_close ( struct net_device *dev )
{
	struct bnxt *bp = dev->priv;

	DBGP ( "%s\n", __func__ );
	stop_timer ( &bp->task_timer );
	stop_timer ( &bp->wait_timer );

	bnxt_down_nic (bp);

	/* NOTE(review): teardown call lost here (line 2783) */

}
2786
2788 .open = bnxt_open,
2789 .close = bnxt_close,
2790 .poll = bnxt_poll,
2791 .transmit = bnxt_tx,
2792};
2793
/**
 * Probe PCI device.
 *
 * Allocates and initialises the network device and driver private
 * data, enables the PCI device, runs the bring-up-init HWRM command
 * list and registers the network device.
 *
 * NOTE(review): several lines were lost from this copy of the file
 * (original lines 2810, 2835, 2838, 2857 and 2861) — most notably
 * the netdev_init() call that binds bnxt_netdev_ops.  Restore from
 * upstream iPXE bnxt.c before building.
 *
 * @v pci		PCI device
 * @ret err		0 on success, negative error on failure
 */
static int bnxt_init_one ( struct pci_device *pci )
{
	struct net_device *netdev;
	struct bnxt *bp;
	int err = 0;

	DBGP ( "%s\n", __func__ );
	/* Allocate network device */
	netdev = alloc_etherdev ( sizeof ( *bp ) );
	if ( !netdev ) {
		DBGP ( "- %s ( ): alloc_etherdev Failed\n", __func__ );
		err = -ENOMEM;
		goto disable_pdev;
	}

	/* Initialise network device */
	/* NOTE(review): netdev_init() call lost (line 2810) */

	/* Driver private area for this device */
	bp = netdev->priv;

	/* Set PCI driver private data */
	pci_set_drvdata ( pci, netdev );

	/* Clear Private area data */
	memset ( bp, 0, sizeof ( *bp ) );
	bp->pdev = pci;
	bp->dev = netdev;
	netdev->dev = &pci->dev;

	/* Timers used by the firmware error-recovery state machine */
	timer_init ( &bp->task_timer, bnxt_er_task_timer, &netdev->refcnt );
	timer_init ( &bp->wait_timer, bnxt_er_wait_timer, &netdev->refcnt );

	/* Configure DMA */
	bp->dma = &pci->dma;
	netdev->dma = bp->dma;

	/* Enable PCI device */
	adjust_pci_device ( pci );

	/* Get PCI Information */
	/* NOTE(review): call lost (line 2835) */

	/* Allocate HWRM memory */
	/* NOTE(review): call lost (line 2838) */

	bp->link_status = STATUS_LINK_DOWN;
	bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
	if ( bnxt_up_init ( bp ) != 0 ) {
		goto err_down_chip;
	}

	/* Register Network device */
	if ( ( err = register_netdev ( netdev ) ) != 0 ) {
		DBGP ( "- %s ( ): register_netdev Failed\n", __func__ );
		goto err_down_chip;
	}

	/* Set Initial Link State */
	bnxt_set_link ( bp );

	return 0;

	/* NOTE(review): line 2857 lost (error-path label/statement) */

err_down_chip:
	bnxt_down_pci ( bp );
	/* NOTE(review): line 2861 lost (likely HWRM memory free) */
	netdev_put ( netdev );

disable_pdev:
	pci_set_drvdata ( pci, NULL );
	return err;
}
2868
/**
 * Remove PCI device.
 *
 * Tears down the network device and driver resources in reverse
 * order of probe.
 *
 * NOTE(review): three calls were lost from this copy (original lines
 * 2876, 2879 and 2885) — judging by the surviving comments, the
 * network-device unregister, the HWRM buffer free and the
 * network-device stop.  Restore from upstream iPXE bnxt.c.
 *
 * @v pci		PCI device
 */
static void bnxt_remove_one ( struct pci_device *pci )
{
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct bnxt *bp = netdev->priv;

	DBGP ( "%s\n", __func__ );
	/* Unregister network device */
	/* NOTE(review): call lost (line 2876) */

	/* Free HWRM buffers */
	/* NOTE(review): call lost (line 2879) */

	/* iounmap PCI BAR ( s ) */
	bnxt_down_pci ( bp );

	/* Stop network device */
	/* NOTE(review): call lost (line 2885) */

	/* Drop reference to network device */
	netdev_put ( netdev );
	DBGP ( "%s - Done\n", __func__ );
}
2891
/* Broadcom NXE PCI driver */
struct pci_driver bnxt_pci_driver __pci_driver = {
	.ids = bnxt_nics,			/* Supported PCI IDs */
	.id_count = ARRAY_SIZE ( bnxt_nics ),
	.probe = bnxt_init_one,			/* Device probe */
	.remove = bnxt_remove_one,		/* Device removal */
};
#define NULL
NULL pointer (VOID *)
Definition Base.h:322
struct golan_eqe_cmd cmd
Definition CIB_PRM.h:1
uint32_t flag
Flag number.
Definition aqc1xx.h:2
struct arbelprm_rc_send_wqe rc
Definition arbel.h:3
unsigned long physaddr_t
Definition stdint.h:20
static const void * src
Definition string.h:48
#define assert(condition)
Assert a condition at run-time.
Definition assert.h:50
#define MAX_TX_DESC_CNT
Definition bnx2.h:3881
#define MAX_RX_DESC_CNT
Definition bnx2.h:3885
static int bnxt_hwrm_set_rx_mask(struct bnxt *bp, u32 rx_mask)
Definition bnxt.c:1286
static int bnxt_hwrm_error_recovery_req(struct bnxt *bp)
Definition bnxt.c:1038
static int bnxt_hwrm_ring_alloc_nq(struct bnxt *bp)
Definition bnxt.c:1979
static int bnxt_hwrm_ring_free_grp(struct bnxt *bp)
Definition bnxt.c:1771
static u32 bnxt_tx_avail(struct bnxt *bp)
Definition bnxt.c:292
void bnxt_rst_er_registers(struct bnxt *bp)
Definition bnxt.c:2377
int bnxt_alloc_rings_mem(struct bnxt *bp)
Definition bnxt.c:643
static void bnxt_poll(struct net_device *dev)
Definition bnxt.c:2758
static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
Definition bnxt.c:1748
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp)
Definition bnxt.c:2004
static int bnxt_alloc_rx_iob(struct bnxt *bp, u16 cons_id, u16 iob_idx)
Definition bnxt.c:370
static void bnxt_close(struct net_device *dev)
Definition bnxt.c:2773
u32 bnxt_er_reg_read(struct bnxt *bp, u32 reg_addr)
Definition bnxt.c:2314
static int bnxt_hwrm_ring_free_cq(struct bnxt *bp)
Definition bnxt.c:1934
hwrm_func_t bring_down_nic[]
Definition bnxt.c:2111
static void bnxt_db_nq(struct bnxt *bp)
Definition bnxt.c:221
int bnxt_free_rx_iob(struct bnxt *bp)
Definition bnxt.c:337
hwrm_func_t bring_down_chip[]
Definition bnxt.c:2106
static int bnxt_hwrm_ring_alloc_grp(struct bnxt *bp)
Definition bnxt.c:1794
#define BNXT_FW_HEALTH_WIN_OFF
Definition bnxt.c:2294
static int bnxt_get_pci_info(struct bnxt *bp)
Definition bnxt.c:132
static int bnxt_open(struct net_device *dev)
Definition bnxt.c:2185
static void bnxt_db_cq(struct bnxt *bp)
Definition bnxt.c:234
static int bnxt_hwrm_ring_alloc_rx(struct bnxt *bp)
Definition bnxt.c:1928
static int bnxt_tx(struct net_device *dev, struct io_buffer *iob)
Definition bnxt.c:2233
void bnxt_mm_init_hwrm(struct bnxt *bp, const char *func)
Definition bnxt.c:516
hwrm_func_t bring_up_chip[]
Definition bnxt.c:2126
#define BNXT_GRC_BASE_MASK
Definition bnxt.c:2296
static int bnxt_get_link_state(struct bnxt *bp)
Definition bnxt.c:2085
static int bnxt_reset_rx_mask(struct bnxt *bp)
Definition bnxt.c:2080
void bnxt_er_task(struct bnxt *bp, u8 hb_task)
Definition bnxt.c:2393
static int bnxt_hwrm_cfa_l2_filter_alloc(struct bnxt *bp)
Definition bnxt.c:1202
static void bnxt_service_cq(struct net_device *dev)
Definition bnxt.c:2591
static void bnxt_tx_adjust_pkt(struct bnxt *bp, struct io_buffer *iob)
Definition bnxt.c:2219
void bnxt_set_txq(struct bnxt *bp, int entry, physaddr_t mapping, int len)
Definition bnxt.c:304
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
Definition bnxt.c:1101
static int bnxt_hwrm_func_qcfg_req(struct bnxt *bp)
Definition bnxt.c:935
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
Definition bnxt.c:1476
static int bnxt_hwrm_cfa_l2_filter_free(struct bnxt *bp)
Definition bnxt.c:1243
hwrm_func_t bring_up_nic[]
Definition bnxt.c:2144
static void bnxt_down_pci(struct bnxt *bp)
Definition bnxt.c:106
void bnxt_link_speed_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2546
static int bnxt_hwrm_backing_store_qcfg(struct bnxt *bp)
Definition bnxt.c:1444
static int bnxt_hwrm_backing_store_cfg(struct bnxt *bp)
Definition bnxt.c:1459
#define BNXT_GRC_OFFSET_MASK
Definition bnxt.c:2297
u8 bnxt_is_pci_vf(struct pci_device *pdev)
Check if Virtual Function.
Definition bnxt.c:98
static int bnxt_hwrm_ring_alloc_tx(struct bnxt *bp)
Definition bnxt.c:1922
static int bnxt_hwrm_ver_get(struct bnxt *bp)
Definition bnxt.c:733
static int bnxt_get_link_speed(struct bnxt *bp)
Definition bnxt.c:1359
static int bnxt_hwrm_func_resource_qcaps(struct bnxt *bp)
Definition bnxt.c:784
static void bnxt_db_rx(struct bnxt *bp, u32 idx)
Definition bnxt.c:248
int bnxt_hwrm_ring_free(struct bnxt *bp, u16 ring_id, u8 ring_type)
Definition bnxt.c:1825
static int wait_resp(struct bnxt *bp, u32 tmo, u16 len, const char *func)
Definition bnxt.c:703
static void short_hwrm_cmd_req(struct bnxt *bp, u16 len)
Definition bnxt.c:688
static void bnxt_er_task_timer(struct retry_timer *timer, int over __unused)
Definition bnxt.c:2737
static int bnxt_hwrm_ring_free_nq(struct bnxt *bp)
Definition bnxt.c:1986
u32 bnxt_er_get_reg_val(struct bnxt *bp, u32 reg_addr, u32 reg_type, u32 mask)
Definition bnxt.c:2330
static void bnxt_set_link(struct bnxt *bp)
Definition bnxt.c:184
static u32 bnxt_set_ring_info(struct bnxt *bp)
Definition bnxt.c:847
int bnxt_post_rx_buffers(struct bnxt *bp)
Definition bnxt.c:387
void bnxt_free_rings_mem(struct bnxt *bp)
Definition bnxt.c:575
static int bnxt_hwrm_vnic_free(struct bnxt *bp)
Definition bnxt.c:2027
static int bnxt_hwrm_port_mac_cfg(struct bnxt *bp)
Definition bnxt.c:1502
void bnxt_add_vlan(struct io_buffer *iob, u16 vlan)
Definition bnxt.c:271
void bnxt_rst_reg_val(struct bnxt *bp, u32 reg_addr, u32 reg_val)
Definition bnxt.c:2354
static void * bnxt_pci_base(struct pci_device *pdev, unsigned int reg)
Definition bnxt.c:123
static int bnxt_hwrm_nvm_get_variable_req(struct bnxt *bp, u16 data_len, u16 option_num, u16 dimensions, u16 index_0)
Definition bnxt.c:1340
static void bnxt_db_tx(struct bnxt *bp, u32 idx)
Definition bnxt.c:259
void bnxt_link_speed_chg_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2562
int bnxt_alloc_hwrm_mem(struct bnxt *bp)
Definition bnxt.c:621
void bnxt_mm_init_rings(struct bnxt *bp, const char *func)
Definition bnxt.c:527
static struct pci_device_id bnxt_nics[]
Definition bnxt.c:29
#define bnxt_up_chip(bp)
Definition bnxt.c:2180
static int bnxt_set_rx_mask(struct bnxt *bp)
Definition bnxt.c:2075
void bnxt_process_er_event(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2484
#define bnxt_up_init(bp)
Definition bnxt.c:2183
static struct net_device_operations bnxt_netdev_ops
Definition bnxt.c:2787
static void bnxt_set_rx_desc(u8 *buf, struct io_buffer *iob, u16 cid, u32 idx)
Definition bnxt.c:357
static int bnxt_hwrm_ring_free_tx(struct bnxt *bp)
Definition bnxt.c:1949
static void bnxt_hwrm_assign_resources(struct bnxt *bp)
Definition bnxt.c:882
int(* hwrm_func_t)(struct bnxt *bp)
Definition bnxt.c:2095
static int bnxt_init_one(struct pci_device *pci)
Definition bnxt.c:2794
#define BNXT_REG_WINDOW_BASE
Definition bnxt.c:2295
hwrm_func_t bring_up_init[]
Definition bnxt.c:2097
void bnxt_port_phy_chg_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2576
static void bnxt_adv_nq_index(struct bnxt *bp, u16 cnt)
Definition bnxt.c:2269
void bnxt_rx_process(struct net_device *dev, struct bnxt *bp, struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi)
Definition bnxt.c:472
static int bnxt_get_device_address(struct bnxt *bp)
Definition bnxt.c:170
#define bnxt_down_nic(bp)
Definition bnxt.c:2181
static int bnxt_hwrm_vnic_cfg(struct bnxt *bp)
Definition bnxt.c:2050
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
Definition bnxt.c:1143
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp)
Definition bnxt.c:1517
static int bnxt_hwrm_func_cfg_req(struct bnxt *bp)
Definition bnxt.c:1014
static void dev_p7_db(struct bnxt *bp, u32 idx, u32 xid, u32 flag, u32 epoch, u32 toggle)
Definition bnxt.c:207
void bnxt_mm_nic(struct bnxt *bp)
Definition bnxt.c:546
static int bnxt_hwrm_ring_alloc_cq(struct bnxt *bp)
Definition bnxt.c:1916
static int bnxt_hwrm_func_qcaps_req(struct bnxt *bp)
Definition bnxt.c:900
static void bnxt_tx_complete(struct net_device *dev, u16 hw_idx)
Definition bnxt.c:324
void bnxt_link_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2283
static void hwrm_init(struct bnxt *bp, struct input *req, u16 cmd, u16 len)
Definition bnxt.c:667
static void bnxt_er_wait_timer(struct retry_timer *timer, int over __unused)
Definition bnxt.c:2748
#define bnxt_up_nic(bp)
Definition bnxt.c:2182
static int bnxt_hwrm_port_phy_qcaps_req(struct bnxt *bp)
Definition bnxt.c:976
u32 set_rx_mask(u32 rx_mask)
Definition bnxt.c:1267
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, u16 idx)
Definition bnxt.c:1301
static void bnxt_service_nq(struct net_device *dev)
Definition bnxt.c:2668
static int bnxt_hwrm_set_async_event(struct bnxt *bp)
Definition bnxt.c:1164
static int bnxt_hwrm_ring_free_rx(struct bnxt *bp)
Definition bnxt.c:1964
static void hwrm_write_req(struct bnxt *bp, void *req, u32 cnt)
Definition bnxt.c:677
static int bnxt_get_phy_link(struct bnxt *bp)
Definition bnxt.c:1701
static int bnxt_rx_complete(struct net_device *dev, struct rx_pkt_cmpl *rx)
Definition bnxt.c:496
static int bnxt_hwrm_ring_alloc(struct bnxt *bp, u8 type)
Definition bnxt.c:1838
static int bnxt_hwrm_func_reset_req(struct bnxt *bp)
Definition bnxt.c:1000
static void bnxt_remove_one(struct pci_device *pci)
Definition bnxt.c:2869
static int bnxt_query_phy_link(struct bnxt *bp)
Definition bnxt.c:1666
u32 bnxt_er_reg_write(struct bnxt *bp, u32 reg_addr, u32 reg_val)
Definition bnxt.c:2299
static void dev_p5_db(struct bnxt *bp, u32 idx, u32 xid, u32 flag)
Definition bnxt.c:192
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
Definition bnxt.c:1726
static void bnxt_adv_cq_index(struct bnxt *bp, u16 cnt)
Definition bnxt.c:458
u8 bnxt_rx_drop(struct bnxt *bp, struct io_buffer *iob, struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len)
Definition bnxt.c:417
static u16 bnxt_get_pkt_vlan(char *src)
Definition bnxt.c:285
void bnxt_free_hwrm_mem(struct bnxt *bp)
Definition bnxt.c:601
void bnxt_process_reset_notify_event(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2524
int bnxt_hwrm_run(hwrm_func_t cmds[], struct bnxt *bp)
Definition bnxt.c:2162
#define DEFAULT_NUMBER_OF_STAT_CTXS
Definition bnxt.h:145
#define LINK_SPEED_FW_40G
Definition bnxt.h:297
#define MAX_CQ_DESC_CNT
Definition bnxt.h:149
#define LINK_SPEED_FW_1G
Definition bnxt.h:291
#define SET_MEDIUM_SPEED(bp, s)
Definition bnxt.h:117
#define STATUS_TIMEOUT
Definition bnxt.h:79
#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM
Definition bnxt.h:360
#define LINK_SPEED_FW_100G_PAM4
Definition bnxt.h:307
#define NQ_DMA_ADDR(bp)
Definition bnxt.h:178
#define NQ_CN_TYPE_MASK
Definition bnxt.h:570
#define GET_MEDIUM_SPEED(m)
Definition bnxt.h:116
#define MEDIUM_SPEED_25GBPS
Definition bnxt.h:101
#define CMPL_BASE_TYPE_TX_L2
Definition bnxt.h:524
#define IPXE_VERSION_UPDATE
Definition bnxt.h:33
#define STATUS_FAILURE
Definition bnxt.h:56
#define VALID_RING_NQ
Definition bnxt.h:910
#define RING_FREE(bp, rid, flag)
Definition bnxt.h:1083
#define ETHERTYPE_VLAN
Definition bnxt.h:191
#define MEDIA_AUTO_DETECT_MASK
Definition bnxt.h:1092
#define STAT_CTX_ID
Definition bnxt.h:175
#define D3_SPEED_FW_SHIFT
Definition bnxt.h:1091
#define SET_MEDIUM_DUPLEX(bp, d)
Definition bnxt.h:122
#define FW_STATUS_REG_CODE_READY
Definition bnxt.h:944
#define NQ_CN_TOGGLE_MASK
Definition bnxt.h:572
#define HWRM_CMD_DEFAULT_TIMEOUT
Definition bnxt.h:134
#define VALID_RING_GRP
Definition bnxt.h:906
#define CHIP_NUM_57608
Definition bnxt.h:1108
#define PORT_PHY_FLAGS
Definition bnxt.h:1080
#define MEDIUM_SPEED_400PAM4_112GBPS
Definition bnxt.h:111
#define MEDIUM_SPEED_50PAM4GBPS
Definition bnxt.h:106
#define CMPL_BASE_TYPE_RX_L2_V3
Definition bnxt.h:529
#define TX_RING_BUFFER_SIZE
Definition bnxt.h:150
#define SERVICE_NEXT_NQ_BD
Definition bnxt.h:186
#define SHORT_CMD_SUPPORTED
Definition bnxt.h:1064
#define RX_MASK
Definition bnxt.h:168
#define STATUS_LINK_DOWN
Definition bnxt.h:60
#define BD_NOW(bd, entry, len)
Definition bnxt.h:162
#define LINK_SPEED_DRV_NUM
Definition bnxt.h:220
#define STATUS_LINK_ACTIVE
Definition bnxt.h:59
#define DEFAULT_NUMBER_OF_RING_GRPS
Definition bnxt.h:144
#define DB_OFFSET_VF
Definition bnxt.h:196
#define RX_PKT_V3_CMPL_TYPE_RX_L2_V3
Definition bnxt.h:728
#define DMA_ALIGN_4K
Definition bnxt.h:155
#define DEFAULT_NUMBER_OF_TX_RINGS
Definition bnxt.h:142
#define BNXT_CQ_INTR_MODE(vf)
Definition bnxt.h:163
#define DBC_DBC_TYPE_SQ
Definition bnxt.h:434
#define IPXE_VERSION_MINOR
Definition bnxt.h:32
#define TX_BD_FLAGS
Definition bnxt.h:1075
#define LINK_SPEED_FW_AUTONEG
Definition bnxt.h:289
#define FLAG_RESET(f, b)
Definition bnxt.h:40
#define TX_BD_SHORT_FLAGS_LHINT_GTE2K
Definition bnxt.h:470
#define DBC_DBC_TYPE_SRQ
Definition bnxt.h:436
#define FLAG_TEST(f, b)
Definition bnxt.h:39
#define RX_RING_BUFFER_SIZE
Definition bnxt.h:151
#define DB_OFFSET_PF
Definition bnxt.h:195
#define MEDIUM_SPEED_200PAM4_112GBPS
Definition bnxt.h:109
#define RESP_BUFFER_SIZE
Definition bnxt.h:157
#define SET_LINK(p, m, s)
Definition bnxt.h:1084
#define RX_MASK_ACCEPT_ALL_MULTICAST
Definition bnxt.h:86
#define NQ_RING_BUFFER_SIZE
Definition bnxt.h:173
#define FLAG_SET(f, b)
Definition bnxt.h:38
#define BNXT_RX_STD_DMA_SZ
Definition bnxt.h:160
#define PHY_STATUS
Definition bnxt.h:206
#define DBC_MSG_XID(xid, flg)
Definition bnxt.h:199
#define VLAN_HDR_SIZE
Definition bnxt.h:190
#define LINK_SPEED_FW_10G
Definition bnxt.h:293
#define TX_DOORBELL_KEY_TX
Definition bnxt.h:386
#define MEDIUM_SPEED_100PAM4_112GBPS
Definition bnxt.h:108
#define RX_DMA_ADDR(bp)
Definition bnxt.h:181
#define REQ_BUFFER_SIZE
Definition bnxt.h:156
#define MEDIUM_SPEED_400PAM4GBPS
Definition bnxt.h:110
#define RX_MASK_ACCEPT_MULTICAST
Definition bnxt.h:85
#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT
Definition bnxt.h:534
#define CMPL_BASE_TYPE_MASK
Definition bnxt.h:522
#define BNXT_ER_TIMER_INTERVAL(x)
Definition bnxt.h:1109
#define CQ_DOORBELL_KEY_IDX(a)
Definition bnxt.h:1071
#define LINK_SPEED_FW_50G
Definition bnxt.h:299
#define PCICFG_ME_REGISTER
Definition bnxt.h:125
#define ER_DFLT_FW_RST_MAX_DSECS
Definition bnxt.h:943
#define DMA_DMA_ADDR(bp)
Definition bnxt.h:184
#define CQ_DMA_ADDR(bp)
Definition bnxt.h:179
#define VALID_STAT_CTX
Definition bnxt.h:902
#define CMPL_BASE_TYPE_RX_L2
Definition bnxt.h:525
#define SPEED_DRV_SHIFT
Definition bnxt.h:1087
#define LINK_SPEED_FW_MASK
Definition bnxt.h:287
#define MAX_ETHERNET_PACKET_BUFFER_SIZE
Definition bnxt.h:140
#define CHIP_NUM_57502
Definition bnxt.h:1106
#define LINK_SPEED_FW_2_5G
Definition bnxt.h:317
#define TX_BD_SHORT_FLAGS_LHINT_LT512
Definition bnxt.h:467
#define LINK_SPEED_FW_50G_PAM4
Definition bnxt.h:305
#define TX_BD_SHORT_FLAGS_LHINT_LT2K
Definition bnxt.h:469
#define NO_MORE_CQ_BD_TO_SERVICE
Definition bnxt.h:187
#define DEFAULT_NUMBER_OF_RX_RINGS
Definition bnxt.h:143
#define DETECT_MEDIA
Definition bnxt.h:208
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT
Definition bnxt.h:676
#define DBC_DBC_TYPE_CQ
Definition bnxt.h:438
#define NUM_RX_BUFFERS
Definition bnxt.h:146
#define VALID_L2_FILTER
Definition bnxt.h:909
#define BNXT_FLAG_IS_CHIP_P5_PLUS
Definition bnxt.h:50
#define VALID_RING_RX
Definition bnxt.h:905
#define D3_LINK_SPEED_FW_NUM
Definition bnxt.h:322
#define LINK_SPEED_FW_NUM
Definition bnxt.h:286
#define MEDIUM_SPEED_100GBPS
Definition bnxt.h:104
#define SUPPORT_SPEEDS
Definition bnxt.h:209
#define MEDIUM_FULL_DUPLEX
Definition bnxt.h:119
#define MAX_NQ_DESC_CNT
Definition bnxt.h:172
#define NQ_CN_TYPE_CQ_NOTIFICATION
Definition bnxt.h:575
#define BNXT_FLAG_NPAR_MODE
Definition bnxt.h:45
#define NO_MORE_NQ_BD_TO_SERVICE
Definition bnxt.h:185
#define MEDIUM_SPEED_40GBPS
Definition bnxt.h:102
#define RX_PKT_CMPL_V2
Definition bnxt.h:672
#define REQ_DMA_ADDR(bp)
Definition bnxt.h:182
#define NEXT_IDX(N, S)
Definition bnxt.h:161
#define TX_IN_USE(a, b, c)
Definition bnxt.h:177
#define VF_CFG_ENABLE_FLAGS
Definition bnxt.h:1098
#define LINK_POLL_WAIT_TIME
Definition bnxt.h:167
#define DBC_MSG_EPCH(idx)
Definition bnxt.h:202
#define RX_MASK_PROMISCUOUS_MODE
Definition bnxt.h:89
#define BNXT_FLAG_RESOURCE_QCAPS_SUPPORT
Definition bnxt.h:43
#define SERVICE_NEXT_CQ_BD
Definition bnxt.h:188
#define RX_PKT_V3_CMPL_TYPE_MASK
Definition bnxt.h:720
#define VALID_RX_IOB
Definition bnxt.h:908
#define SUPPORT_SPEEDS2
Definition bnxt.h:210
#define BYTE_SWAP_S(w)
Definition bnxt.h:192
#define VALID_DRIVER_REG
Definition bnxt.h:901
#define HWRM_CMD_FLASH_MULTIPLAYER(a)
Definition bnxt.h:137
#define SPEED_FW_SHIFT
Definition bnxt.h:1089
#define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT
Definition bnxt.h:872
#define TX_DMA_ADDR(bp)
Definition bnxt.h:180
#define CHIP_NUM_57504
Definition bnxt.h:1105
#define HWRM_CMD_WAIT(b)
Definition bnxt.h:139
#define RESP_DMA_ADDR(bp)
Definition bnxt.h:183
#define DBC_MSG_TOGGLE(idx)
Definition bnxt.h:204
#define SHORT_CMD_REQUIRED
Definition bnxt.h:1065
#define VALID_RING_TX
Definition bnxt.h:904
#define TX_BD_SHORT_FLAGS_LHINT_LT1K
Definition bnxt.h:468
#define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_SFT
Definition bnxt.h:824
#define MEDIUM_SPEED_200GBPS
Definition bnxt.h:105
#define MEDIUM_SPEED_100PAM4GBPS
Definition bnxt.h:107
#define LINK_SPEED_FW_400G_PAM4
Definition bnxt.h:313
#define MEDIUM_SPEED_50GBPS
Definition bnxt.h:103
#define D3_SPEED_FW_MASK
Definition bnxt.h:1090
#define HWRM_CMD_POLL_WAIT_TIME
Definition bnxt.h:135
#define LINK_SPEED_FW_100G
Definition bnxt.h:301
#define LINK_SPEED_FW_200G
Definition bnxt.h:303
#define BNXT_FLAG_PCI_VF
Definition bnxt.h:47
#define VALID_VNIC_ID
Definition bnxt.h:907
#define SPEED_DRV_MASK
Definition bnxt.h:1086
#define LINK_DEFAULT_TIMEOUT
Definition bnxt.h:166
#define LINK_SPEED_FW_200G_PAM4_112
Definition bnxt.h:311
#define LINK_SPEED_FW_100G_PAM4_112
Definition bnxt.h:309
#define PHY_SPEED
Definition bnxt.h:207
#define BNXT_FLAG_IS_CHIP_P5
Definition bnxt.h:49
#define DBC_DBC_TYPE_NQ_ARM
Definition bnxt.h:445
#define CMPL_BASE_V
Definition bnxt.h:544
#define CQ_RING_BUFFER_SIZE
Definition bnxt.h:153
#define DMA_BUFFER_SIZE
Definition bnxt.h:158
#define BNXT_FLAG_IS_CHIP_P7
Definition bnxt.h:51
#define CMPL_DOORBELL_KEY_CMPL
Definition bnxt.h:408
#define DBC_MSG_IDX(idx)
Definition bnxt.h:197
#define RX_DOORBELL_KEY_RX
Definition bnxt.h:396
#define BNXT_FLAG_LINK_SPEEDS2
Definition bnxt.h:48
#define CMPL_BASE_TYPE_STAT_EJECT
Definition bnxt.h:530
#define MEDIUM_SPEED_1000MBPS
Definition bnxt.h:97
#define MEDIUM_SPEED_10GBPS
Definition bnxt.h:99
#define BNXT_DMA_ALIGNMENT
Definition bnxt.h:154
#define MEDIUM_SPEED_2500MBPS
Definition bnxt.h:98
#define BNXT_FLAG_MULTI_HOST
Definition bnxt.h:44
#define ER_DFLT_FW_RST_MIN_DSECS
Definition bnxt.h:942
#define MEDIUM_SPEED_AUTONEG
Definition bnxt.h:93
#define LINK_SPEED_FW_400G_PAM4_112
Definition bnxt.h:315
#define GRC_COM_CHAN_BASE
Definition bnxt.h:126
#define RX_MASK_ACCEPT_NONE
Definition bnxt.h:83
#define NQ_CN_TOGGLE_SFT
Definition bnxt.h:573
#define HWRM_CMD_DEFAULT_MULTIPLAYER(a)
Definition bnxt.h:136
#define STATUS_SUCCESS
Definition bnxt.h:55
#define MAC_HDR_SIZE
Definition bnxt.h:189
#define QCFG_PHY_ALL
Definition bnxt.h:211
#define VALID_RING_CQ
Definition bnxt.h:903
#define BNXT_FLAG_HWRM_SHORT_CMD_SUPP
Definition bnxt.h:41
#define LM_PAGE_BITS(a)
Definition bnxt.h:159
#define LINK_SPEED_FW_25G
Definition bnxt.h:295
#define IPXE_VERSION_MAJOR
Definition bnxt.h:31
#define DEFAULT_NUMBER_OF_CMPL_RINGS
Definition bnxt.h:141
#define BNXT_ER_WAIT_TIMER_INTERVAL(x)
Definition bnxt.h:1110
#define SPEED_FW_MASK
Definition bnxt.h:1088
#define NQ_CN_V
Definition bnxt.h:589
#define TX_AVAIL(r)
Definition bnxt.h:176
#define RX_RING_QID
Definition bnxt.h:174
#define GRC_COM_CHAN_TRIG
Definition bnxt.h:127
#define MEDIA_AUTO_DETECT_SHIFT
Definition bnxt.h:1093
#define CHIP_NUM_57508
Definition bnxt.h:1104
#define dbg_rx_cid(idx, cid)
Definition bnxt_dbg.h:463
#define dump_evt(cq, ty, id, ring)
Definition bnxt_dbg.h:660
#define dbg_alloc_rx_iob_fail(iob_idx, cons_id)
Definition bnxt_dbg.h:464
#define dbg_hw_cmd(bp, func, cmd_len, resp_len, cmd_tmo, err)
Definition bnxt_dbg.h:378
#define dbg_pci(bp, func, creg)
Definition bnxt_dbg.h:140
#define dbg_alloc_rx_iob(iob, id, cid)
Definition bnxt_dbg.h:462
#define dbg_tx_avail(bp, a, u)
Definition bnxt_dbg.h:563
#define dbg_rx_stat(bp)
Definition bnxt_dbg.h:466
#define dbg_tx_done(pkt, len, idx)
Definition bnxt_dbg.h:569
#define dbg_fw_ver(resp, tmo)
Definition bnxt_dbg.h:319
#define dbg_func_qcaps(bp)
Definition bnxt_dbg.h:321
#define dump_cq(cq, id, toggle)
Definition bnxt_dbg.h:508
#define dbg_link_state(bp, tmo)
Definition bnxt_dbg.h:663
#define dbg_tx_vlan(bp, src, plen, len)
Definition bnxt_dbg.h:564
#define dbg_func_qcfg(bp)
Definition bnxt_dbg.h:322
#define prn_set_speed(speed)
Definition bnxt_dbg.h:323
#define dbg_mem(bp, func)
Definition bnxt_dbg.h:175
#define dbg_rxp(iob, rx_len, drop)
Definition bnxt_dbg.h:465
#define dbg_link_info(bp)
Definition bnxt_dbg.h:662
#define dbg_flags(func, flags)
Definition bnxt_dbg.h:326
#define dbg_func_resource_qcaps(bp)
Definition bnxt_dbg.h:320
#define dbg_short_cmd(sreq, func, len)
Definition bnxt_dbg.h:398
#define dbg_link_status(bp)
Definition bnxt_dbg.h:661
#define dbg_num_rings(bp)
Definition bnxt_dbg.h:325
#define dump_tx_pkt(pkt, len, idx)
Definition bnxt_dbg.h:567
#define dump_rx_bd(rx_cmp, rx_cmp_hi, desc_idx)
Definition bnxt_dbg.h:461
#define dump_tx_stat(bp)
Definition bnxt_dbg.h:566
#define dump_nq(nq, id)
Definition bnxt_dbg.h:509
#define dbg_chip_info(bp)
Definition bnxt_dbg.h:324
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_SUPPORTED_LINK_SPEEDS_CHANGE
Definition bnxt_hsi.h:786
#define ASYNC_EVENT_CMPL_ER_EVENT_DATA1_MASTER_FUNC
Definition bnxt_hsi.h:588
#define HWRM_VNIC_FREE
Definition bnxt_hsi.h:150
#define VNIC_ALLOC_REQ_FLAGS_DEFAULT
Definition bnxt_hsi.h:5769
#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_FORCE
Definition bnxt_hsi.h:664
#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT
Definition bnxt_hsi.h:6562
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE
Definition bnxt_hsi.h:560
#define PORT_MAC_CFG_REQ_LPBK_NONE
Definition bnxt_hsi.h:3646
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX
Definition bnxt_hsi.h:3110
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS
Definition bnxt_hsi.h:1556
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB
Definition bnxt_hsi.h:3168
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY
Definition bnxt_hsi.h:563
#define HWRM_RING_ALLOC
Definition bnxt_hsi.h:160
#define FUNC_CFG_REQ_ENABLES_NUM_MSIX
Definition bnxt_hsi.h:1558
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1753
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56
Definition bnxt_hsi.h:3186
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2
Definition bnxt_hsi.h:3080
#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY
Definition bnxt_hsi.h:3042
#define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_FEC_CFG_CHANGE
Definition bnxt_hsi.h:810
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR1
Definition bnxt_hsi.h:1758
#define HWRM_RING_FREE
Definition bnxt_hsi.h:161
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_PCIE_CFG
Definition bnxt_hsi.h:1755
#define HWRM_VERSION_MINOR
Definition bnxt_hsi.h:370
#define HWRM_MAX_REQ_LEN
Definition bnxt_hsi.h:364
#define HWRM_CFA_L2_FILTER_FREE
Definition bnxt_hsi.h:173
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112
Definition bnxt_hsi.h:3190
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX
Definition bnxt_hsi.h:3068
#define RING_FREE_REQ_RING_TYPE_NQ
Definition bnxt_hsi.h:6301
#define HWRM_PORT_PHY_CFG
Definition bnxt_hsi.h:119
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112
Definition bnxt_hsi.h:3191
#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST
Definition bnxt_hsi.h:6683
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST
Definition bnxt_hsi.h:1417
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56
Definition bnxt_hsi.h:3188
#define HWRM_VNIC_ALLOC
Definition bnxt_hsi.h:149
#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE
Definition bnxt_hsi.h:561
#define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_PAUSE_CFG_CHANGE
Definition bnxt_hsi.h:812
#define RING_ALLOC_REQ_INT_MODE_POLL
Definition bnxt_hsi.h:6270
#define HWRM_PORT_PHY_QCAPS
Definition bnxt_hsi.h:129
#define HWRM_VNIC_CFG
Definition bnxt_hsi.h:151
#define ER_QCFG_RESET_REG_ADDR_MASK
Definition bnxt_hsi.h:1803
#define RING_ALLOC_REQ_RING_TYPE_RX
Definition bnxt_hsi.h:6216
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB
Definition bnxt_hsi.h:3184
#define ASYNC_EVENT_CMPL_EVENT_DATA1_REASON_CODE_MASK
Definition bnxt_hsi.h:591
#define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY
Definition bnxt_hsi.h:562
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK
Definition bnxt_hsi.h:6530
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56
Definition bnxt_hsi.h:3187
#define HWRM_FUNC_RESOURCE_QCAPS
Definition bnxt_hsi.h:281
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP
Definition bnxt_hsi.h:5821
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB
Definition bnxt_hsi.h:3090
#define ASYNC_EVENT_CMPL_ER_EVENT_DATA1_RECOVERY_ENABLED
Definition bnxt_hsi.h:589
#define ER_QCFG_FW_HEALTH_REG_ADDR_MASK
Definition bnxt_hsi.h:1760
#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS
Definition bnxt_hsi.h:1542
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB
Definition bnxt_hsi.h:3085
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB
Definition bnxt_hsi.h:3091
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX
Definition bnxt_hsi.h:3111
#define ER_QCFG_FW_HB_REG_ADDR_MASK
Definition bnxt_hsi.h:1770
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB
Definition bnxt_hsi.h:3183
#define HWRM_FUNC_QCAPS
Definition bnxt_hsi.h:108
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID
Definition bnxt_hsi.h:5827
#define RING_FREE_REQ_RING_TYPE_L2_CMPL
Definition bnxt_hsi.h:6296
#define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT
Definition bnxt_hsi.h:1835
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56
Definition bnxt_hsi.h:3189
#define SHORT_REQ_SIGNATURE_SHORT_CMD
Definition bnxt_hsi.h:88
#define FUNC_CFG_REQ_EVB_MODE_NO_EVB
Definition bnxt_hsi.h:1614
#define PORT_PHY_QCFG_RESP_LINK_LINK
Definition bnxt_hsi.h:3251
#define FUNC_DRV_RGTR_REQ_ENABLES_VER
Definition bnxt_hsi.h:1838
#define HWRM_CFA_L2_SET_RX_MASK
Definition bnxt_hsi.h:175
#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN
Definition bnxt_hsi.h:1889
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST
Definition bnxt_hsi.h:6521
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE
Definition bnxt_hsi.h:2162
#define HWRM_NVM_GET_VARIABLE
Definition bnxt_hsi.h:309
#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME
Definition bnxt_hsi.h:1181
#define HWRM_FUNC_QCFG
Definition bnxt_hsi.h:109
#define HWRM_FUNC_BACKING_STORE_QCFG
Definition bnxt_hsi.h:285
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0
Definition bnxt_hsi.h:1433
#define HWRM_RING_GRP_ALLOC
Definition bnxt_hsi.h:166
#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST
Definition bnxt_hsi.h:6681
#define RING_ALLOC_REQ_RING_TYPE_NQ
Definition bnxt_hsi.h:6219
#define HWRM_QUEUE_QPORTCFG
Definition bnxt_hsi.h:135
#define HWRM_FUNC_DRV_UNRGTR
Definition bnxt_hsi.h:113
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE
Definition bnxt_hsi.h:3069
#define HWRM_VER_GET
Definition bnxt_hsi.h:98
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK
Definition bnxt_hsi.h:3071
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE
Definition bnxt_hsi.h:3067
#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
Definition bnxt_hsi.h:6684
#define HWRM_FUNC_DRV_RGTR
Definition bnxt_hsi.h:116
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
Definition bnxt_hsi.h:3192
#define HWRM_PORT_MAC_CFG
Definition bnxt_hsi.h:120
#define HWRM_VERSION_MAJOR
Definition bnxt_hsi.h:369
#define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EEE_CFG_CHANGE
Definition bnxt_hsi.h:811
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB
Definition bnxt_hsi.h:3093
#define ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1783
#define RING_FREE_REQ_RING_TYPE_TX
Definition bnxt_hsi.h:6297
#define RING_ALLOC_REQ_RING_TYPE_TX
Definition bnxt_hsi.h:6215
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
Definition bnxt_hsi.h:3170
#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD
Definition bnxt_hsi.h:1841
#define HWRM_FUNC_VF_CFG
Definition bnxt_hsi.h:102
#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR
Definition bnxt_hsi.h:1551
#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER
Definition bnxt_hsi.h:1844
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
Definition bnxt_hsi.h:6517
#define FUNC_CFG_REQ_ENABLES_EVB_MODE
Definition bnxt_hsi.h:1554
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_GRC
Definition bnxt_hsi.h:1756
#define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT
Definition bnxt_hsi.h:1834
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB
Definition bnxt_hsi.h:3181
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
Definition bnxt_hsi.h:3107
#define HWRM_VERSION_UPDATE
Definition bnxt_hsi.h:371
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL
Definition bnxt_hsi.h:6214
#define HWRM_PORT_PHY_QCFG
Definition bnxt_hsi.h:126
#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS
Definition bnxt_hsi.h:1545
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB
Definition bnxt_hsi.h:3092
#define HWRM_CFA_L2_FILTER_ALLOC
Definition bnxt_hsi.h:172
#define ER_QCFG_RESET_INPRG_REG_ADDR_MASK
Definition bnxt_hsi.h:1790
#define HWRM_ER_QCFG
Definition bnxt_hsi.h:99
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB
Definition bnxt_hsi.h:3182
#define FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE
Definition bnxt_hsi.h:1366
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK
Definition bnxt_hsi.h:3081
#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_MASK
Definition bnxt_hsi.h:665
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_ILLEGAL_LINK_SPEED_CFG
Definition bnxt_hsi.h:787
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE
Definition bnxt_hsi.h:554
#define HWRM_FUNC_BACKING_STORE_CFG
Definition bnxt_hsi.h:284
#define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE
Definition bnxt_hsi.h:1831
#define HWRM_NA_SIGNATURE
Definition bnxt_hsi.h:363
#define VNIC_CFG_REQ_ENABLES_MRU
Definition bnxt_hsi.h:5825
#define ER_QCFG_FW_HB_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1763
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR
Definition bnxt_hsi.h:6529
#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID
Definition bnxt_hsi.h:5826
#define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
Definition bnxt_hsi.h:3102
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED
Definition bnxt_hsi.h:3078
#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
Definition bnxt_hsi.h:6682
#define FUNC_CFG_REQ_ENABLES_NUM_VNICS
Definition bnxt_hsi.h:1544
#define ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1808
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB
Definition bnxt_hsi.h:3185
#define ASYNC_EVENT_CMPL_EVENT_DATA1_REASON_CODE_FATAL
Definition bnxt_hsi.h:590
#define PORT_PHY_CFG_REQ_FLAGS_FORCE
Definition bnxt_hsi.h:3044
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID
Definition bnxt_hsi.h:6211
#define ER_QCFG_RESET_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1796
#define RING_FREE_REQ_RING_TYPE_RX
Definition bnxt_hsi.h:6298
#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS
Definition bnxt_hsi.h:1541
#define ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1773
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID
Definition bnxt_hsi.h:6544
#define HWRM_STAT_CTX_ALLOC
Definition bnxt_hsi.h:191
#define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED
Definition bnxt_hsi.h:4237
#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS
Definition bnxt_hsi.h:1540
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB
Definition bnxt_hsi.h:3169
#define HWRM_FUNC_RESET
Definition bnxt_hsi.h:104
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR0
Definition bnxt_hsi.h:1757
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE
Definition bnxt_hsi.h:556
#define ER_QCFG_RCVRY_CNT_REG_ADDR_MASK
Definition bnxt_hsi.h:1815
#define HWRM_FUNC_CFG
Definition bnxt_hsi.h:110
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE
Definition bnxt_hsi.h:1837
#define HWRM_STAT_CTX_FREE
Definition bnxt_hsi.h:192
#define HWRM_RING_GRP_FREE
Definition bnxt_hsi.h:167
#define ER_QCFG_FW_RESET_CNT_REG_ADDR_MASK
Definition bnxt_hsi.h:1780
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID
Definition bnxt_hsi.h:6212
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB
Definition bnxt_hsi.h:3088
struct bofm_section_header done
Definition bofm_test.c:46
ring len
Length.
Definition dwmac.h:226
#define ARRAY_SIZE(x)
Definition efx_common.h:43
uint32_t type
Operating system type.
Definition ena.h:1
uint8_t flags
Flags.
Definition ena.h:7
struct ena_llq_option desc
Descriptor counts.
Definition ena.h:9
Error codes.
struct net_device * alloc_etherdev(size_t priv_size)
Allocate Ethernet device.
Definition ethernet.c:265
Ethernet protocol.
static int is_valid_ether_addr(const void *addr)
Check if Ethernet address is valid.
Definition ethernet.h:78
static struct net_device * netdev
Definition gdbudp.c:53
#define __unused
Declare a variable or data structure as unused.
Definition compiler.h:573
#define DBGP(...)
Definition compiler.h:532
#define FILE_LICENCE(_licence)
Declare a particular licence as applying to a file.
Definition compiler.h:896
#define EINVAL
Invalid argument.
Definition errno.h:429
#define ENOMEM
Not enough space.
Definition errno.h:535
#define EBUSY
Device or resource busy.
Definition errno.h:339
#define ENOBUFS
No buffer space available.
Definition errno.h:499
#define ETH_ALEN
Definition if_ether.h:9
#define u8
Definition igbvf_osdep.h:40
#define barrier()
Optimisation barrier.
Definition compiler.h:633
void mb(void)
Memory barrier.
#define writeq(data, io_addr)
Definition io.h:273
void iounmap(volatile const void *io_addr)
Unmap I/O address.
int pci_read_config_dword(struct pci_device *pci, unsigned int where, uint32_t *value)
Read 32-bit dword from PCI configuration space.
int pci_read_config_word(struct pci_device *pci, unsigned int where, uint16_t *value)
Read 16-bit word from PCI configuration space.
void * pci_ioremap(struct pci_device *pci, unsigned long bus_addr, size_t len)
Map PCI bus address as an I/O address.
int pci_write_config_word(struct pci_device *pci, unsigned int where, uint16_t value)
Write 16-bit word to PCI configuration space.
int pci_read_config_byte(struct pci_device *pci, unsigned int where, uint8_t *value)
Read byte from PCI configuration space.
int pci_write_config_dword(struct pci_device *pci, unsigned int where, uint32_t value)
Write 32-bit dword to PCI configuration space.
iPXE timers
void __asmcall int val
Definition setjmp.h:12
uint64_t u64
Definition stdint.h:26
String functions.
void * memcpy(void *dest, const void *src, size_t len) __nonnull
void * memset(void *dest, int character, size_t len) __nonnull
void * memmove(void *dest, const void *src, size_t len) __nonnull
struct io_buffer * alloc_rx_iob(size_t len, struct dma_device *dma)
Allocate and map I/O buffer for receive DMA.
Definition iobuf.c:188
void free_rx_iob(struct io_buffer *iobuf)
Unmap and free I/O buffer for receive DMA.
Definition iobuf.c:215
I/O buffers.
#define iob_put(iobuf, len)
Definition iobuf.h:125
static __always_inline physaddr_t iob_dma(struct io_buffer *iobuf)
Get I/O buffer DMA address.
Definition iobuf.h:268
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition iobuf.h:160
DMA mappings.
void dma_free(struct dma_mapping *map, void *addr, size_t len)
Unmap and free DMA-coherent buffer.
void * dma_alloc(struct dma_device *dma, struct dma_mapping *map, size_t len, size_t align)
Allocate and map DMA-coherent buffer.
Dynamic memory allocation.
Media Independent Interface constants.
static unsigned int unsigned int reg
Definition myson.h:162
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition netdevice.c:231
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition netdevice.c:549
void unregister_netdev(struct net_device *netdev)
Unregister network device.
Definition netdevice.c:942
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition netdevice.c:587
int register_netdev(struct net_device *netdev)
Register network device.
Definition netdevice.c:760
Network device management.
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition netdevice.h:789
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
Definition netdevice.h:519
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition netdevice.h:532
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition netdevice.h:576
static void netdev_tx_complete(struct net_device *netdev, struct io_buffer *iobuf)
Complete network transmission.
Definition netdevice.h:767
unsigned long pci_bar_size(struct pci_device *pci, unsigned int reg)
Get the size of a PCI BAR.
Definition pci.c:164
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition pci.c:241
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition pci.c:97
PCI bus.
#define __pci_driver
Declare a PCI driver.
Definition pci.h:278
#define PCI_SUBSYSTEM_ID
PCI subsystem ID.
Definition pci.h:79
#define PCI_COMMAND_MASTER
Bus master.
Definition pci.h:29
#define PCI_BASE_ADDRESS_2
Definition pci.h:65
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition pci.h:366
#define PCI_ROM(_vendor, _device, _name, _description, _data)
Definition pci.h:308
#define PCI_COMMAND_INTX_DISABLE
Interrupt disable.
Definition pci.h:33
#define PCI_BASE_ADDRESS_0
Definition pci.h:63
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition pci.h:376
#define PCI_COMMAND
PCI command.
Definition pci.h:26
#define PCI_BASE_ADDRESS_4
Definition pci.h:67
#define PCI_SUBSYSTEM_VENDOR_ID
PCI subsystem vendor ID.
Definition pci.h:76
uint16_t bp
Definition registers.h:9
void start_timer_fixed(struct retry_timer *timer, unsigned long timeout)
Start timer with a specified timeout.
Definition retry.c:65
void stop_timer(struct retry_timer *timer)
Stop timer.
Definition retry.c:118
#define container_of(ptr, type, field)
Get containing structure.
Definition stddef.h:36
Definition bnxt.h:951
struct retry_timer wait_timer
Definition bnxt.h:989
struct net_device * dev
Definition bnxt.h:969
struct retry_timer task_timer
Definition bnxt.h:988
u16 type
Definition bnxt.h:521
u32 info3_v
Definition bnxt.h:543
__le16 req_type
Definition bnxt_hsi.h:86
__le16 signature
Definition bnxt_hsi.h:87
__le64 req_addr
Definition bnxt_hsi.h:92
__le16 default_rx_ring_id
Definition bnxt_hsi.h:5834
__le16 default_cmpl_ring_id
Definition bnxt_hsi.h:5835
__le16 seq_id
Definition bnxt_hsi.h:71
__le16 req_type
Definition bnxt_hsi.h:69
__le16 target_id
Definition bnxt_hsi.h:72
__le64 resp_addr
Definition bnxt_hsi.h:73
__le16 cmpl_ring
Definition bnxt_hsi.h:70
A persistent I/O buffer.
Definition iobuf.h:38
void * data
Start of data.
Definition iobuf.h:53
Network device operations.
Definition netdevice.h:214
A network device.
Definition netdevice.h:353
void * priv
Driver private data.
Definition netdevice.h:432
struct device * dev
Underlying hardware device.
Definition netdevice.h:365
u16 type
Definition bnxt.h:562
u32 v
Definition bnxt.h:583
__le16 error_code
Definition bnxt_hsi.h:78
__le16 req_type
Definition bnxt_hsi.h:79
__le16 resp_len
Definition bnxt_hsi.h:81
__le16 seq_id
Definition bnxt_hsi.h:80
A PCI device ID list entry.
Definition pci.h:175
unsigned long driver_data
Arbitrary driver data.
Definition pci.h:183
A PCI device.
Definition pci.h:211
struct device dev
Generic device.
Definition pci.h:213
struct pci_device_id * id
Driver device ID.
Definition pci.h:248
struct dma_device dma
DMA device.
Definition pci.h:215
A PCI driver.
Definition pci.h:252
A retry timer.
Definition retry.h:22
u16 errors_v2
Definition bnxt.h:671
u32 opaque
Definition bnxt.h:637
u16 len
Definition bnxt.h:636
u16 flags_type
Definition bnxt.h:719
A timer.
Definition timer.h:29
physaddr_t dma
Definition bnxt.h:475
u32 opaque
Definition bnxt.h:474
u16 len
Definition bnxt.h:473
u16 flags_type
Definition bnxt.h:454
void mdelay(unsigned long msecs)
Delay for a fixed number of milliseconds.
Definition timer.c:79
void udelay(unsigned long usecs)
Delay for a fixed number of microseconds.
Definition timer.c:61
uint32_t data_len
Microcode data size (or 0 to indicate 2000 bytes)
Definition ucode.h:15
#define u16
Definition vga.h:20
#define u32
Definition vga.h:21
#define readl
Definition w89c840.c:157
#define writel
Definition w89c840.c:160
u8 tx[WPA_TKIP_MIC_KEY_LEN]
MIC key for packets to the AP.
Definition wpa.h:4
u8 rx[WPA_TKIP_MIC_KEY_LEN]
MIC key for packets from the AP.
Definition wpa.h:1