iPXE
bnxt.c
Go to the documentation of this file.
1
2FILE_LICENCE ( GPL2_ONLY );
3
4#include <mii.h>
5#include <stdio.h>
6#include <string.h>
7#include <errno.h>
8#include <unistd.h>
9#include <byteswap.h>
10#include <ipxe/pci.h>
11#include <ipxe/iobuf.h>
12#include <ipxe/dma.h>
13#include <ipxe/timer.h>
14#include <ipxe/malloc.h>
15#include <ipxe/if_ether.h>
16#include <ipxe/ethernet.h>
17#include <ipxe/netdevice.h>
18#include "bnxt.h"
19#include "bnxt_dbg.h"
20
21static void bnxt_service_cq ( struct net_device *dev );
22static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx );
23static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt );
24static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt );
25static int bnxt_rx_complete ( struct net_device *dev, struct rx_pkt_cmpl *rx );
26void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt );
27
28static struct pci_device_id bnxt_nics[] = {
29 PCI_ROM( 0x14e4, 0x1604, "14e4-1604", "Broadcom BCM957454", 0 ),
30 PCI_ROM( 0x14e4, 0x1605, "14e4-1605", "Broadcom BCM957454 RDMA", 0 ),
31 PCI_ROM( 0x14e4, 0x1606, "14e4-1606", "Broadcom BCM957454 RDMA VF", BNXT_FLAG_PCI_VF ),
32 PCI_ROM( 0x14e4, 0x1607, "bcm957454-1607", "Broadcom BCM957454 HV VF", BNXT_FLAG_PCI_VF ),
33 PCI_ROM( 0x14e4, 0x1608, "bcm957454-1608", "Broadcom BCM957454 RDMA HV VF", BNXT_FLAG_PCI_VF ),
34 PCI_ROM( 0x14e4, 0x1609, "14e4-1609", "Broadcom BCM957454 VF", BNXT_FLAG_PCI_VF ),
35 PCI_ROM( 0x14e4, 0x1614, "14e4-1614", "Broadcom BCM957454", 0 ),
36 PCI_ROM( 0x14e4, 0x16bd, "bcm95741x-16bd", "Broadcom BCM95741x RDMA_HV_VF", BNXT_FLAG_PCI_VF ),
37 PCI_ROM( 0x14e4, 0x16c0, "14e4-16c0", "Broadcom BCM957417", 0 ),
38 PCI_ROM( 0x14e4, 0x16c1, "14e4-16c1", "Broadcom BCM95741x VF", BNXT_FLAG_PCI_VF ),
39 PCI_ROM( 0x14e4, 0x16c5, "bcm95741x-16c5", "Broadcom BCM95741x HV VF", BNXT_FLAG_PCI_VF ),
40 PCI_ROM( 0x14e4, 0x16c8, "14e4-16c8", "Broadcom BCM957301", 0 ),
41 PCI_ROM( 0x14e4, 0x16c9, "14e4-16c9", "Broadcom BCM957302", 0 ),
42 PCI_ROM( 0x14e4, 0x16ca, "14e4-16ca", "Broadcom BCM957304", 0 ),
43 PCI_ROM( 0x14e4, 0x16cc, "14e4-16cc", "Broadcom BCM957417 MF", 0 ),
44 PCI_ROM( 0x14e4, 0x16cd, "14e4-16cd", "Broadcom BCM958700", 0 ),
45 PCI_ROM( 0x14e4, 0x16ce, "14e4-16ce", "Broadcom BCM957311", 0 ),
46 PCI_ROM( 0x14e4, 0x16cf, "14e4-16cf", "Broadcom BCM957312", 0 ),
47 PCI_ROM( 0x14e4, 0x16d0, "14e4-16d0", "Broadcom BCM957402", 0 ),
48 PCI_ROM( 0x14e4, 0x16d1, "14e4-16d1", "Broadcom BCM957404", 0 ),
49 PCI_ROM( 0x14e4, 0x16d2, "14e4-16d2", "Broadcom BCM957406", 0 ),
50 PCI_ROM( 0x14e4, 0x16d4, "14e4-16d4", "Broadcom BCM957402 MF", 0 ),
51 PCI_ROM( 0x14e4, 0x16d5, "14e4-16d5", "Broadcom BCM957407", 0 ),
52 PCI_ROM( 0x14e4, 0x16d6, "14e4-16d6", "Broadcom BCM957412", 0 ),
53 PCI_ROM( 0x14e4, 0x16d7, "14e4-16d7", "Broadcom BCM957414", 0 ),
54 PCI_ROM( 0x14e4, 0x16d8, "14e4-16d8", "Broadcom BCM957416", 0 ),
55 PCI_ROM( 0x14e4, 0x16d9, "14e4-16d9", "Broadcom BCM957417", 0 ),
56 PCI_ROM( 0x14e4, 0x16da, "14e4-16da", "Broadcom BCM957402", 0 ),
57 PCI_ROM( 0x14e4, 0x16db, "14e4-16db", "Broadcom BCM957404", 0 ),
58 PCI_ROM( 0x14e4, 0x16dc, "14e4-16dc", "Broadcom BCM95741x VF", BNXT_FLAG_PCI_VF ),
59 PCI_ROM( 0x14e4, 0x16de, "14e4-16de", "Broadcom BCM957412 MF", 0 ),
60 PCI_ROM( 0x14e4, 0x16df, "14e4-16df", "Broadcom BCM957314", 0 ),
61 PCI_ROM( 0x14e4, 0x16e0, "14e4-16e0", "Broadcom BCM957317", 0 ),
62 PCI_ROM( 0x14e4, 0x16e2, "14e4-16e2", "Broadcom BCM957417", 0 ),
63 PCI_ROM( 0x14e4, 0x16e3, "14e4-16e3", "Broadcom BCM957416", 0 ),
64 PCI_ROM( 0x14e4, 0x16e4, "14e4-16e4", "Broadcom BCM957317", 0 ),
65 PCI_ROM( 0x14e4, 0x16e7, "14e4-16e7", "Broadcom BCM957404 MF", 0 ),
66 PCI_ROM( 0x14e4, 0x16e8, "14e4-16e8", "Broadcom BCM957406 MF", 0 ),
67 PCI_ROM( 0x14e4, 0x16e9, "14e4-16e9", "Broadcom BCM957407", 0 ),
68 PCI_ROM( 0x14e4, 0x16ea, "14e4-16ea", "Broadcom BCM957407 MF", 0 ),
69 PCI_ROM( 0x14e4, 0x16eb, "14e4-16eb", "Broadcom BCM957412 RDMA MF", 0 ),
70 PCI_ROM( 0x14e4, 0x16ec, "14e4-16ec", "Broadcom BCM957414 MF", 0 ),
71 PCI_ROM( 0x14e4, 0x16ed, "14e4-16ed", "Broadcom BCM957414 RDMA MF", 0 ),
72 PCI_ROM( 0x14e4, 0x16ee, "14e4-16ee", "Broadcom BCM957416 MF", 0 ),
73 PCI_ROM( 0x14e4, 0x16ef, "14e4-16ef", "Broadcom BCM957416 RDMA MF", 0 ),
74 PCI_ROM( 0x14e4, 0x16f0, "14e4-16f0", "Broadcom BCM957320", 0 ),
75 PCI_ROM( 0x14e4, 0x16f1, "14e4-16f1", "Broadcom BCM957320", 0 ),
76 PCI_ROM( 0x14e4, 0x1750, "14e4-1750", "Broadcom BCM957508", 0 ),
77 PCI_ROM( 0x14e4, 0x1751, "14e4-1751", "Broadcom BCM957504", 0 ),
78 PCI_ROM( 0x14e4, 0x1752, "14e4-1752", "Broadcom BCM957502", 0 ),
79 PCI_ROM( 0x14e4, 0x1760, "14e4-1760", "Broadcom BCM957608", 0 ),
80 PCI_ROM( 0x14e4, 0x1800, "14e4-1800", "Broadcom BCM957502 MF", 0 ),
81 PCI_ROM( 0x14e4, 0x1801, "14e4-1801", "Broadcom BCM957504 MF", 0 ),
82 PCI_ROM( 0x14e4, 0x1802, "14e4-1802", "Broadcom BCM957508 MF", 0 ),
83 PCI_ROM( 0x14e4, 0x1803, "14e4-1803", "Broadcom BCM957502 RDMA MF", 0 ),
84 PCI_ROM( 0x14e4, 0x1804, "14e4-1804", "Broadcom BCM957504 RDMA MF", 0 ),
85 PCI_ROM( 0x14e4, 0x1805, "14e4-1805", "Broadcom BCM957508 RDMA MF", 0 ),
86 PCI_ROM( 0x14e4, 0x1806, "14e4-1806", "Broadcom BCM9575xx VF", BNXT_FLAG_PCI_VF ),
87 PCI_ROM( 0x14e4, 0x1807, "14e4-1807", "Broadcom BCM9575xx RDMA VF", BNXT_FLAG_PCI_VF ),
88 PCI_ROM( 0x14e4, 0x1808, "14e4-1808", "Broadcom BCM9575xx HV VF", BNXT_FLAG_PCI_VF ),
89 PCI_ROM( 0x14e4, 0x1809, "14e4-1809", "Broadcom BCM9575xx RDMA HV VF", BNXT_FLAG_PCI_VF ),
90 PCI_ROM( 0x14e4, 0x1819, "bcm95760x-1819", "Broadcom BCM95760x VF", BNXT_FLAG_PCI_VF ),
91 PCI_ROM( 0x14e4, 0x181b, "bcm95760x-181b", "Broadcom BCM95760x HV VF", BNXT_FLAG_PCI_VF ),
92};
93
94/**
95 * Check if Virtual Function
96 */
98{
99 if ( FLAG_TEST ( pdev->id->driver_data, BNXT_FLAG_PCI_VF ) ) {
100 return 1;
101 }
102 return 0;
103}
104
105static void bnxt_down_pci ( struct bnxt *bp )
106{
107 DBGP ( "%s\n", __func__ );
108 if ( bp->bar2 ) {
109 iounmap ( bp->bar2 );
110 bp->bar2 = NULL;
111 }
112 if ( bp->bar1 ) {
113 iounmap ( bp->bar1 );
114 bp->bar1 = NULL;
115 }
116 if ( bp->bar0 ) {
117 iounmap ( bp->bar0 );
118 bp->bar0 = NULL;
119 }
120}
121
/**
 * Map one PCI BAR into the driver's address space.
 *
 * @v pdev		PCI device
 * @v reg		BAR configuration-space offset (e.g. PCI_BASE_ADDRESS_0)
 * @ret base		Mapped virtual address (from pci_ioremap)
 */
static void *bnxt_pci_base ( struct pci_device *pdev, unsigned int reg )
{
	return pci_ioremap ( pdev, pci_bar_start ( pdev, reg ),
			     pci_bar_size ( pdev, reg ) );
}
130
131static int bnxt_get_pci_info ( struct bnxt *bp )
132{
133 u16 cmd_reg = 0;
134
135 DBGP ( "%s\n", __func__ );
136 /* Disable Interrupt */
137 pci_read_config_word ( bp->pdev, PCI_COMMAND, &bp->cmd_reg );
138 cmd_reg = bp->cmd_reg | PCI_COMMAND_INTX_DISABLE;
139 pci_write_config_word ( bp->pdev, PCI_COMMAND, cmd_reg );
140 pci_read_config_word ( bp->pdev, PCI_COMMAND, &cmd_reg );
141
142 /* SSVID */
144 &bp->subsystem_vendor );
145
146 /* SSDID */
148 &bp->subsystem_device );
149
150 /* Function Number */
151 pci_read_config_byte ( bp->pdev, PCICFG_ME_REGISTER, &bp->pf_num);
152
153 /* Get Bar Address */
154 bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 );
155 bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 );
156 bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 );
157
158 /* Virtual function */
159 bp->vf = bnxt_is_pci_vf ( bp->pdev );
160
161 dbg_pci ( bp, __func__, cmd_reg );
162 return STATUS_SUCCESS;
163}
164
165static int bnxt_get_device_address ( struct bnxt *bp )
166{
167 struct net_device *dev = bp->dev;
168
169 DBGP ( "%s\n", __func__ );
170 memcpy ( &dev->hw_addr[0], ( char * ) &bp->mac_addr[0], ETH_ALEN );
171 if ( !is_valid_ether_addr ( &dev->hw_addr[0] ) ) {
172 DBGP ( "- %s ( ): Failed\n", __func__ );
173 return -EINVAL;
174 }
175
176 return STATUS_SUCCESS;
177}
178
179static void bnxt_set_link ( struct bnxt *bp )
180{
181 if ( bp->link_status == STATUS_LINK_ACTIVE )
182 netdev_link_up ( bp->dev );
183 else
184 netdev_link_down ( bp->dev );
185}
186
187static void dev_p5_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag )
188{
189 void *off;
190 u64 val;
191
192 if ( bp->vf )
193 off = ( void * ) ( bp->bar1 + DB_OFFSET_VF );
194 else
195 off = ( void * ) ( bp->bar1 + DB_OFFSET_PF );
196
197 val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) |
198 ( u64 )DBC_MSG_IDX ( idx );
199 writeq ( val, off );
200}
201
202static void dev_p7_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag, u32 epoch, u32 toggle )
203{
204 void *off;
205 u64 val;
206
207 off = ( void * ) ( bp->bar1 );
208
209 val = ( ( u64 ) DBC_MSG_XID ( xid, flag ) << 32 ) |
210 ( u64 ) DBC_MSG_IDX ( idx ) |
211 ( u64 ) DBC_MSG_EPCH ( epoch ) |
212 ( u64 ) DBC_MSG_TOGGLE ( toggle );
213 writeq ( val, off );
214}
215
216static void bnxt_db_nq ( struct bnxt *bp )
217{
218 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
219 dev_p7_db ( bp, ( u32 ) bp->nq.cons_id,
220 ( u32 ) bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM,
221 ( u32 ) bp->nq.epoch, 0 );
222 else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
223 dev_p5_db ( bp, ( u32 ) bp->nq.cons_id,
224 ( u32 ) bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM );
225 else
226 writel ( CMPL_DOORBELL_KEY_CMPL, ( bp->bar1 + 0 ) );
227}
228
229static void bnxt_db_cq ( struct bnxt *bp )
230{
231 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
232 dev_p7_db ( bp, ( u32 ) bp->cq.cons_id,
233 ( u32 ) bp->cq_ring_id, DBC_DBC_TYPE_CQ,
234 ( u32 ) bp->cq.epoch, ( u32 )bp->nq.toggle );
235 else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
236 dev_p5_db ( bp, ( u32 ) bp->cq.cons_id,
237 ( u32 ) bp->cq_ring_id, DBC_DBC_TYPE_CQ);
238 else
239 writel ( CQ_DOORBELL_KEY_IDX ( bp->cq.cons_id ),
240 ( bp->bar1 + 0 ) );
241}
242
243static void bnxt_db_rx ( struct bnxt *bp, u32 idx )
244{
245 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
246 dev_p7_db ( bp, idx, ( u32 ) bp->rx_ring_id, DBC_DBC_TYPE_SRQ,
247 ( u32 ) bp->rx.epoch, 0 );
248 else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
249 dev_p5_db ( bp, idx, ( u32 ) bp->rx_ring_id, DBC_DBC_TYPE_SRQ );
250 else
251 writel ( RX_DOORBELL_KEY_RX | idx, ( bp->bar1 + 0 ) );
252}
253
254static void bnxt_db_tx ( struct bnxt *bp, u32 idx )
255{
256 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
257 dev_p7_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ,
258 ( u32 )bp->tx.epoch, 0 );
259 else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
260 dev_p5_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ );
261 else
262 writel ( TX_DOORBELL_KEY_TX | idx, ( bp->bar1 + 0 ) );
263}
264
265void bnxt_add_vlan ( struct io_buffer *iob, u16 vlan )
266{
267 char *src = ( char * )iob->data;
268 u16 len = iob_len ( iob );
269
270 memmove ( ( char * ) &src[MAC_HDR_SIZE + VLAN_HDR_SIZE],
271 ( char * ) &src[MAC_HDR_SIZE],
272 ( len - MAC_HDR_SIZE ) );
273
274 * ( u16 * ) ( &src[MAC_HDR_SIZE] ) = BYTE_SWAP_S ( ETHERTYPE_VLAN );
275 * ( u16 * ) ( &src[MAC_HDR_SIZE + 2] ) = BYTE_SWAP_S ( vlan );
276 iob_put ( iob, VLAN_HDR_SIZE );
277}
278
279static u16 bnxt_get_pkt_vlan ( char *src )
280{
281 if ( * ( ( u16 * ) &src[MAC_HDR_SIZE] ) == BYTE_SWAP_S ( ETHERTYPE_VLAN ) )
282 return BYTE_SWAP_S ( * ( ( u16 * ) &src[MAC_HDR_SIZE + 2] ) );
283 return 0;
284}
285
286static inline u32 bnxt_tx_avail ( struct bnxt *bp )
287{
288 u32 avail;
289 u32 use;
290
291 barrier ( );
292 avail = TX_AVAIL ( bp->tx.ring_cnt );
293 use = TX_IN_USE ( bp->tx.prod_id, bp->tx.cons_id, bp->tx.ring_cnt );
294 dbg_tx_avail ( bp, avail, use );
295 return ( avail-use );
296}
297
298void bnxt_set_txq ( struct bnxt *bp, int entry, physaddr_t mapping, int len )
299{
300 struct tx_bd_short *prod_bd;
301
302 prod_bd = ( struct tx_bd_short * ) BD_NOW ( bp->tx.bd_virt,
303 entry, sizeof ( struct tx_bd_short ) );
304 if ( len < 512 )
306 else if ( len < 1024 )
308 else if ( len < 2048 )
310 else
312 prod_bd->flags_type |= TX_BD_FLAGS;
313 prod_bd->dma = mapping;
314 prod_bd->len = len;
315 prod_bd->opaque = ( u32 )entry;
316}
317
318static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx )
319{
320 struct bnxt *bp = dev->priv;
321 struct io_buffer *iob;
322
323 iob = bp->tx.iob[hw_idx];
324 dbg_tx_done ( iob->data, iob_len ( iob ), hw_idx );
325 netdev_tx_complete ( dev, iob );
326 bp->tx.cons_id = NEXT_IDX ( hw_idx, bp->tx.ring_cnt );
327 bp->tx.cnt++;
328 dump_tx_stat ( bp );
329}
330
331int bnxt_free_rx_iob ( struct bnxt *bp )
332{
333 unsigned int i;
334
335 DBGP ( "%s\n", __func__ );
336 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RX_IOB ) ) )
337 return STATUS_SUCCESS;
338
339 for ( i = 0; i < bp->rx.buf_cnt; i++ ) {
340 if ( bp->rx.iob[i] ) {
341 free_rx_iob ( bp->rx.iob[i] );
342 bp->rx.iob[i] = NULL;
343 }
344 }
345 bp->rx.iob_cnt = 0;
346
347 FLAG_RESET ( bp->flag_hwrm, VALID_RX_IOB );
348 return STATUS_SUCCESS;
349}
350
351static void bnxt_set_rx_desc ( u8 *buf, struct io_buffer *iob,
352 u16 cid, u32 idx )
353{
354 struct rx_prod_pkt_bd *desc;
355 u16 off = cid * sizeof ( struct rx_prod_pkt_bd );
356
357 desc = ( struct rx_prod_pkt_bd * ) &buf[off];
360 desc->opaque = idx;
361 desc->dma = iob_dma ( iob );
362}
363
364static int bnxt_alloc_rx_iob ( struct bnxt *bp, u16 cons_id, u16 iob_idx )
365{
366 struct io_buffer *iob;
367
368 iob = alloc_rx_iob ( BNXT_RX_STD_DMA_SZ, bp->dma );
369 if ( !iob ) {
370 DBGP ( "- %s ( ): alloc_iob Failed\n", __func__ );
371 return -ENOMEM;
372 }
373
374 dbg_alloc_rx_iob ( iob, iob_idx, cons_id );
375 bnxt_set_rx_desc ( ( u8 * )bp->rx.bd_virt, iob, cons_id,
376 ( u32 ) iob_idx );
377 bp->rx.iob[iob_idx] = iob;
378 return 0;
379}
380
382{
383 u16 cons_id = ( bp->rx.cons_id % bp->rx.ring_cnt );
384 u16 iob_idx;
385
386 while ( bp->rx.iob_cnt < bp->rx.buf_cnt ) {
387 iob_idx = ( cons_id % bp->rx.buf_cnt );
388 if ( !bp->rx.iob[iob_idx] ) {
389 if ( bnxt_alloc_rx_iob ( bp, cons_id, iob_idx ) < 0 ) {
390 dbg_alloc_rx_iob_fail ( iob_idx, cons_id );
391 break;
392 }
393 }
394 cons_id = NEXT_IDX ( cons_id, bp->rx.ring_cnt );
395 /* If the ring has wrapped, flip the epoch bit */
396 if ( iob_idx > cons_id )
397 bp->rx.epoch ^= 1;
398 bp->rx.iob_cnt++;
399 }
400
401 if ( cons_id != bp->rx.cons_id ) {
402 dbg_rx_cid ( bp->rx.cons_id, cons_id );
403 bp->rx.cons_id = cons_id;
404 bnxt_db_rx ( bp, ( u32 )cons_id );
405 }
406
407 FLAG_SET ( bp->flag_hwrm, VALID_RX_IOB );
408 return STATUS_SUCCESS;
409}
410
411u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob,
412 struct rx_pkt_cmpl *rx_cmp,
413 struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len )
414{
415 struct rx_pkt_v3_cmpl *rx_cmp_v3 = ( struct rx_pkt_v3_cmpl * ) rx_cmp;
416 struct rx_pkt_v3_cmpl_hi *rx_cmp_hi_v3 = ( struct rx_pkt_v3_cmpl_hi * ) rx_cmp_hi;
417 u8 *rx_buf = ( u8 * ) iob->data;
418 u16 err_flags;
419 u8 ignore_chksum_err = 0;
420 int i;
421
422 if ( ( rx_cmp_v3->flags_type & RX_PKT_V3_CMPL_TYPE_MASK ) ==
424 err_flags = rx_cmp_hi_v3->errors_v2 >> RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_SFT;
425 } else
426 err_flags = rx_cmp_hi->errors_v2 >> RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT;
427 if ( rx_cmp_hi->errors_v2 == 0x20 || rx_cmp_hi->errors_v2 == 0x21 )
428 ignore_chksum_err = 1;
429
430 if ( err_flags && !ignore_chksum_err ) {
431 bp->rx.drop_err++;
432 return 1;
433 }
434
435 for ( i = 0; i < 6; i++ ) {
436 if ( rx_buf[6 + i] != bp->mac_addr[i] )
437 break;
438 }
439
440 /* Drop the loopback packets */
441 if ( i == 6 ) {
442 bp->rx.drop_lb++;
443 return 2;
444 }
445
446 iob_put ( iob, rx_len );
447
448 bp->rx.good++;
449 return 0;
450}
451
452static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt )
453{
454 u16 cons_id;
455
456 cons_id = bp->cq.cons_id + cnt;
457 if ( cons_id >= bp->cq.ring_cnt ) {
458 /* Toggle completion bit when the ring wraps. */
459 bp->cq.completion_bit ^= 1;
460 bp->cq.epoch ^= 1;
461 cons_id = cons_id - bp->cq.ring_cnt;
462 }
463 bp->cq.cons_id = cons_id;
464}
465
466void bnxt_rx_process ( struct net_device *dev, struct bnxt *bp,
467 struct rx_pkt_cmpl *rx_cmp,
468 struct rx_pkt_cmpl_hi *rx_cmp_hi )
469{
470 u32 desc_idx = rx_cmp->opaque;
471 struct io_buffer *iob = bp->rx.iob[desc_idx];
472 u8 drop;
473
474 dump_rx_bd ( rx_cmp, rx_cmp_hi, desc_idx );
475 assert ( iob );
476 drop = bnxt_rx_drop ( bp, iob, rx_cmp, rx_cmp_hi, rx_cmp->len );
477 dbg_rxp ( iob->data, rx_cmp->len, drop );
478 if ( drop )
479 netdev_rx_err ( dev, iob, -EINVAL );
480 else
481 netdev_rx ( dev, iob );
482
483 bp->rx.cnt++;
484 bp->rx.iob[desc_idx] = NULL;
485 bp->rx.iob_cnt--;
487 bnxt_adv_cq_index ( bp, 2 ); /* Rx completion is 2 entries. */
488 dbg_rx_stat ( bp );
489}
490
491static int bnxt_rx_complete ( struct net_device *dev,
492 struct rx_pkt_cmpl *rx_cmp )
493{
494 struct bnxt *bp = dev->priv;
495 struct rx_pkt_cmpl_hi *rx_cmp_hi;
496 u8 cmpl_bit = bp->cq.completion_bit;
497
498 if ( bp->cq.cons_id == ( bp->cq.ring_cnt - 1 ) ) {
499 rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) CQ_DMA_ADDR ( bp );
500 cmpl_bit ^= 0x1; /* Ring has wrapped. */
501 } else
502 rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) ( rx_cmp + 1 );
503
504 if ( ! ( ( rx_cmp_hi->errors_v2 & RX_PKT_CMPL_V2 ) ^ cmpl_bit ) ) {
505 bnxt_rx_process ( dev, bp, rx_cmp, rx_cmp_hi );
506 return SERVICE_NEXT_CQ_BD;
507 } else
509}
510
511void bnxt_mm_init_hwrm ( struct bnxt *bp, const char *func )
512{
513 DBGP ( "%s\n", __func__ );
514 memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE );
515 memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE );
516 memset ( bp->hwrm_addr_dma, 0, DMA_BUFFER_SIZE );
517 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
518 bp->hwrm_cmd_timeout = HWRM_CMD_DEFAULT_TIMEOUT;
519 dbg_mem ( bp, func );
520}
521
522void bnxt_mm_init_rings ( struct bnxt *bp, const char *func )
523{
524 DBGP ( "%s\n", __func__ );
525 memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE );
526 memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE );
527 memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE );
528 memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE );
529
530 bp->link_status = STATUS_LINK_DOWN;
531 bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
533 bp->nq.ring_cnt = MAX_NQ_DESC_CNT;
534 bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
535 bp->tx.ring_cnt = MAX_TX_DESC_CNT;
536 bp->rx.ring_cnt = MAX_RX_DESC_CNT;
537 bp->rx.buf_cnt = NUM_RX_BUFFERS;
538 dbg_mem ( bp, func );
539}
540
541void bnxt_mm_nic ( struct bnxt *bp )
542{
543 DBGP ( "%s\n", __func__ );
544 memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE );
545 memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE );
546 memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE );
547 memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE );
548 bp->nq.cons_id = 0;
549 bp->nq.completion_bit = 0x1;
550 bp->nq.epoch = 0;
551 bp->nq.toggle = 0;
552 bp->cq.cons_id = 0;
553 bp->cq.completion_bit = 0x1;
554 bp->cq.epoch = 0;
555 bp->tx.prod_id = 0;
556 bp->tx.cons_id = 0;
557 bp->tx.epoch = 0;
558 bp->rx.cons_id = 0;
559 bp->rx.iob_cnt = 0;
560 bp->rx.epoch = 0;
561
563 bp->nq.ring_cnt = MAX_NQ_DESC_CNT;
564 bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
565 bp->tx.ring_cnt = MAX_TX_DESC_CNT;
566 bp->rx.ring_cnt = MAX_RX_DESC_CNT;
567 bp->rx.buf_cnt = NUM_RX_BUFFERS;
568}
569
571{
572 DBGP ( "%s\n", __func__ );
573 if ( bp->nq.bd_virt ) {
574 dma_free ( &bp->nq_mapping, bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
575 bp->nq.bd_virt = NULL;
576 }
577
578 if ( bp->cq.bd_virt ) {
579 dma_free ( &bp->cq_mapping, bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
580 bp->cq.bd_virt = NULL;
581 }
582
583 if ( bp->rx.bd_virt ) {
584 dma_free ( &bp->rx_mapping, bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
585 bp->rx.bd_virt = NULL;
586 }
587
588 if ( bp->tx.bd_virt ) {
589 dma_free ( &bp->tx_mapping, bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
590 bp->tx.bd_virt = NULL;
591 }
592
593 DBGP ( "- %s ( ): - Done\n", __func__ );
594}
595
596void bnxt_free_hwrm_mem ( struct bnxt *bp )
597{
598 DBGP ( "%s\n", __func__ );
599 if ( bp->hwrm_addr_dma ) {
600 dma_free ( &bp->dma_mapped, bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
601 bp->hwrm_addr_dma = NULL;
602 }
603
604 if ( bp->hwrm_addr_resp ) {
605 dma_free ( &bp->resp_mapping, bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
606 bp->hwrm_addr_resp = NULL;
607 }
608
609 if ( bp->hwrm_addr_req ) {
610 dma_free ( &bp->req_mapping, bp->hwrm_addr_req, REQ_BUFFER_SIZE );
611 bp->hwrm_addr_req = NULL;
612 }
613 DBGP ( "- %s ( ): - Done\n", __func__ );
614}
615
617{
618 DBGP ( "%s\n", __func__ );
619 bp->hwrm_addr_req = dma_alloc ( bp->dma, &bp->req_mapping,
621 bp->hwrm_addr_resp = dma_alloc ( bp->dma, &bp->resp_mapping,
623 bp->hwrm_addr_dma = dma_alloc ( bp->dma, &bp->dma_mapped,
625
626 if ( bp->hwrm_addr_req && bp->hwrm_addr_resp && bp->hwrm_addr_dma) {
627 bnxt_mm_init_hwrm ( bp, __func__ );
628 return STATUS_SUCCESS;
629 }
630
631 DBGP ( "- %s ( ): Failed\n", __func__ );
633 return -ENOMEM;
634}
635
637{
638 DBGP ( "%s\n", __func__ );
639 bp->tx.bd_virt = dma_alloc ( bp->dma, &bp->tx_mapping,
641 bp->rx.bd_virt = dma_alloc ( bp->dma, &bp->rx_mapping,
643 bp->cq.bd_virt = dma_alloc ( bp->dma, &bp->cq_mapping,
645 bp->nq.bd_virt = dma_alloc ( bp->dma, &bp->nq_mapping,
647 if ( bp->tx.bd_virt && bp->rx.bd_virt &&
648 bp->nq.bd_virt && bp->cq.bd_virt) {
649 bnxt_mm_init_rings ( bp, __func__ );
650 return STATUS_SUCCESS;
651 }
652
653 DBGP ( "- %s ( ): Failed\n", __func__ );
655 return -ENOMEM;
656}
657
658static void hwrm_init ( struct bnxt *bp, struct input *req, u16 cmd, u16 len )
659{
660 memset ( req, 0, len );
661 req->req_type = cmd;
664 req->resp_addr = RESP_DMA_ADDR ( bp );
665 req->seq_id = bp->seq_id++;
666}
667
668static void hwrm_write_req ( struct bnxt *bp, void *req, u32 cnt )
669{
670 u32 i = 0;
671
672 for ( i = 0; i < cnt; i++ ) {
673 writel ( ( ( u32 * ) req )[i],
674 ( bp->bar0 + GRC_COM_CHAN_BASE + ( i * 4 ) ) );
675 }
676 writel ( 0x1, ( bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG ) );
677}
678
679static void short_hwrm_cmd_req ( struct bnxt *bp, u16 len, u16 req_type )
680{
681 struct hwrm_short_input sreq;
682
683 memset ( &sreq, 0, sizeof ( struct hwrm_short_input ) );
684 sreq.req_type = req_type;
686 sreq.size = len;
687 sreq.req_addr = REQ_DMA_ADDR ( bp );
688
689 dbg_short_cmd ( ( u8 * ) &sreq, __func__,
690 sizeof ( struct hwrm_short_input ) );
691 /* Ensure request buffer is flushed before writing short command */
692 wmb();
693 hwrm_write_req ( bp, &sreq, sizeof ( struct hwrm_short_input ) / 4 );
694}
695
696static int wait_resp ( struct bnxt *bp, u32 tmo, u16 len, const char *func )
697{
698 struct input *req = ( struct input * ) REQ_DMA_ADDR ( bp );
699 struct output *resp = ( struct output * ) RESP_DMA_ADDR ( bp );
700 u8 *ptr = ( u8 * )resp;
701 u32 idx;
702 u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER ( ( u32 )tmo );
703 u16 resp_len = 0;
704 u16 ret = STATUS_TIMEOUT;
705
706 if ( ( len > bp->hwrm_max_req_len ) ||
709 else
710 hwrm_write_req ( bp, req, ( u32 ) ( len / 4 ) );
711
712 for ( idx = 0; idx < wait_cnt; idx++ ) {
713 resp_len = resp->resp_len;
714 if ( resp->seq_id == req->seq_id &&
715 resp->req_type == req->req_type &&
716 ptr[resp_len - 1] == 1 ) {
717 bp->last_resp_code = resp->error_code;
718 ret = resp->error_code;
719 break;
720 }
722 }
723 dbg_hw_cmd ( bp, func, len, resp_len, tmo, ret );
724 return ( int )ret;
725}
726
727static int bnxt_hwrm_ver_get ( struct bnxt *bp )
728{
729 u16 cmd_len = ( u16 )sizeof ( struct hwrm_ver_get_input );
730 struct hwrm_ver_get_input *req;
731 struct hwrm_ver_get_output *resp;
732 int rc;
733
734 DBGP ( "%s\n", __func__ );
735 req = ( struct hwrm_ver_get_input * ) REQ_DMA_ADDR ( bp );
736 resp = ( struct hwrm_ver_get_output * ) RESP_DMA_ADDR ( bp );
737 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_VER_GET, cmd_len );
741 rc = wait_resp ( bp, HWRM_CMD_DEFAULT_TIMEOUT, cmd_len, __func__ );
742 if ( rc )
743 return STATUS_FAILURE;
744
745 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
746 resp->hwrm_intf_min_8b << 8 |
747 resp->hwrm_intf_upd_8b;
748 bp->hwrm_cmd_timeout = ( u32 ) resp->def_req_timeout;
749 if ( !bp->hwrm_cmd_timeout )
750 bp->hwrm_cmd_timeout = ( u32 ) HWRM_CMD_DEFAULT_TIMEOUT;
751 if ( resp->hwrm_intf_maj_8b >= 1 )
752 bp->hwrm_max_req_len = resp->max_req_win_len;
753 bp->chip_id = resp->chip_rev << 24 | resp->chip_metal << 16 |
754 resp->chip_bond_id << 8 | resp->chip_platform_type;
755 bp->chip_num = resp->chip_num;
756 if ( resp->dev_caps_cfg & SHORT_CMD_SUPPORTED )
758 if ( resp->dev_caps_cfg & SHORT_CMD_REQUIRED )
760 bp->hwrm_max_ext_req_len = resp->max_ext_req_len;
761 if ( ( bp->chip_num == CHIP_NUM_57508 ) ||
762 ( bp->chip_num == CHIP_NUM_57504 ) ||
763 ( bp->chip_num == CHIP_NUM_57502 ) ) {
764 FLAG_SET ( bp->flags, BNXT_FLAG_IS_CHIP_P5 );
766 }
767 if ( bp->chip_num == CHIP_NUM_57608 ) {
768 FLAG_SET ( bp->flags, BNXT_FLAG_IS_CHIP_P7 );
770 }
771 dbg_fw_ver ( resp, bp->hwrm_cmd_timeout );
772 return STATUS_SUCCESS;
773}
774
776{
777 u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_resource_qcaps_input );
780 int rc;
781
782 DBGP ( "%s\n", __func__ );
783 req = ( struct hwrm_func_resource_qcaps_input * ) REQ_DMA_ADDR ( bp );
784 resp = ( struct hwrm_func_resource_qcaps_output * ) RESP_DMA_ADDR ( bp );
785 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_FUNC_RESOURCE_QCAPS, cmd_len );
786 req->fid = ( u16 ) HWRM_NA_SIGNATURE;
787 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
788 if ( rc != STATUS_SUCCESS )
789 return STATUS_SUCCESS;
790
792
793 // VFs
794 if ( !bp->vf ) {
795 bp->max_vfs = resp->max_vfs;
796 bp->vf_res_strategy = resp->vf_reservation_strategy;
797 }
798
799 // vNICs
800 bp->min_vnics = resp->min_vnics;
801 bp->max_vnics = resp->max_vnics;
802
803 // MSI-X
804 bp->max_msix = resp->max_msix;
805
806 // Ring Groups
807 bp->min_hw_ring_grps = resp->min_hw_ring_grps;
808 bp->max_hw_ring_grps = resp->max_hw_ring_grps;
809
810 // TX Rings
811 bp->min_tx_rings = resp->min_tx_rings;
812 bp->max_tx_rings = resp->max_tx_rings;
813
814 // RX Rings
815 bp->min_rx_rings = resp->min_rx_rings;
816 bp->max_rx_rings = resp->max_rx_rings;
817
818 // Completion Rings
819 bp->min_cp_rings = resp->min_cmpl_rings;
820 bp->max_cp_rings = resp->max_cmpl_rings;
821
822 // RSS Contexts
823 bp->min_rsscos_ctxs = resp->min_rsscos_ctx;
824 bp->max_rsscos_ctxs = resp->max_rsscos_ctx;
825
826 // L2 Contexts
827 bp->min_l2_ctxs = resp->min_l2_ctxs;
828 bp->max_l2_ctxs = resp->max_l2_ctxs;
829
830 // Statistic Contexts
831 bp->min_stat_ctxs = resp->min_stat_ctx;
832 bp->max_stat_ctxs = resp->max_stat_ctx;
834 return STATUS_SUCCESS;
835}
836
837static u32 bnxt_set_ring_info ( struct bnxt *bp )
838{
839 u32 enables = 0;
840
841 DBGP ( "%s\n", __func__ );
842 bp->num_cmpl_rings = DEFAULT_NUMBER_OF_CMPL_RINGS;
843 bp->num_tx_rings = DEFAULT_NUMBER_OF_TX_RINGS;
844 bp->num_rx_rings = DEFAULT_NUMBER_OF_RX_RINGS;
845 bp->num_hw_ring_grps = DEFAULT_NUMBER_OF_RING_GRPS;
846 bp->num_stat_ctxs = DEFAULT_NUMBER_OF_STAT_CTXS;
847
848 if ( bp->min_cp_rings <= DEFAULT_NUMBER_OF_CMPL_RINGS )
849 bp->num_cmpl_rings = bp->min_cp_rings;
850
851 if ( bp->min_tx_rings <= DEFAULT_NUMBER_OF_TX_RINGS )
852 bp->num_tx_rings = bp->min_tx_rings;
853
854 if ( bp->min_rx_rings <= DEFAULT_NUMBER_OF_RX_RINGS )
855 bp->num_rx_rings = bp->min_rx_rings;
856
857 if ( bp->min_hw_ring_grps <= DEFAULT_NUMBER_OF_RING_GRPS )
858 bp->num_hw_ring_grps = bp->min_hw_ring_grps;
859
860 if ( bp->min_stat_ctxs <= DEFAULT_NUMBER_OF_STAT_CTXS )
861 bp->num_stat_ctxs = bp->min_stat_ctxs;
862
863 dbg_num_rings ( bp );
869 return enables;
870}
871
872static void bnxt_hwrm_assign_resources ( struct bnxt *bp )
873{
874 struct hwrm_func_cfg_input *req;
875 u32 enables = 0;
876
877 DBGP ( "%s\n", __func__ );
880
881 req = ( struct hwrm_func_cfg_input * ) REQ_DMA_ADDR ( bp );
882 req->num_cmpl_rings = bp->num_cmpl_rings;
883 req->num_tx_rings = bp->num_tx_rings;
884 req->num_rx_rings = bp->num_rx_rings;
885 req->num_stat_ctxs = bp->num_stat_ctxs;
886 req->num_hw_ring_grps = bp->num_hw_ring_grps;
887 req->enables = enables;
888}
889
890static int bnxt_hwrm_func_qcaps_req ( struct bnxt *bp )
891{
892 u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcaps_input );
893 struct hwrm_func_qcaps_input *req;
894 struct hwrm_func_qcaps_output *resp;
895 int rc;
896
897 DBGP ( "%s\n", __func__ );
898 if ( bp->vf )
899 return STATUS_SUCCESS;
900
901 req = ( struct hwrm_func_qcaps_input * ) REQ_DMA_ADDR ( bp );
902 resp = ( struct hwrm_func_qcaps_output * ) RESP_DMA_ADDR ( bp );
903 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_FUNC_QCAPS, cmd_len );
904 req->fid = ( u16 ) HWRM_NA_SIGNATURE;
905 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
906 if ( rc ) {
907 DBGP ( "- %s ( ): Failed\n", __func__ );
908 return STATUS_FAILURE;
909 }
910
911 bp->fid = resp->fid;
912 bp->port_idx = ( u8 ) resp->port_id;
913
915 bp->err_rcvry_supported = 1;
916 }
917
918 /* Get MAC address for this PF */
919 memcpy ( &bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN );
920 dbg_func_qcaps ( bp );
921
922 return STATUS_SUCCESS;
923}
924
925static int bnxt_hwrm_func_qcfg_req ( struct bnxt *bp )
926{
927 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_func_qcfg_input );
928 struct hwrm_func_qcfg_input *req;
929 struct hwrm_func_qcfg_output *resp;
930 int rc;
931
932 DBGP ( "%s\n", __func__ );
933 req = ( struct hwrm_func_qcfg_input * ) REQ_DMA_ADDR ( bp );
934 resp = ( struct hwrm_func_qcfg_output * ) RESP_DMA_ADDR ( bp );
935 hwrm_init ( bp, ( void * )req, ( u16 ) HWRM_FUNC_QCFG, cmd_len );
936 req->fid = ( u16 ) HWRM_NA_SIGNATURE;
937 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
938 if ( rc ) {
939 DBGP ( "- %s ( ): Failed\n", __func__ );
940 return STATUS_FAILURE;
941 }
942
944 FLAG_SET ( bp->flags, BNXT_FLAG_MULTI_HOST );
945
946 if ( resp->port_partition_type &
948 FLAG_SET ( bp->flags, BNXT_FLAG_NPAR_MODE );
949
950 bp->ordinal_value = ( u8 ) resp->pci_id & 0x0F;
951 bp->stat_ctx_id = resp->stat_ctx_id;
952
953 /* If VF is set to TRUE, then use some data from func_qcfg ( ). */
954 if ( bp->vf ) {
955 bp->fid = resp->fid;
956 bp->port_idx = ( u8 ) resp->port_id;
957 bp->vlan_id = resp->vlan;
958
959 /* Get MAC address for this VF */
960 memcpy ( bp->mac_addr, resp->mac_address, ETH_ALEN );
961 }
962 dbg_func_qcfg ( bp );
963 return STATUS_SUCCESS;
964}
965
967{
968 u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_qcaps_input );
969 struct hwrm_port_phy_qcaps_input *req;
970 struct hwrm_port_phy_qcaps_output *resp;
971 int rc;
972
973 DBGP ( "%s\n", __func__ );
974
975 req = ( struct hwrm_port_phy_qcaps_input * ) REQ_DMA_ADDR ( bp );
976 resp = ( struct hwrm_port_phy_qcaps_output * ) RESP_DMA_ADDR ( bp );
977 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_PORT_PHY_QCAPS, cmd_len );
978 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
979 if ( rc ) {
980 DBGP ( "-s %s ( ): Failed\n", __func__ );
981 return STATUS_FAILURE;
982 }
983
986
987 return STATUS_SUCCESS;
988}
989
990static int bnxt_hwrm_func_reset_req ( struct bnxt *bp )
991{
992 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_func_reset_input );
993 struct hwrm_func_reset_input *req;
994
995 DBGP ( "%s\n", __func__ );
996 req = ( struct hwrm_func_reset_input * ) REQ_DMA_ADDR ( bp );
997 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_FUNC_RESET, cmd_len );
998 if ( !bp->vf )
1000
1001 return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
1002}
1003
1004static int bnxt_hwrm_func_cfg_req ( struct bnxt *bp )
1005{
1006 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_func_cfg_input );
1007 struct hwrm_func_cfg_input *req;
1008
1009 DBGP ( "%s\n", __func__ );
1010 if ( bp->vf )
1011 return STATUS_SUCCESS;
1012
1013 req = ( struct hwrm_func_cfg_input * ) REQ_DMA_ADDR ( bp );
1014 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_FUNC_CFG, cmd_len );
1015 req->fid = ( u16 ) HWRM_NA_SIGNATURE;
1017 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) {
1021 req->num_msix = 1;
1022 req->num_vnics = 1;
1024 }
1025 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1026}
1027
/* Query the firmware error-recovery configuration
 * ( HWRM_ERROR_RECOVERY_QCFG ) and cache the health / heartbeat /
 * reset-register map in bp->er for later recovery polling.  When the
 * device does not advertise error-recovery support, only the default
 * polling frequency is set. */
1029{
 1032 int rc = 0;
 1033 u8 i = 0;
 1034 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_error_recovery_qcfg_input );
 1035
 1036 DBGP ( "%s\n", __func__ );
 1037 /* Set default error recovery heartbeat polling value (in 100ms)*/
 1038 bp->er.drv_poll_freq = 100;
 1039 if ( ! ( bp->err_rcvry_supported ) ) {
 1040 return STATUS_SUCCESS;
 1041 }
 1042
 1043 req = ( struct hwrm_error_recovery_qcfg_input * ) REQ_DMA_ADDR ( bp );
 1044 resp = ( struct hwrm_error_recovery_qcfg_output * ) RESP_DMA_ADDR ( bp );
 1045
 1046 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_ER_QCFG, cmd_len );
 1047
 1048 rc = wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
 1049 if ( rc ) {
 1050 DBGP ( "- %s ( ): Failed\n", __func__ );
 1051 return STATUS_FAILURE;
 1052 }
 1053
 /* Cache timing parameters reported by firmware */
 1054 bp->er.flags = resp->flags;
 1055 bp->er.drv_poll_freq = resp->driver_polling_freq;
 1056 bp->er.master_wait_period = resp->master_wait_period;
 1057 bp->er.normal_wait_period = resp->normal_wait_period;
 1058 bp->er.master_wait_post_rst = resp->master_wait_post_reset;
 1059 bp->er.max_bailout_post_rst = resp->max_bailout_time;
 1060
 /* Cache the register map used to monitor firmware health */
 1061 bp->er.fw_status_reg = resp->fw_health_status_reg;
 1062 bp->er.fw_hb_reg = resp->fw_heartbeat_reg;
 1063 bp->er.fw_rst_cnt_reg = resp->fw_reset_cnt_reg;
 1064 bp->er.recvry_cnt_reg = resp->err_recovery_cnt_reg;
 1065 bp->er.rst_inprg_reg = resp->reset_inprogress_reg;
 1066
 1067 bp->er.rst_inprg_reg_mask = resp->reset_inprogress_reg_mask;
 1068 bp->er.reg_array_cnt = resp->reg_array_cnt;
 1069
 1070 DBGP ( "flags = 0x%x\n", resp->flags );
 1071 DBGP ( "driver_polling_freq = 0x%x\n", resp->driver_polling_freq );
 1072 DBGP ( "master_wait_period = 0x%x\n", resp->master_wait_period );
 1073 DBGP ( "normal_wait_period = 0x%x\n", resp->normal_wait_period );
 1074 DBGP ( "wait_post_reset = 0x%x\n", resp->master_wait_post_reset );
 1075 DBGP ( "bailout_post_reset = 0x%x\n", resp->max_bailout_time );
 1076 DBGP ( "reg_array_cnt = %x\n", resp->reg_array_cnt );
 1077
 /* Copy the per-step reset register/value/delay sequence */
 1078 for ( i = 0; i < resp->reg_array_cnt; i++ ) {
 1079 bp->er.rst_reg[i] = resp->reset_reg[i];
 1080 bp->er.rst_reg_val[i] = resp->reset_reg_val[i];
 1081 bp->er.delay_after_rst[i] = resp->delay_after_reset[i];
 1082
 1083 DBGP ( "rst_reg = %x ", bp->er.rst_reg[i] );
 1084 DBGP ( "rst_reg_val = %x ", bp->er.rst_reg_val[i] );
 1085 DBGP ( "rst_after_reset = %x\n", bp->er.delay_after_rst[i] );
 1086 }
 1087
 1088 return STATUS_SUCCESS;
 1089}
1090
/* Register this driver instance with firmware ( HWRM_FUNC_DRV_RGTR ) so
 * firmware forwards async events; marks VALID_DRIVER_REG on success so
 * the teardown path knows to unregister.  NOTE(review): the request
 * field assignments (OS type, async-event mask, error-recovery capable
 * flag) are not visible in this extract. */
1091static int bnxt_hwrm_func_drv_rgtr ( struct bnxt *bp )
 1092{
 1093 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_func_drv_rgtr_input );
 1094 struct hwrm_func_drv_rgtr_input *req;
 1095 int rc;
 1096
 1097 DBGP ( "%s\n", __func__ );
 1098 req = ( struct hwrm_func_drv_rgtr_input * ) REQ_DMA_ADDR ( bp );
 1099 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_FUNC_DRV_RGTR, cmd_len );
 1100
 1101 /* Register with HWRM */
 1106
 1111
 1112 if ( bp->err_rcvry_supported ) {
 1117 }
 1118
 1123 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1124 if ( rc ) {
 1125 DBGP ( "- %s ( ): Failed\n", __func__ );
 1126 return STATUS_FAILURE;
 1127 }
 1128
 1129 FLAG_SET ( bp->flag_hwrm, VALID_DRIVER_REG );
 1130 return STATUS_SUCCESS;
 1131}
1132
/* Unregister the driver from firmware ( HWRM_FUNC_DRV_UNRGTR ).  No-op
 * unless a prior registration succeeded (VALID_DRIVER_REG). */
1133static int bnxt_hwrm_func_drv_unrgtr ( struct bnxt *bp )
 1134{
 1135 u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_unrgtr_input );
 1136 struct hwrm_func_drv_unrgtr_input *req;
 1137 int rc;
 1138
 1139 DBGP ( "%s\n", __func__ );
 1140 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_DRIVER_REG ) ) )
 1141 return STATUS_SUCCESS;
 1142
 1143 req = ( struct hwrm_func_drv_unrgtr_input * ) REQ_DMA_ADDR ( bp );
 1144 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_FUNC_DRV_UNRGTR, cmd_len );
 1146 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1147 if ( rc )
 1148 return STATUS_FAILURE;
 1149
 1150 FLAG_RESET ( bp->flag_hwrm, VALID_DRIVER_REG );
 1151 return STATUS_SUCCESS;
 1152}
1153
/* Point firmware's async-event completion ring at our NQ (P5+) or CQ
 * (earlier chips).  A VF uses HWRM_FUNC_VF_CFG (which also programs
 * MTU, VLAN and MAC); a PF uses HWRM_FUNC_CFG with its own fid. */
1154static int bnxt_hwrm_set_async_event ( struct bnxt *bp )
 1155{
 1156 int rc;
 1157 u16 idx;
 1158
 1159 DBGP ( "%s\n", __func__ );
 1160 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) )
 1161 idx = bp->nq_ring_id;
 1162 else
 1163 idx = bp->cq_ring_id;
 1164 if ( bp->vf ) {
 1165 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_func_vf_cfg_input );
 1166 struct hwrm_func_vf_cfg_input *req;
 1167
 1168 req = ( struct hwrm_func_vf_cfg_input * ) REQ_DMA_ADDR ( bp );
 1169 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_FUNC_VF_CFG,
 1170 cmd_len );
 1172 req->async_event_cr = idx;
 1173 req->mtu = bp->mtu;
 1174 req->guest_vlan = bp->vlan_id;
 1175 memcpy ( ( char * ) &req->dflt_mac_addr[0], bp->mac_addr,
 1176 ETH_ALEN );
 1177 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1178 } else {
 1179 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_func_cfg_input );
 1180 struct hwrm_func_cfg_input *req;
 1181
 1182 req = ( struct hwrm_func_cfg_input * ) REQ_DMA_ADDR ( bp );
 1183 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_FUNC_CFG, cmd_len );
 1184 req->fid = ( u16 ) HWRM_NA_SIGNATURE;
 1186 req->async_event_cr = idx;
 1187 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1188 }
 1189 return rc;
 1190}
1191
/* Allocate an L2 (MAC) receive filter for our station address
 * ( HWRM_CFA_L2_FILTER_ALLOC ) on the VNIC, record the returned filter
 * id and mark VALID_L2_FILTER so it is freed on teardown.
 * NOTE(review): the flags/enables initialisation and the VF-specific
 * branch body are not visible in this extract. */
1193{
 1194 u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_alloc_input );
 1197 int rc;
 1199 u32 enables;
 1200
 1201 DBGP ( "%s\n", __func__ );
 1202 req = ( struct hwrm_cfa_l2_filter_alloc_input * ) REQ_DMA_ADDR ( bp );
 1203 resp = ( struct hwrm_cfa_l2_filter_alloc_output * ) RESP_DMA_ADDR ( bp );
 1204 if ( bp->vf )
 1209
 1210 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_CFA_L2_FILTER_ALLOC,
 1211 cmd_len );
 1212 req->flags = flags;
 1213 req->enables = enables;
 1214 memcpy ( ( char * ) &req->l2_addr[0], ( char * ) &bp->mac_addr[0],
 1215 ETH_ALEN );
 1216 memset ( ( char * ) &req->l2_addr_mask[0], 0xff, ETH_ALEN );
 1217 if ( !bp->vf ) {
 1218 memcpy ( ( char * ) &req->t_l2_addr[0], bp->mac_addr, ETH_ALEN );
 1219 memset ( ( char * ) &req->t_l2_addr_mask[0], 0xff, ETH_ALEN );
 1220 }
 1222 req->src_id = ( u32 ) bp->port_idx;
 1223 req->dst_id = bp->vnic_id;
 1224 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1225 if ( rc )
 1226 return STATUS_FAILURE;
 1227
 1228 FLAG_SET ( bp->flag_hwrm, VALID_L2_FILTER );
 1229 bp->l2_filter_id = resp->l2_filter_id;
 1230 return STATUS_SUCCESS;
 1231}
1232
/* Free the previously allocated L2 receive filter
 * ( HWRM_CFA_L2_FILTER_FREE ).  No-op unless VALID_L2_FILTER is set. */
1234{
 1235 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_cfa_l2_filter_free_input );
 1237 int rc;
 1238
 1239 DBGP ( "%s\n", __func__ );
 1240 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_L2_FILTER ) ) )
 1241 return STATUS_SUCCESS;
 1242
 1243 req = ( struct hwrm_cfa_l2_filter_free_input * ) REQ_DMA_ADDR ( bp );
 1244 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_CFA_L2_FILTER_FREE,
 1245 cmd_len );
 1246 req->l2_filter_id = bp->l2_filter_id;
 1247 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1248 if ( rc ) {
 1249 DBGP ( "- %s ( ): Failed\n", __func__ );
 1250 return STATUS_FAILURE;
 1251 }
 1252
 1253 FLAG_RESET ( bp->flag_hwrm, VALID_L2_FILTER );
 1254 return STATUS_SUCCESS;
 1255}
1256
/* Translate driver RX_MASK_* bits into the HWRM rx-mask value.  A zero
 * input (and RX_MASK_ACCEPT_NONE) yields no extra accept bits.
 * NOTE(review): the mask |= assignments inside each branch are not
 * visible in this extract. */
1258{
 1259 u32 mask = 0;
 1260
 1261 if ( !rx_mask )
 1262 return mask;
 1263
 1265 if ( rx_mask != RX_MASK_ACCEPT_NONE ) {
 1266 if ( rx_mask & RX_MASK_ACCEPT_MULTICAST )
 1268 if ( rx_mask & RX_MASK_ACCEPT_ALL_MULTICAST )
 1270 if ( rx_mask & RX_MASK_PROMISCUOUS_MODE )
 1272 }
 1273 return mask;
 1274}
1275
/* Program the VNIC receive mask ( HWRM_CFA_L2_SET_RX_MASK ) from the
 * driver-level rx_mask bits. */
1276static int bnxt_hwrm_set_rx_mask ( struct bnxt *bp, u32 rx_mask )
 1277{
 1278 u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_set_rx_mask_input );
 1280 u32 mask = set_rx_mask ( rx_mask );
 1281
 1282 req = ( struct hwrm_cfa_l2_set_rx_mask_input * ) REQ_DMA_ADDR ( bp );
 1283 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_CFA_L2_SET_RX_MASK,
 1284 cmd_len );
 1285 req->vnic_id = bp->vnic_id;
 1286 req->mask = mask;
 1287
 1288 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1289}
1290
1291static int bnxt_hwrm_port_phy_qcfg ( struct bnxt *bp, u16 idx )
1292{
1293 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_port_phy_qcfg_input );
1294 struct hwrm_port_phy_qcfg_input *req;
1295 struct hwrm_port_phy_qcfg_output *resp;
1296 int rc;
1297
1298 DBGP ( "%s\n", __func__ );
1299 req = ( struct hwrm_port_phy_qcfg_input * ) REQ_DMA_ADDR ( bp );
1300 resp = ( struct hwrm_port_phy_qcfg_output * ) RESP_DMA_ADDR ( bp );
1301 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_PORT_PHY_QCFG, cmd_len );
1302 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1303 if ( rc ) {
1304 DBGP ( "- %s ( ): Failed\n", __func__ );
1305 return STATUS_FAILURE;
1306 }
1307
1308 if ( idx & SUPPORT_SPEEDS )
1309 bp->support_speeds = resp->support_speeds;
1310
1311 if ( idx & SUPPORT_SPEEDS2 )
1312 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) )
1313 bp->auto_link_speeds2_mask = resp->auto_link_speeds2;
1314
1315 if ( idx & DETECT_MEDIA )
1316 bp->media_detect = resp->module_status;
1317
1318 if ( idx & PHY_SPEED )
1319 bp->current_link_speed = resp->link_speed;
1320
1321 if ( idx & PHY_STATUS ) {
1322 if ( resp->link == PORT_PHY_QCFG_RESP_LINK_LINK )
1323 bp->link_status = STATUS_LINK_ACTIVE;
1324 else
1325 bp->link_status = STATUS_LINK_DOWN;
1326 }
1327 return STATUS_SUCCESS;
1328}
1329
/* Read an NVM configuration variable ( HWRM_NVM_GET_VARIABLE ) into the
 * driver's DMA scratch buffer; uses the extended flash timeout.
 * NOTE(review): the first line of the signature is not visible in this
 * extract. */
1331 u16 data_len, u16 option_num, u16 dimensions, u16 index_0 )
 1332{
 1333 u16 cmd_len = ( u16 )sizeof ( struct hwrm_nvm_get_variable_input );
 1334 struct hwrm_nvm_get_variable_input *req;
 1335
 1336 DBGP ( "%s\n", __func__ );
 1337 req = ( struct hwrm_nvm_get_variable_input * ) REQ_DMA_ADDR ( bp );
 1338 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_NVM_GET_VARIABLE, cmd_len );
 1339 req->dest_data_addr = DMA_DMA_ADDR ( bp );
 1340 req->data_len = data_len;
 1341 req->option_num = option_num;
 1342 req->dimensions = dimensions;
 1343 req->index_0 = index_0;
 1344 return wait_resp ( bp,
 1345 HWRM_CMD_FLASH_MULTIPLAYER ( bp->hwrm_cmd_timeout ),
 1346 cmd_len, __func__ );
 1347}
1348
/* Build bp->link_set from NVM option variables (driver speed, D3 speed,
 * firmware speed, media auto-detect) and translate the LINK_SPEED_FW_*
 * field into a driver medium setting.  Each NVM read lands in the DMA
 * scratch buffer referenced by ptr32.  NOTE(review): the option numbers
 * passed to bnxt_hwrm_nvm_get_variable and the per-case medium
 * assignments are not visible in this extract. */
1349static int bnxt_get_link_speed ( struct bnxt *bp )
 1350{
 1351 u32 *ptr32 = ( u32 * ) DMA_DMA_ADDR ( bp );
 1352
 1353 DBGP ( "%s\n", __func__ );
 1354 if ( ! ( FLAG_TEST (bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) ) {
 1357 1, ( u16 ) bp->port_idx ) != STATUS_SUCCESS )
 1358 return STATUS_FAILURE;
 1359 bp->link_set = SET_LINK ( *ptr32, SPEED_DRV_MASK, SPEED_DRV_SHIFT );
 1362 ( u16 ) bp->port_idx ) != STATUS_SUCCESS )
 1363 return STATUS_FAILURE;
 1364 bp->link_set |= SET_LINK ( *ptr32, D3_SPEED_FW_MASK,
 1366 }
 1368 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
 1369 return STATUS_FAILURE;
 1370 bp->link_set |= SET_LINK ( *ptr32, SPEED_FW_MASK, SPEED_FW_SHIFT );
 1373 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
 1374 return STATUS_FAILURE;
 1375 bp->link_set |= SET_LINK ( *ptr32, MEDIA_AUTO_DETECT_MASK,
 1377
 1378 /* Use LINK_SPEED_FW_xxx which is valid for CHIP_P7 and earlier devices */
 1379 switch ( bp->link_set & LINK_SPEED_FW_MASK ) {
 1380 case LINK_SPEED_FW_1G:
 1382 break;
 1383 case LINK_SPEED_FW_2_5G:
 1385 break;
 1386 case LINK_SPEED_FW_10G:
 1388 break;
 1389 case LINK_SPEED_FW_25G:
 1391 break;
 1392 case LINK_SPEED_FW_40G:
 1394 break;
 1395 case LINK_SPEED_FW_50G:
 1397 break;
 1400 break;
 1401 case LINK_SPEED_FW_100G:
 1403 break;
 1406 break;
 1409 break;
 1410 case LINK_SPEED_FW_200G:
 1412 break;
 1415 break;
 1418 break;
 1421 break;
 1424 break;
 1425 default:
 1427 break;
 1428 }
 1429 prn_set_speed ( bp->link_set );
 1430 return STATUS_SUCCESS;
 1431}
1432
/* Query backing-store context configuration
 * ( HWRM_FUNC_BACKING_STORE_QCFG ).  Only meaningful on P5+ PFs;
 * VFs and older chips return success immediately. */
1434{
 1435 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_func_backing_store_qcfg_input );
 1437
 1438 DBGP ( "%s\n", __func__ );
 1439 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) || bp->vf )
 1440 return STATUS_SUCCESS;
 1441
 1442 req = ( struct hwrm_func_backing_store_qcfg_input * ) REQ_DMA_ADDR ( bp );
 1443 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_FUNC_BACKING_STORE_QCFG,
 1444 cmd_len );
 1445 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1446}
1447
/* Configure backing-store contexts ( HWRM_FUNC_BACKING_STORE_CFG ) with
 * no enables, using the extended HWRM_CMD_WAIT ( 6 ) timeout.  Only
 * meaningful on P5+ PFs. */
1449{
 1450 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_func_backing_store_cfg_input );
 1452
 1453 DBGP ( "%s\n", __func__ );
 1454 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) || bp->vf )
 1455 return STATUS_SUCCESS;
 1456
 1457 req = ( struct hwrm_func_backing_store_cfg_input * ) REQ_DMA_ADDR ( bp );
 1458 hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_CFG,
 1459 cmd_len );
 1461 req->enables = 0;
 1462 return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
 1463}
1464
1465static int bnxt_hwrm_queue_qportcfg ( struct bnxt *bp )
1466{
1467 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_queue_qportcfg_input );
1468 struct hwrm_queue_qportcfg_input *req;
1469 struct hwrm_queue_qportcfg_output *resp;
1470 int rc;
1471
1472 DBGP ( "%s\n", __func__ );
1473
1474 req = ( struct hwrm_queue_qportcfg_input * ) REQ_DMA_ADDR ( bp );
1475 resp = ( struct hwrm_queue_qportcfg_output * ) RESP_DMA_ADDR ( bp );
1476 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_QUEUE_QPORTCFG, cmd_len );
1477 req->flags = 0;
1478 req->port_id = bp->port_idx;
1479 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1480 if ( rc ) {
1481 DBGP ( "- %s ( ): Failed\n", __func__ );
1482 return STATUS_FAILURE;
1483 }
1484
1485 bp->queue_id = resp->queue_id0;
1486 return STATUS_SUCCESS;
1487}
1488
/* Configure port MAC settings ( HWRM_PORT_MAC_CFG ).  PF only.
 * NOTE(review): one request-field assignment between hwrm_init and
 * wait_resp is not visible in this extract. */
1489static int bnxt_hwrm_port_mac_cfg ( struct bnxt *bp )
 1490{
 1491 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_port_mac_cfg_input );
 1492 struct hwrm_port_mac_cfg_input *req;
 1493
 1494 DBGP ( "%s\n", __func__ );
 1495 if ( bp->vf )
 1496 return STATUS_SUCCESS;
 1497
 1498 req = ( struct hwrm_port_mac_cfg_input * ) REQ_DMA_ADDR ( bp );
 1499 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_PORT_MAC_CFG, cmd_len );
 1501 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1502}
1503
/* Program the PHY ( HWRM_PORT_PHY_CFG ) according to the configured
 * medium speed.  Each switch arm selects forced-speed values, choosing
 * between the legacy speed fields and the LINK_SPEEDS2 fields on chips
 * that support them; the default arm enables autonegotiation over all
 * supported speeds.  NOTE(review): the case labels and the speed/flag
 * assignments inside each arm are not visible in this extract. */
1504static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp )
 1505{
 1506 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_port_phy_cfg_input );
 1507 struct hwrm_port_phy_cfg_input *req;
 1508 u32 flags;
 1509 u32 enables = 0;
 1515 u8 auto_mode = 0;
 1516 u8 auto_pause = 0;
 1517 u8 auto_duplex = 0;
 1518
 1519 DBGP ( "%s\n", __func__ );
 1520 req = ( struct hwrm_port_phy_cfg_input * ) REQ_DMA_ADDR ( bp );
 1523
 1524 switch ( GET_MEDIUM_SPEED ( bp->medium ) ) {
 1527 break;
 1529 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1532 } else {
 1534 }
 1535 break;
 1537 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1540 } else {
 1542 }
 1543 break;
 1545 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1548 } else {
 1550 }
 1551 break;
 1553 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1556 } else {
 1558 }
 1559 break;
 1561 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1564 } else {
 1567 }
 1568 break;
 1570 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1573 } else {
 1575 }
 1576 break;
 1578 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1581 } else {
 1584 }
 1585 break;
 1587 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1590 }
 1591 break;
 1593 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1596 } else {
 1599 }
 1600 break;
 1602 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1605 }
 1606 break;
 1608 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1611 }
 1612 break;
 1614 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
 1617 }
 1618 break;
 1619 default:
 1625 if ( FLAG_TEST (bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) )
 1627 else
 1632 auto_link_speed_mask = bp->support_speeds;
 1633 auto_link_speeds2_mask = bp->auto_link_speeds2_mask;
 1634 break;
 1635 }
 1636
 1637 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_PORT_PHY_CFG, cmd_len );
 1638 req->flags = flags;
 1639 req->enables = enables;
 1640 req->port_id = bp->port_idx;
 1644 req->auto_mode = auto_mode;
 1645 req->auto_duplex = auto_duplex;
 1646 req->auto_pause = auto_pause;
 1649
 1650 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1651}
1652
/* Query link state; if the link is down on a PF (and not multi-host /
 * NPAR), read the NVM speed settings, configure the PHY and re-query.
 * NOTE(review): the qcfg flag value and the phy_cfg call are not
 * visible in this extract. */
1653static int bnxt_query_phy_link ( struct bnxt *bp )
 1654{
 1656
 1657 DBGP ( "%s\n", __func__ );
 1658 /* Query Link Status */
 1660 return STATUS_FAILURE;
 1661
 1662 if ( bp->link_status == STATUS_LINK_ACTIVE )
 1663 return STATUS_SUCCESS;
 1664
 1665 /* If VF is set to TRUE, Do not issue the following commands */
 1666 if ( bp->vf )
 1667 return STATUS_SUCCESS;
 1668
 1669 /* If multi_host or NPAR, Do not issue bnxt_get_link_speed */
 1670 if ( FLAG_TEST ( bp->flags, PORT_PHY_FLAGS ) ) {
 1671 dbg_flags ( __func__, bp->flags );
 1672 return STATUS_SUCCESS;
 1673 }
 1674
 1675 /* HWRM_NVM_GET_VARIABLE - speed */
 1677 return STATUS_FAILURE;
 1678
 1679 /* Configure link if it is not up */
 1681
 1682 /* refresh link speed values after bringing link up */
 1683 return bnxt_hwrm_port_phy_qcfg ( bp, flag );
 1684}
1685
/* Poll for link-up in 100ms steps until bp->wait_link_timeout expires,
 * then report the netdev link state (unless error recovery is active).
 * NOTE(review): the per-iteration query call and delay are not visible
 * in this extract. */
1686static int bnxt_get_phy_link ( struct bnxt *bp )
 1687{
 1688 u16 i;
 1690
 1691 DBGP ( "%s\n", __func__ );
 1692 dbg_chip_info ( bp );
 1693 for ( i = 0; i < ( bp->wait_link_timeout / 100 ); i++ ) {
 1695 break;
 1696
 1697 if ( bp->link_status == STATUS_LINK_ACTIVE )
 1698 break;
 1699
 1700// if ( bp->media_detect )
 1701// break;
 1703 }
 1704 dbg_link_state ( bp, ( u32 ) ( ( i + 1 ) * 100 ) );
 1705 if ( !bp->er.er_rst_on )
 1706 bnxt_set_link ( bp );
 1707
 1708 return STATUS_SUCCESS;
 1709}
1710
1711static int bnxt_hwrm_stat_ctx_alloc ( struct bnxt *bp )
1712{
1713 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_stat_ctx_alloc_input );
1714 struct hwrm_stat_ctx_alloc_input *req;
1715 struct hwrm_stat_ctx_alloc_output *resp;
1716 int rc;
1717
1718 DBGP ( "%s\n", __func__ );
1719 req = ( struct hwrm_stat_ctx_alloc_input * ) REQ_DMA_ADDR ( bp );
1720 resp = ( struct hwrm_stat_ctx_alloc_output * ) RESP_DMA_ADDR ( bp );
1721 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_STAT_CTX_ALLOC, cmd_len );
1722 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1723 if ( rc ) {
1724 DBGP ( "- %s ( ): Failed\n", __func__ );
1725 return STATUS_FAILURE;
1726 }
1727
1728 FLAG_SET ( bp->flag_hwrm, VALID_STAT_CTX );
1729 bp->stat_ctx_id = ( u16 )resp->stat_ctx_id;
1730 return STATUS_SUCCESS;
1731}
1732
1733static int bnxt_hwrm_stat_ctx_free ( struct bnxt *bp )
1734{
1735 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_stat_ctx_free_input );
1736 struct hwrm_stat_ctx_free_input *req;
1737 int rc;
1738
1739 DBGP ( "%s\n", __func__ );
1740 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_STAT_CTX ) ) )
1741 return STATUS_SUCCESS;
1742
1743 req = ( struct hwrm_stat_ctx_free_input * ) REQ_DMA_ADDR ( bp );
1744 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_STAT_CTX_FREE, cmd_len );
1745 req->stat_ctx_id = ( u32 ) bp->stat_ctx_id;
1746 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1747 if ( rc ) {
1748 DBGP ( "- %s ( ): Failed\n", __func__ );
1749 return STATUS_FAILURE;
1750 }
1751
1752 FLAG_RESET ( bp->flag_hwrm, VALID_STAT_CTX );
1753 return STATUS_SUCCESS;
1754}
1755
1756static int bnxt_hwrm_ring_free_grp ( struct bnxt *bp )
1757{
1758 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_ring_grp_free_input );
1759 struct hwrm_ring_grp_free_input *req;
1760 int rc;
1761
1762 DBGP ( "%s\n", __func__ );
1763 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_GRP ) ) )
1764 return STATUS_SUCCESS;
1765
1766 req = ( struct hwrm_ring_grp_free_input * ) REQ_DMA_ADDR ( bp );
1767 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_RING_GRP_FREE, cmd_len );
1768 req->ring_group_id = ( u32 ) bp->ring_grp_id;
1769 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1770 if ( rc ) {
1771 DBGP ( "- %s ( ): Failed\n", __func__ );
1772 return STATUS_FAILURE;
1773 }
1774
1775 FLAG_RESET ( bp->flag_hwrm, VALID_RING_GRP );
1776 return STATUS_SUCCESS;
1777}
1778
1779static int bnxt_hwrm_ring_alloc_grp ( struct bnxt *bp )
1780{
1781 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_ring_grp_alloc_input );
1782 struct hwrm_ring_grp_alloc_input *req;
1783 struct hwrm_ring_grp_alloc_output *resp;
1784 int rc;
1785
1786 DBGP ( "%s\n", __func__ );
1787 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) )
1788 return STATUS_SUCCESS;
1789
1790 req = ( struct hwrm_ring_grp_alloc_input * ) REQ_DMA_ADDR ( bp );
1791 resp = ( struct hwrm_ring_grp_alloc_output * ) RESP_DMA_ADDR ( bp );
1792 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_RING_GRP_ALLOC, cmd_len );
1793 req->cr = bp->cq_ring_id;
1794 req->rr = bp->rx_ring_id;
1795 req->ar = ( u16 )HWRM_NA_SIGNATURE;
1796 if ( bp->vf )
1797 req->sc = bp->stat_ctx_id;
1798
1799 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1800 if ( rc ) {
1801 DBGP ( "- %s ( ): Failed\n", __func__ );
1802 return STATUS_FAILURE;
1803 }
1804
1805 FLAG_SET ( bp->flag_hwrm, VALID_RING_GRP );
1806 bp->ring_grp_id = ( u16 ) resp->ring_group_id;
1807 return STATUS_SUCCESS;
1808}
1809
1810int bnxt_hwrm_ring_free ( struct bnxt *bp, u16 ring_id, u8 ring_type )
1811{
1812 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_ring_free_input );
1813 struct hwrm_ring_free_input *req;
1814
1815 DBGP ( "%s\n", __func__ );
1816 req = ( struct hwrm_ring_free_input * ) REQ_DMA_ADDR ( bp );
1817 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_RING_FREE, cmd_len );
1818 req->ring_type = ring_type;
1819 req->ring_id = ring_id;
1820 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1821}
1822
/* Allocate one hardware ring ( HWRM_RING_ALLOC ) of the requested type
 * (NQ, CQ, TX or RX), point firmware at the ring's DMA page table, then
 * record the returned ring id and set the matching VALID_RING_* flag.
 * NOTE(review): the case labels and a few field assignments are not
 * visible in this extract. */
1823static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type )
 1824{
 1825 u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_alloc_input );
 1826 struct hwrm_ring_alloc_input *req;
 1827 struct hwrm_ring_alloc_output *resp;
 1828 int rc;
 1829
 1830 DBGP ( "%s\n", __func__ );
 1831 req = ( struct hwrm_ring_alloc_input * ) REQ_DMA_ADDR ( bp );
 1832 resp = ( struct hwrm_ring_alloc_output * ) RESP_DMA_ADDR ( bp );
 1833 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_RING_ALLOC, cmd_len );
 1834 req->ring_type = type;
 1835 switch ( type ) {
 1837 req->page_size = LM_PAGE_BITS ( 12 );
 1838 req->int_mode = BNXT_CQ_INTR_MODE ( ( (FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7) ) || bp->vf ) );
 1839 req->length = ( u32 ) bp->nq.ring_cnt;
 1840 req->logical_id = 0xFFFF; // Required value for Thor FW?
 1841 req->page_tbl_addr = NQ_DMA_ADDR ( bp );
 1842 break;
 1844 req->page_size = LM_PAGE_BITS ( 8 );
 1845 req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf );
 1846 req->length = ( u32 ) bp->cq.ring_cnt;
 1847 req->page_tbl_addr = CQ_DMA_ADDR ( bp );
 1848 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
 1849 break;
 1851 req->nq_ring_id = bp->nq_ring_id;
 1852 req->cq_handle = ( u64 ) bp->nq_ring_id;
 1853 break;
 1855 req->page_size = LM_PAGE_BITS ( 8 );
 1857 req->length = ( u32 ) bp->tx.ring_cnt;
 1858 req->queue_id = ( u16 ) bp->queue_id;
 1859 req->stat_ctx_id = ( u32 ) bp->stat_ctx_id;
 1860 req->cmpl_ring_id = bp->cq_ring_id;
 1861 req->page_tbl_addr = TX_DMA_ADDR ( bp );
 1862 break;
 1864 req->page_size = LM_PAGE_BITS ( 8 );
 1866 req->length = ( u32 ) bp->rx.ring_cnt;
 1867 req->stat_ctx_id = ( u32 ) STAT_CTX_ID;
 1868 req->cmpl_ring_id = bp->cq_ring_id;
 1869 req->page_tbl_addr = RX_DMA_ADDR ( bp );
 1870 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
 1871 break;
 1872 req->queue_id = ( u16 ) RX_RING_QID;
 1875 break;
 1876 default:
 1877 return STATUS_SUCCESS;
 1878 }
 1879 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 1880 if ( rc ) {
 1881 DBGP ( "- %s ( ): Failed, type = %x\n", __func__, type );
 1882 return STATUS_FAILURE;
 1883 }
 1884
 /* Record the new ring id under the matching validity flag */
 1886 FLAG_SET ( bp->flag_hwrm, VALID_RING_CQ );
 1887 bp->cq_ring_id = resp->ring_id;
 1888 } else if ( type == RING_ALLOC_REQ_RING_TYPE_TX ) {
 1889 FLAG_SET ( bp->flag_hwrm, VALID_RING_TX );
 1890 bp->tx_ring_id = resp->ring_id;
 1891 } else if ( type == RING_ALLOC_REQ_RING_TYPE_RX ) {
 1892 FLAG_SET ( bp->flag_hwrm, VALID_RING_RX );
 1893 bp->rx_ring_id = resp->ring_id;
 1894 } else if ( type == RING_ALLOC_REQ_RING_TYPE_NQ ) {
 1895 FLAG_SET ( bp->flag_hwrm, VALID_RING_NQ );
 1896 bp->nq_ring_id = resp->ring_id;
 1897 }
 1898 return STATUS_SUCCESS;
 1899}
1900
/* Thin wrapper: allocate the completion-queue ring.  NOTE(review): the
 * return/call line is not visible in this extract. */
1901static int bnxt_hwrm_ring_alloc_cq ( struct bnxt *bp )
 1902{
 1903 DBGP ( "%s\n", __func__ );
 1905}
1906
/* Thin wrapper: allocate the transmit ring.  NOTE(review): the
 * return/call line is not visible in this extract. */
1907static int bnxt_hwrm_ring_alloc_tx ( struct bnxt *bp )
 1908{
 1909 DBGP ( "%s\n", __func__ );
 1911}
1912
/* Thin wrapper: allocate the receive ring.  NOTE(review): the
 * return/call line is not visible in this extract. */
1913static int bnxt_hwrm_ring_alloc_rx ( struct bnxt *bp )
 1914{
 1915 DBGP ( "%s\n", __func__ );
 1917}
1918
1919static int bnxt_hwrm_ring_free_cq ( struct bnxt *bp )
1920{
1921 int ret = STATUS_SUCCESS;
1922
1923 DBGP ( "%s\n", __func__ );
1924 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_CQ ) ) )
1925 return ret;
1926
1927 ret = RING_FREE ( bp, bp->cq_ring_id, RING_FREE_REQ_RING_TYPE_L2_CMPL );
1928 if ( ret == STATUS_SUCCESS )
1929 FLAG_RESET ( bp->flag_hwrm, VALID_RING_CQ );
1930
1931 return ret;
1932}
1933
1934static int bnxt_hwrm_ring_free_tx ( struct bnxt *bp )
1935{
1936 int ret = STATUS_SUCCESS;
1937
1938 DBGP ( "%s\n", __func__ );
1939 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_TX ) ) )
1940 return ret;
1941
1942 ret = RING_FREE ( bp, bp->tx_ring_id, RING_FREE_REQ_RING_TYPE_TX );
1943 if ( ret == STATUS_SUCCESS )
1944 FLAG_RESET ( bp->flag_hwrm, VALID_RING_TX );
1945
1946 return ret;
1947}
1948
1949static int bnxt_hwrm_ring_free_rx ( struct bnxt *bp )
1950{
1951 int ret = STATUS_SUCCESS;
1952
1953 DBGP ( "%s\n", __func__ );
1954 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_RX ) ) )
1955 return ret;
1956
1957 ret = RING_FREE ( bp, bp->rx_ring_id, RING_FREE_REQ_RING_TYPE_RX );
1958 if ( ret == STATUS_SUCCESS )
1959 FLAG_RESET ( bp->flag_hwrm, VALID_RING_RX );
1960
1961 return ret;
1962}
1963
/* Thin wrapper: allocate the notification-queue ring on P5+ chips; a
 * no-op on earlier chips.  NOTE(review): the return/call line is not
 * visible in this extract. */
1964static int bnxt_hwrm_ring_alloc_nq ( struct bnxt *bp )
 1965{
 1966 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
 1967 return STATUS_SUCCESS;
 1969}
1970
1971static int bnxt_hwrm_ring_free_nq ( struct bnxt *bp )
1972{
1973 int ret = STATUS_SUCCESS;
1974
1975 if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
1976 return STATUS_SUCCESS;
1977
1978 DBGP ( "%s\n", __func__ );
1979 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_NQ ) ) )
1980 return ret;
1981
1982 ret = RING_FREE ( bp, bp->nq_ring_id, RING_FREE_REQ_RING_TYPE_NQ );
1983 if ( ret == STATUS_SUCCESS )
1984 FLAG_RESET ( bp->flag_hwrm, VALID_RING_NQ );
1985
1986 return ret;
1987}
1988
/* Allocate a VNIC ( HWRM_VNIC_ALLOC ), record the returned id and mark
 * VALID_VNIC_ID.  NOTE(review): a flags assignment between hwrm_init
 * and wait_resp is not visible in this extract. */
1989static int bnxt_hwrm_vnic_alloc ( struct bnxt *bp )
 1990{
 1991 u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_alloc_input );
 1992 struct hwrm_vnic_alloc_input *req;
 1993 struct hwrm_vnic_alloc_output *resp;
 1994 int rc;
 1995
 1996 DBGP ( "%s\n", __func__ );
 1997 req = ( struct hwrm_vnic_alloc_input * ) REQ_DMA_ADDR ( bp );
 1998 resp = ( struct hwrm_vnic_alloc_output * ) RESP_DMA_ADDR ( bp );
 1999 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_VNIC_ALLOC, cmd_len );
 2001 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 2002 if ( rc ) {
 2003 DBGP ( "- %s ( ): Failed\n", __func__ );
 2004 return STATUS_FAILURE;
 2005 }
 2006
 2007 FLAG_SET ( bp->flag_hwrm, VALID_VNIC_ID );
 2008 bp->vnic_id = resp->vnic_id;
 2009 return STATUS_SUCCESS;
 2010}
2011
2012static int bnxt_hwrm_vnic_free ( struct bnxt *bp )
2013{
2014 u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_free_input );
2015 struct hwrm_vnic_free_input *req;
2016 int rc;
2017
2018 DBGP ( "%s\n", __func__ );
2019 if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_VNIC_ID ) ) )
2020 return STATUS_SUCCESS;
2021
2022 req = ( struct hwrm_vnic_free_input * ) REQ_DMA_ADDR ( bp );
2023 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_VNIC_FREE, cmd_len );
2024 req->vnic_id = bp->vnic_id;
2025 rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
2026 if ( rc ) {
2027 DBGP ( "- %s ( ): Failed\n", __func__ );
2028 return STATUS_FAILURE;
2029 }
2030
2031 FLAG_RESET ( bp->flag_hwrm, VALID_VNIC_ID );
2032 return STATUS_SUCCESS;
2033}
2034
/* Configure the VNIC ( HWRM_VNIC_CFG ): set the MRU and bind it to the
 * default RX/CQ rings (P5+) or to the ring group (older chips).
 * NOTE(review): the enables assignments are not visible in this
 * extract. */
2035static int bnxt_hwrm_vnic_cfg ( struct bnxt *bp )
 2036{
 2037 u16 cmd_len = ( u16 ) sizeof ( struct hwrm_vnic_cfg_input );
 2038 struct hwrm_vnic_cfg_input *req;
 2039
 2040 DBGP ( "%s\n", __func__ );
 2041 req = ( struct hwrm_vnic_cfg_input * ) REQ_DMA_ADDR ( bp );
 2042 hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_VNIC_CFG, cmd_len );
 2044 req->mru = bp->mtu;
 2045
 2046 if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) {
 2049 req->default_rx_ring_id = bp->rx_ring_id;
 2050 req->default_cmpl_ring_id = bp->cq_ring_id;
 2051 } else {
 2053 req->dflt_ring_grp = bp->ring_grp_id;
 2054 }
 2055
 2056 req->vnic_id = bp->vnic_id;
 2057 return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 2058}
2059
2060static int bnxt_set_rx_mask ( struct bnxt *bp )
2061{
2062 return bnxt_hwrm_set_rx_mask ( bp, RX_MASK );
2063}
2064
/**
 * Clear all receive filters on the VNIC (mask of zero).
 *
 * @v bp	Driver context
 * @ret rc	Result of bnxt_hwrm_set_rx_mask()
 */
static int bnxt_reset_rx_mask ( struct bnxt *bp )
{
	return bnxt_hwrm_set_rx_mask ( bp, 0 );
}
2069
/* Refresh the cached link state.  NOTE(review): the call that assigns
 * rc is not visible in this extract. */
2070static int bnxt_get_link_state ( struct bnxt *bp )
 2071{
 2072 int rc = 0;
 2073
 2074 DBGP ( "%s \n", __func__ );
 2076
 2077 return rc;
 2078}
2079
/* One step of an HWRM bring-up/bring-down sequence: each entry issues a
 * single command against bp and returns 0 on success. */
2080typedef int ( *hwrm_func_t ) ( struct bnxt *bp );
 2081
/* Early initialisation sequence (version, capabilities, MAC address);
 * NULL-terminated.  NOTE(review): the array declaration line is not
 * visible in this extract. */
 2083 bnxt_hwrm_ver_get, /* HWRM_VER_GET */
 2084 bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */
 2085 bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */
 2086 bnxt_get_device_address, /* HW MAC address */
 2088 NULL
 2089};
2090
/* Minimal teardown sequence: just unregister the driver from firmware;
 * NULL-terminated.  NOTE(review): the array declaration line is not
 * visible in this extract. */
 2092 bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */
 2093 NULL,
 2094};
2095
/* Full NIC teardown sequence, run in reverse order of bring-up:
 * filters, VNIC, ring group, rings, stats context, then driver
 * unregistration; NULL-terminated.  NOTE(review): the array declaration
 * line is not visible in this extract. */
 2097 bnxt_hwrm_cfa_l2_filter_free, /* HWRM_CFA_L2_FILTER_FREE */
 2099 bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */
 2100 bnxt_free_rx_iob, /* HWRM_FREE_IOB */
 2101 bnxt_hwrm_vnic_free, /* HWRM_VNIC_FREE */
 2102 bnxt_hwrm_ring_free_grp, /* HWRM_RING_GRP_FREE */
 2103 bnxt_hwrm_ring_free_rx, /* HWRM_RING_FREE - RX Ring */
 2104 bnxt_hwrm_ring_free_tx, /* HWRM_RING_FREE - TX Ring */
 2105 bnxt_hwrm_stat_ctx_free, /* HWRM_STAT_CTX_FREE */
 2106 bnxt_hwrm_ring_free_cq, /* HWRM_RING_FREE - CQ Ring */
 2107 bnxt_hwrm_ring_free_nq, /* HWRM_RING_FREE - NQ Ring */
 2108 bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */
 2109 NULL,
 2110};
/* Chip bring-up sequence: version handshake, function reset,
 * capabilities, driver registration, error-recovery config, backing
 * store, PHY capabilities, then link query; NULL-terminated.
 * NOTE(review): the array declaration line is not visible in this
 * extract. */
 2112 bnxt_hwrm_ver_get, /* HWRM_VER_GET */
 2113 bnxt_hwrm_func_reset_req, /* HWRM_FUNC_RESET */
 2114 bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */
 2115 bnxt_hwrm_func_drv_rgtr, /* HWRM_FUNC_DRV_RGTR */
 2116 bnxt_hwrm_error_recovery_req, /* HWRM_ERROR_RECOVERY_REQ */
 2117 bnxt_hwrm_backing_store_cfg, /* HWRM_FUNC_BACKING_STORE_CFG */
 2118 bnxt_hwrm_backing_store_qcfg, /* HWRM_FUNC_BACKING_STORE_QCFG */
 2119 bnxt_hwrm_func_resource_qcaps, /* HWRM_FUNC_RESOURCE_QCAPS */
 2120 bnxt_hwrm_port_phy_qcaps_req, /* HWRM_PORT_PHY_QCAPS */
 2121 bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */
 2122 bnxt_hwrm_port_mac_cfg, /* HWRM_PORT_MAC_CFG */
 2123 bnxt_hwrm_func_cfg_req, /* HWRM_FUNC_CFG */
 2124 bnxt_query_phy_link, /* HWRM_PORT_PHY_QCFG */
 2125 bnxt_get_device_address, /* HW MAC address */
 2126 NULL,
 2127};
2128
/* NIC bring-up sequence: stats context, queue config, all rings, VNIC,
 * RX buffers, async events, filters, link and RX mask; NULL-terminated.
 * NOTE(review): the array declaration line is not visible in this
 * extract. */
 2130 bnxt_hwrm_stat_ctx_alloc, /* HWRM_STAT_CTX_ALLOC */
 2131 bnxt_hwrm_queue_qportcfg, /* HWRM_QUEUE_QPORTCFG */
 2132 bnxt_hwrm_ring_alloc_nq, /* HWRM_RING_ALLOC - NQ Ring */
 2133 bnxt_hwrm_ring_alloc_cq, /* HWRM_RING_ALLOC - CQ Ring */
 2134 bnxt_hwrm_ring_alloc_tx, /* HWRM_RING_ALLOC - TX Ring */
 2135 bnxt_hwrm_ring_alloc_rx, /* HWRM_RING_ALLOC - RX Ring */
 2136 bnxt_hwrm_ring_alloc_grp, /* HWRM_RING_GRP_ALLOC - Group */
 2137 bnxt_hwrm_vnic_alloc, /* HWRM_VNIC_ALLOC */
 2138 bnxt_post_rx_buffers, /* Post RX buffers */
 2139 bnxt_hwrm_set_async_event, /* ENABLES_ASYNC_EVENT_CR */
 2140 bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */
 2141 bnxt_hwrm_cfa_l2_filter_alloc, /* HWRM_CFA_L2_FILTER_ALLOC */
 2142 bnxt_get_phy_link, /* HWRM_PORT_PHY_QCFG - PhyLink */
 2143 bnxt_set_rx_mask, /* HWRM_CFA_L2_SET_RX_MASK */
 2144 NULL,
 2145};
2146
2147int bnxt_hwrm_run ( hwrm_func_t cmds[], struct bnxt *bp )
2148{
2149 hwrm_func_t *ptr;
2150 int ret;
2151
2152 for ( ptr = cmds; *ptr; ++ptr ) {
2153 memset ( ( void * ) REQ_DMA_ADDR ( bp ), 0, REQ_BUFFER_SIZE );
2154 memset ( ( void * ) RESP_DMA_ADDR ( bp ), 0, RESP_BUFFER_SIZE );
2155 ret = ( *ptr ) ( bp );
2156 if ( ret ) {
2157 DBGP ( "- %s ( ): Failed\n", __func__ );
2158 return STATUS_FAILURE;
2159 }
2160 }
2161 return STATUS_SUCCESS;
2162}
2163
/* Convenience wrappers running the command tables above through
 * bnxt_hwrm_run(). */
2164#define bnxt_down_chip( bp ) bnxt_hwrm_run ( bring_down_chip, bp )
 2165#define bnxt_up_chip( bp ) bnxt_hwrm_run ( bring_up_chip, bp )
 2166#define bnxt_down_nic( bp ) bnxt_hwrm_run ( bring_down_nic, bp )
 2167#define bnxt_up_nic( bp ) bnxt_hwrm_run ( bring_up_nic, bp )
 2168#define bnxt_up_init( bp ) bnxt_hwrm_run ( bring_up_init, bp )
2169
/* iPXE netdev open(): allocate ring memory, initialise the NIC data
 * structures, then run the chip and NIC bring-up sequences.  On any
 * failure the NIC is brought back down.  NOTE(review): one cleanup call
 * in the error path is not visible in this extract. */
2170static int bnxt_open ( struct net_device *dev )
 2171{
 2172 struct bnxt *bp = dev->priv;
 2173
 2174 DBGP ( "%s\n", __func__ );
 2175
 2176 /* Allocate and Initialise device specific parameters */
 2177 if ( bnxt_alloc_rings_mem ( bp ) != 0 ) {
 2178 DBGP ( "- %s ( ): bnxt_alloc_rings_mem Failed\n", __func__ );
 2179 return -ENOMEM;
 2180 }
 2181
 2182 bnxt_mm_nic ( bp );
 2183
 2184 if ( bnxt_up_chip ( bp ) != 0 ) {
 2185 DBGP ( "- %s ( ): bnxt_up_chip Failed\n", __func__ );
 2186 goto err_bnxt_open;
 2187 }
 2188
 2189 if ( bnxt_up_nic ( bp ) != 0 ) {
 2190 DBGP ( "- %s ( ): bnxt_up_nic\n", __func__);
 2191 goto err_bnxt_open;
 2192 }
 2193
 2194 return 0;
 2195
 2196err_bnxt_open:
 2197 bnxt_down_nic ( bp );
 2198
 2200
 2201 return -1;
 2202}
2203
2204static void bnxt_tx_adjust_pkt ( struct bnxt *bp, struct io_buffer *iob )
2205{
2206 u16 prev_len = iob_len ( iob );
2207
2208 bp->vlan_tx = bnxt_get_pkt_vlan ( ( char * )iob->data );
2209 if ( !bp->vlan_tx && bp->vlan_id )
2210 bnxt_add_vlan ( iob, bp->vlan_id );
2211
2212 dbg_tx_vlan ( bp, ( char * )iob->data, prev_len, iob_len ( iob ) );
2213 if ( iob_len ( iob ) != prev_len )
2214 prev_len = iob_len ( iob );
2215
2216}
2217
2218static int bnxt_tx ( struct net_device *dev, struct io_buffer *iob )
2219{
2220 struct bnxt *bp = dev->priv;
2221 u16 len, entry;
2222 physaddr_t mapping;
2223
2224 if ( bp->er.er_rst_on ) {
2225 /* Error recovery has been initiated */
2226 return -EBUSY;
2227 }
2228
2229 if ( bnxt_tx_avail ( bp ) < 1 ) {
2230 DBGP ( "- %s ( ): Failed no bd's available\n", __func__ );
2231 return -ENOBUFS;
2232 }
2233
2234 mapping = iob_dma ( iob );
2235 bnxt_tx_adjust_pkt ( bp, iob );
2236 entry = bp->tx.prod_id;
2237 len = iob_len ( iob );
2238 bp->tx.iob[entry] = iob;
2239 bnxt_set_txq ( bp, entry, mapping, len );
2240 entry = NEXT_IDX ( entry, bp->tx.ring_cnt );
2241 /* If the ring has wrapped, toggle the epoch bit */
2242 if ( bp->tx.prod_id > entry )
2243 bp->tx.epoch ^= 1;
2244 dump_tx_pkt ( ( u8 * ) iob->data, len, bp->tx.prod_id );
2245 /* Packets are ready, update Tx producer idx local and on card. */
2246 bnxt_db_tx ( bp, ( u32 ) entry );
2247 bp->tx.prod_id = entry;
2248 bp->tx.cnt_req++;
2249 /* memory barrier */
2250 mb ( );
2251 return 0;
2252}
2253
2254static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt )
2255{
2256 u16 cons_id;
2257
2258 cons_id = bp->nq.cons_id + cnt;
2259 if ( cons_id >= bp->nq.ring_cnt ) {
2260 /* Toggle completion bit when the ring wraps. */
2261 bp->nq.completion_bit ^= 1;
2262 bp->nq.epoch ^= 1;
2263 cons_id = cons_id - bp->nq.ring_cnt;
2264 }
2265 bp->nq.cons_id = cons_id;
2266}
2267
2268void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt )
2269{
2270 if ( evt->event_data1 & 0x01 )
2271 bp->link_status = STATUS_LINK_ACTIVE;
2272 else
2273 bp->link_status = STATUS_LINK_DOWN;
2274
2275 bnxt_set_link ( bp );
2276 dbg_link_status ( bp );
2277}
2278
2279#define BNXT_FW_HEALTH_WIN_OFF 0x3000
2280#define BNXT_REG_WINDOW_BASE 0x400
2281#define BNXT_GRC_BASE_MASK 0xfff
2282#define BNXT_GRC_OFFSET_MASK 0xffc
2283
2284u32 bnxt_er_reg_write ( struct bnxt *bp, u32 reg_addr, u32 reg_val)
2285{
2286 u32 reg_base = 0;
2287
2288 reg_base = reg_addr & ~BNXT_GRC_BASE_MASK;
2289
2290 writel ( reg_base, bp->bar0 + BNXT_REG_WINDOW_BASE + 8 );
2291
2292 writel ( reg_val, bp->bar0 + ( BNXT_FW_HEALTH_WIN_OFF +
2293 ( reg_addr & BNXT_GRC_OFFSET_MASK ) ) );
2294
2295 DBGP ("bnxt_er_reg_write: reg_addr = %x, reg_val = %x\n", reg_addr, reg_val);
2296 return reg_val;
2297}
2298
2299u32 bnxt_er_reg_read ( struct bnxt *bp, u32 reg_addr)
2300{
2301 u32 reg_val = 0;
2302 u32 reg_base = 0;
2303
2304 reg_base = reg_addr & ~BNXT_GRC_BASE_MASK;
2305
2306 writel ( reg_base, bp->bar0 + BNXT_REG_WINDOW_BASE + 8 );
2307
2308 reg_val = readl ( bp->bar0 + ( BNXT_FW_HEALTH_WIN_OFF +
2309 ( reg_addr & BNXT_GRC_OFFSET_MASK ) ) );
2310
2311 DBGP ("bnxt_er_reg_read: reg_addr = %x, reg_val = %x\n", reg_addr, reg_val);
2312 return reg_val;
2313}
2314
/* Read a register from the address space selected by reg_type: PCI
 * config space, GRC (via window), BAR0 or BAR1. Returns the value
 * read, or 0 for an unrecognised reg_type.
 * NOTE(review): Doxygen extraction dropped the hyperlinked `case`
 * label lines (original lines 2320/2323/2326/2329), so the switch
 * arms below appear without selectors — consult the original bnxt.c.
 */
2315u32 bnxt_er_get_reg_val ( struct bnxt *bp, u32 reg_addr, u32 reg_type, u32 mask )
2316{
2317	u32 reg_val = 0;
2318
2319	switch ( reg_type ) {
	/* case <PCI config space> (label line missing in extraction) */
2321		pci_read_config_dword ( bp->pdev, reg_addr & mask, &reg_val );
2322		break;
	/* case <GRC window read> (label line missing in extraction) */
2324		reg_val = bnxt_er_reg_read ( bp, reg_addr );
2325		break;
	/* case <BAR0> (label line missing in extraction) */
2327		reg_val = readl ( bp->bar0 + ( reg_addr & mask ) );
2328		break;
	/* case <BAR1> (label line missing in extraction) */
2330		reg_val = readl ( bp->bar1 + ( reg_addr & mask ) );
2331		break;
2332	default:
2333		break;
2334	}
2335	DBGP ( "read_reg_val bp %p addr %x type %x : reg_val = %x\n", bp, reg_addr, reg_type, reg_val );
2336	return reg_val;
2337}
2338
/* Write a reset-register value into the address space encoded in the
 * upper bits of reg_addr (PCI config, GRC window, BAR0 or BAR1).
 * NOTE(review): extraction dropped original line 2341 (presumably the
 * declaration of `mask`, used below) and the `case` label lines
 * (2345/2348/2351/2354) — consult the original bnxt.c before editing.
 */
2339void bnxt_rst_reg_val ( struct bnxt *bp, u32 reg_addr, u32 reg_val )
2340{
2342	u32 reg_type = reg_addr & ER_QCFG_RESET_REG_ADDR_SPACE_MASK;
2343
2344	switch ( reg_type ) {
	/* case <PCI config space> (label line missing in extraction) */
2346		pci_write_config_dword ( bp->pdev, reg_addr & mask, reg_val );
2347		break;
	/* case <GRC window write> (label line missing in extraction) */
2349		bnxt_er_reg_write ( bp, reg_addr, reg_val );
2350		break;
	/* case <BAR0> (label line missing in extraction) */
2352		writel ( reg_val, bp->bar0 + ( reg_addr & mask ) );
2353		break;
	/* case <BAR1> (label line missing in extraction) */
2355		writel ( reg_val, bp->bar1 + ( reg_addr & mask ) );
2356		break;
2357	default:
2358		break;
2359	}
2360}
2361
/* Walk the firmware-supplied reset-register array, writing each value
 * and honouring the per-entry post-write delay (delay units appear to
 * be 100ms — TODO confirm against the HWRM error-recovery spec).
 * NOTE(review): the signature line (original line 2362,
 * `void bnxt_rst_er_registers ( struct bnxt *bp )` per the file's
 * cross-reference index) was dropped by the extraction.
 */
2363{
2364	u32 delay_time = 0;
2365	u8 i;
2366
2367	for ( i = 0; i < bp->er.reg_array_cnt; i++ ) {
2368		bnxt_rst_reg_val ( bp, bp->er.rst_reg[i], bp->er.rst_reg_val[i] );
2369
2370		delay_time = bp->er.delay_after_rst[i];
2371		if ( delay_time ) {
2372			udelay ( delay_time * 100000 );
2373		}
2374	}
2375
2376}
2377
/* Firmware error-recovery worker. When hb_task is set this runs as the
 * heartbeat monitor: if the firmware heartbeat counter advanced, it
 * just records it and returns; a stalled heartbeat (or hb_task == 0,
 * i.e. a firmware-requested reset) triggers recovery: disable bus
 * mastering, free RX buffers, wait for firmware, optionally reset
 * registers (master PF only), poll for FW readiness, then rebuild the
 * NIC state. The er_rst_on flag gates TX during the whole sequence.
 * NOTE(review): extraction dropped original lines 2396/2444 (trailing
 * mask arguments of the bnxt_er_get_reg_val calls) and 2430 (the
 * reset-registers call, likely bnxt_rst_er_registers(bp)).
 */
2378void bnxt_er_task ( struct bnxt* bp, u8 hb_task )
2379{
2380	u32 present_hb_cnt;
2381	unsigned short pci_command, new_command;
2382	u8 i;
2383
2384	DBGP ( "%s(hb_task: %d)\n", __func__, hb_task );
2385	if ( bp->er.er_rst_on ) {
2386		if ( timer_running ( &bp->wait_timer) ) {
2387			/* Reset already in progress */
2388			return;
2389		}
2390	}
2391
2392	if ( hb_task ) {
2393		present_hb_cnt = bnxt_er_get_reg_val ( bp,
2394				bp->er.fw_hb_reg,
2395				bp->er.fw_hb_reg & ER_QCFG_FW_HB_REG_ADDR_SPACE_MASK,
2397
2398		if ( present_hb_cnt != bp->er.last_fw_hb ) {
2399			bp->er.last_fw_hb = present_hb_cnt;
2400			return;
2401		}
2402	}
2403
2404	/* Heartbeat not incrementing, trigger error recovery */
2405	DBGP ( "%s(): Trigger Error Recovery\n", __func__ );
2406	bp->er.er_rst_on = 1;
2407	/* Set a recovery phase wait timer */
2408	start_timer_fixed ( &bp->wait_timer, BNXT_ER_WAIT_TIMER_INTERVAL ( bp ) );
2409
2410	/* Disable bus master */
2411	pci_read_config_word ( bp->pdev, PCI_COMMAND, &pci_command );
2412	new_command = pci_command & ~PCI_COMMAND_MASTER;
2413	pci_write_config_word ( bp->pdev, PCI_COMMAND, new_command );
2414
2415	/* Free up resources */
2416	bnxt_free_rx_iob ( bp );
2417
2418	/* wait for firmware to be operational */
2419	udelay ( bp->er.rst_min_dsecs * 100000 );
2420
2421	/* Reconfigure the PCI attributes */
2422	pci_write_config_word ( bp->pdev, PCI_COMMAND, pci_command );
2423
2424	if ( hb_task ) {
2425		if ( bp->er.master_pf ) {
2426			/* wait for master func wait period */
2427			udelay ( bp->er.master_wait_period * 100000 );
2428
2429			/* Reset register values */
2431
2432			/* wait for master wait post reset */
2433			udelay ( bp->er.master_wait_post_rst * 100000 );
2434		} else {
2435			/* wait for normal func wait period */
2436			udelay ( bp->er.normal_wait_period * 100000 );
2437		}
2438	}
2439
2440	for ( i = 0; i < bp->er.max_bailout_post_rst; i++ ) {
2441		bp->er.fw_health_status = bnxt_er_get_reg_val ( bp,
2442				bp->er.fw_status_reg,
2443				bp->er.fw_status_reg & ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_MASK,
2445
2446		if ( bp->er.fw_health_status == FW_STATUS_REG_CODE_READY )
2447			break;
2448
2449		/* wait for 1 second */
2450		udelay ( 1000000 );
2451	}
2452
2453	if ( bp->er.fw_health_status == FW_STATUS_REG_CODE_READY ) {
2454		/* Initialize resources */
2455		bnxt_mm_nic ( bp );
2456
2457		/* Get device specific information */
2458		bnxt_up_chip ( bp );
2459
2460		/* Allocate queues */
2461		bnxt_up_nic ( bp );
2462	}
2463
2464	/* Clear Reset in progress flag */
2465	bp->er.er_rst_on = 0;
2466	stop_timer ( &bp->wait_timer );
2467}
2468
/* Process an error-recovery capability async event: start or stop the
 * heartbeat-monitor timer depending on whether driver-initiated
 * recovery is enabled, record master-PF role, and snapshot the
 * firmware health/heartbeat/reset-count registers as baselines.
 * NOTE(review): extraction dropped the signature line (original 2469,
 * `void bnxt_process_er_event ( struct bnxt *bp, ...` per the index),
 * the flag constants tested at 2473/2483, and the trailing mask
 * arguments at 2492/2498/2502/2506 — consult the original bnxt.c.
 */
2470		struct hwrm_async_event_cmpl *evt )
2471{
2472	if ( evt->event_data1 &
2474		bp->er.driver_initiated_recovery = 1;
2475		start_timer_fixed ( &bp->task_timer, BNXT_ER_TIMER_INTERVAL ( bp ) );
2476
2477	} else {
2478		bp->er.driver_initiated_recovery = 0;
2479		stop_timer ( &bp->task_timer );
2480	}
2481
2482	if ( evt->event_data1 &
2484		bp->er.master_pf = 1;
2485	} else {
2486		bp->er.master_pf = 0;
2487	}
2488
2489	bp->er.fw_health_status = bnxt_er_get_reg_val ( bp,
2490			bp->er.fw_status_reg,
2491			bp->er.fw_status_reg & ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_MASK,
2493	/* Initialize the last fw heart beat count */
2494	bp->er.last_fw_hb = 0;
2495	bp->er.last_fw_rst_cnt = bnxt_er_get_reg_val ( bp,
2496			bp->er.fw_rst_cnt_reg,
2497			bp->er.fw_rst_cnt_reg & ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_MASK,
2499	bp->er.rst_in_progress = bnxt_er_get_reg_val ( bp,
2500			bp->er.rst_inprg_reg,
2501			bp->er.rst_inprg_reg & ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_MASK,
2503	bp->er.err_recovery_cnt = bnxt_er_get_reg_val ( bp,
2504			bp->er.recvry_cnt_reg,
2505			bp->er.recvry_cnt_reg & ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_MASK,
2507}
2508
/* Process a firmware reset-notify async event: capture the min/max
 * reset wait times (falling back to defaults when zero) and arm the
 * er_initiate flag so the poll loop triggers recovery.
 * NOTE(review): extraction dropped the signature line (original 2509,
 * `void bnxt_process_reset_notify_event ( struct bnxt *bp, ...` per
 * the index) and the condition constant lines 2514-2515.
 */
2510		struct hwrm_async_event_cmpl *evt )
2511{
2512	DBGP ( "Reset Notify Async event" );
2513	if ( ( ( evt->event_data1 ) &
2516		DBGP ( " error recovery initiated\n" );
2517		bp->er.rst_min_dsecs = evt->timestamp_lo;
2518		bp->er.rst_max_dsecs = evt->timestamp_hi;
2519
2520		if ( bp->er.rst_min_dsecs == 0 )
2521			bp->er.rst_min_dsecs = ER_DFLT_FW_RST_MIN_DSECS;
2522
2523		if ( bp->er.rst_max_dsecs == 0 )
2524			bp->er.rst_max_dsecs = ER_DFLT_FW_RST_MAX_DSECS;
2525
2526		// Trigger Error recovery
2527		bp->er.er_initiate = 1;
2528	}
2529}
2530
/* Handle a link-speed async event: after some (missing) filtering it
 * refreshes the cached link state and emits debug output.
 * NOTE(review): this function is badly mangled by the extraction — the
 * signature (original 2531, `void bnxt_link_speed_evt ( struct bnxt
 * *bp, struct hwrm_async_event_cmpl *evt )` per the index) and the
 * condition/argument lines 2533/2535/2538 are missing. Do not edit
 * from this copy; consult the original bnxt.c.
 */
2532{
2534		DBGP ("bnxt_link_speed_evt: event data = %lx\n",
2536	}
2537
2539		return;
2540	}
2541
2542	bnxt_set_link ( bp );
2543	dbg_link_info ( bp );
2544	dbg_link_status ( bp );
2545}
2546
2560
2575
/* Drain the completion queue: walk valid descriptors (validity is the
 * V bit XOR the ring's completion bit), dispatching TX completions,
 * RX completions and async events, then ring the CQ doorbell if the
 * consumer index moved.
 * NOTE(review): extraction dropped original line 2582 (declaration of
 * `done`, initialised to SERVICE_NEXT_CQ_BD per the header constants)
 * and all hyperlinked `case CMPL_BASE_TYPE_*` / event-id label lines
 * plus the handler-call first lines (2599/2603/2606-07/2611/2614/
 * 2618-19/2622-23/2626-27/2630-31/2634-35/2644). The inner switch arms
 * below are therefore missing their event selectors and callee names.
 */
2576static void bnxt_service_cq ( struct net_device *dev )
2577{
2578	struct bnxt *bp = dev->priv;
2579	struct cmpl_base *cmp;
2580	struct tx_cmpl *tx;
2581	u16 old_cid = bp->cq.cons_id;
2583	u32 cq_type;
2584	struct hwrm_async_event_cmpl *evt;
2585
2586	while ( done == SERVICE_NEXT_CQ_BD ) {
2587		cmp = ( struct cmpl_base * ) BD_NOW ( CQ_DMA_ADDR ( bp ),
2588				bp->cq.cons_id,
2589				sizeof ( struct cmpl_base ) );
2590
2591		if ( ( cmp->info3_v & CMPL_BASE_V ) ^ bp->cq.completion_bit )
2592			break;
2593
2594		cq_type = cmp->type & CMPL_BASE_TYPE_MASK;
2595		dump_evt ( ( u8 * ) cmp, cq_type, bp->cq.cons_id, 0 );
2596		dump_cq ( cmp, bp->cq.cons_id, bp->nq.toggle );
2597
2598		switch ( cq_type ) {
	/* case <TX completion> (label line missing in extraction) */
2600			tx = ( struct tx_cmpl * ) cmp;
2601			bnxt_tx_complete ( dev, ( u16 ) tx->opaque );
2602		/* Fall through */
2604			bnxt_adv_cq_index ( bp, 1 );
2605			break;
	/* case <RX completion> (label line missing in extraction) */
2608			done = bnxt_rx_complete ( dev,
2609					( struct rx_pkt_cmpl * ) cmp );
2610			break;
	/* case <async event> (label line missing in extraction) */
2612			evt = ( struct hwrm_async_event_cmpl * ) cmp;
2613			switch ( evt->event_id ) {
	/* event-id case labels below were dropped by the extraction */
2615				bnxt_link_evt ( bp,
2616					( struct hwrm_async_event_cmpl * ) cmp );
2617				break;
2620					( struct hwrm_async_event_cmpl * ) cmp );
2621				break;
2624					( struct hwrm_async_event_cmpl * ) cmp );
2625				break;
2628					( struct hwrm_async_event_cmpl * ) cmp );
2629				break;
2632					( struct hwrm_async_event_cmpl * ) cmp );
2633				break;
2636					( struct hwrm_async_event_cmpl * ) cmp );
2637				break;
2638			default:
2639				break;
2640			}
2641			bnxt_adv_cq_index ( bp, 1 );
2642			break;
2643		default:
2645			break;
2646		}
2647	}
2648
2649	if ( bp->cq.cons_id != old_cid )
2650		bnxt_db_cq ( bp );
2651}
2652
/* Drain the notification queue (P5+ chips only): walk valid NQ
 * descriptors, record the toggle bits, dispatch async events and CQ
 * notifications, and ring the NQ doorbell if the consumer index moved.
 * NOTE(review): extraction dropped original line 2658 (declaration of
 * `done`, presumably initialised to SERVICE_NEXT_NQ_BD) and the
 * hyperlinked `case` / event-id label lines plus handler-call first
 * lines (2677/2680/2684-85/2688-89/2692-93/2696-97/2700-01/2709/2713).
 * The switch arms below are missing their selectors and callee names.
 */
2653static void bnxt_service_nq ( struct net_device *dev )
2654{
2655	struct bnxt *bp = dev->priv;
2656	struct nq_base *nqp;
2657	u16 old_cid = bp->nq.cons_id;
2659	u32 nq_type;
2660	struct hwrm_async_event_cmpl *evt;
2661
2662	if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
2663		return;
2664
2665	while ( done == SERVICE_NEXT_NQ_BD ) {
2666		nqp = ( struct nq_base * ) BD_NOW ( NQ_DMA_ADDR ( bp ),
2667				bp->nq.cons_id,
2668				sizeof ( struct nq_base ) );
2669		if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit )
2670			break;
2671		nq_type = ( nqp->type & NQ_CN_TYPE_MASK );
2672		bp->nq.toggle = ( ( nqp->type & NQ_CN_TOGGLE_MASK ) >> NQ_CN_TOGGLE_SFT );
2673		dump_evt ( ( u8 * )nqp, nq_type, bp->nq.cons_id, 1 );
2674		dump_nq ( nqp, bp->nq.cons_id );
2675
2676		switch ( nq_type ) {
	/* case <async event> (label line missing in extraction) */
2678			evt = ( struct hwrm_async_event_cmpl * ) nqp;
2679			switch ( evt->event_id ) {
	/* event-id case labels below were dropped by the extraction */
2681				bnxt_link_evt ( bp,
2682					( struct hwrm_async_event_cmpl * ) nqp );
2683				break;
2686					( struct hwrm_async_event_cmpl * ) nqp );
2687				break;
2690					( struct hwrm_async_event_cmpl * ) nqp );
2691				break;
2694					( struct hwrm_async_event_cmpl * ) nqp );
2695				break;
2698					( struct hwrm_async_event_cmpl * ) nqp );
2699				break;
2702					( struct hwrm_async_event_cmpl * ) nqp );
2703				break;
2704			default:
2705				break;
2706			}
2707			bnxt_adv_nq_index ( bp, 1 );
2708			break;
	/* case <CQ notification> (label line missing in extraction) */
2710			bnxt_adv_nq_index ( bp, 1 );
2711			break;
2712		default:
2714			break;
2715		}
2716	}
2717
2718	if ( bp->nq.cons_id != old_cid )
2719		bnxt_db_nq ( bp );
2720}
2721
/* Periodic heartbeat-monitor timer callback: re-arms itself and, when
 * driver-initiated recovery is enabled, runs the heartbeat check.
 * NOTE(review): extraction dropped original line 2727 (the timer
 * restart call, presumably start_timer_fixed on task_timer).
 */
2722static void bnxt_er_task_timer ( struct retry_timer *timer, int over __unused )
2723{
2724	struct bnxt *bp = container_of ( timer, struct bnxt, task_timer );
2725
2726	/* Restart timer */
2728	if ( bp->er.driver_initiated_recovery ) {
2729		bnxt_er_task ( bp, 1 );
2730	}
2731}
2732
2733static void bnxt_er_wait_timer ( struct retry_timer *timer, int over __unused )
2734{
2735 struct bnxt *bp = container_of (timer, struct bnxt, wait_timer );
2736 /* The sole function of this timer is to wait for the specified
2737 * amount of time to complete error recovery phase
2738 */
2739 stop_timer ( &bp->wait_timer );
2740 return;
2741}
2742
2743static void bnxt_poll ( struct net_device *dev )
2744{
2745 struct bnxt *bp = dev->priv;
2746
2747 mb ( );
2748 bnxt_service_nq ( dev );
2749 bnxt_service_cq ( dev );
2750
2751 if ( bp->er.er_initiate ) {
2752 bnxt_er_task ( bp, 0 );
2753 bp->er.er_initiate = 0;
2754 }
2755
2756}
2757
/* Bring the interface down: stop the error-recovery timers and run the
 * NIC teardown HWRM sequence.
 * NOTE(review): extraction dropped original line 2768 (likely the chip
 * bring-down call between bnxt_down_nic and the closing brace).
 */
2758static void bnxt_close ( struct net_device *dev )
2759{
2760	struct bnxt *bp = dev->priv;
2761
2762	DBGP ( "%s\n", __func__ );
2763	stop_timer ( &bp->task_timer );
2764	stop_timer ( &bp->wait_timer );
2765
2766	bnxt_down_nic ( bp );
2767
2769

2770}
2771
/* iPXE network device operations table.
 * NOTE(review): extraction dropped the declaration line (original
 * 2772, `static struct net_device_operations bnxt_netdev_ops = {`
 * per the file's cross-reference index).
 */
2773	.open = bnxt_open,
2774	.close = bnxt_close,
2775	.poll = bnxt_poll,
2776	.transmit = bnxt_tx,
2777};
2778
/* PCI probe: allocate and wire up the net_device and driver state,
 * initialise timers and DMA, enable the PCI device, allocate HWRM
 * buffers, run the one-time bring-up sequence and register the netdev.
 * Returns 0 on success or a negative error, unwinding via goto labels.
 * NOTE(review): extraction dropped original lines 2795 (netdev
 * initialisation, presumably netdev_init(netdev, &bnxt_netdev_ops)),
 * 2820 (PCI info query, presumably bnxt_get_pci_info(bp)), and the
 * error-path lines 2842/2845/2848 (teardown calls between labels,
 * likely chip down / HWRM free / netdev_nullify).
 */
2779static int bnxt_init_one ( struct pci_device *pci )
2780{
2781	struct net_device *netdev;
2782	struct bnxt *bp;
2783	int err = 0;
2784
2785	DBGP ( "%s\n", __func__ );
2786	/* Allocate network device */
2787	netdev = alloc_etherdev ( sizeof ( *bp ) );
2788	if ( !netdev ) {
2789		DBGP ( "- %s ( ): alloc_etherdev Failed\n", __func__ );
2790		err = -ENOMEM;
2791		goto disable_pdev;
2792	}
2793
2794	/* Initialise network device */
2796
2797	/* Driver private area for this device */
2798	bp = netdev->priv;
2799
2800	/* Set PCI driver private data */
2801	pci_set_drvdata ( pci, netdev );
2802
2803	/* Clear Private area data */
2804	memset ( bp, 0, sizeof ( *bp ) );
2805	bp->pdev = pci;
2806	bp->dev = netdev;
2807	netdev->dev = &pci->dev;
2808
2809	timer_init ( &bp->task_timer, bnxt_er_task_timer, &netdev->refcnt );
2810	timer_init ( &bp->wait_timer, bnxt_er_wait_timer, &netdev->refcnt );
2811
2812	/* Configure DMA */
2813	bp->dma = &pci->dma;
2814	netdev->dma = bp->dma;
2815
2816	/* Enable PCI device */
2817	adjust_pci_device ( pci );
2818
2819	/* Get PCI Information */
2821
2822	/* Allocate HWRM memory */
2823	if ( ( err = bnxt_alloc_hwrm_mem ( bp ) ) != 0 )
2824		goto err_alloc_hwrm;
2825
2826	bp->link_status = STATUS_LINK_DOWN;
2827	bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
2828	if ( ( err = bnxt_up_init ( bp ) ) != 0 )
2829		goto err_up_init;
2830
2831	/* Register Network device */
2832	if ( ( err = register_netdev ( netdev ) ) != 0 ) {
2833		DBGP ( "- %s ( ): register_netdev Failed\n", __func__ );
2834		goto err_register_netdev;
2835	}
2836
2837	/* Set Initial Link State */
2838	bnxt_set_link ( bp );
2839
2840	return 0;
2841
2843err_register_netdev:
2844err_up_init:
2846err_alloc_hwrm:
2847	bnxt_down_pci ( bp );
2849	netdev_put ( netdev );
2850disable_pdev:
2851	pci_set_drvdata ( pci, NULL );
2852	return err;
2853}
2854
/* PCI remove: tear down in reverse order of probe — unregister the
 * netdev, free HWRM buffers, unmap BARs, stop the device, and drop
 * the final netdev reference.
 * NOTE(review): extraction dropped original lines 2862 (presumably
 * unregister_netdev(netdev)), 2865 (presumably bnxt_free_hwrm_mem(bp))
 * and 2871 (presumably netdev_nullify(netdev)) — the comments above
 * each gap describe the missing call.
 */
2855static void bnxt_remove_one ( struct pci_device *pci )
2856{
2857	struct net_device *netdev = pci_get_drvdata ( pci );
2858	struct bnxt *bp = netdev->priv;
2859
2860	DBGP ( "%s\n", __func__ );
2861	/* Unregister network device */
2863
2864	/* Free HWRM buffers */
2866
2867	/* iounmap PCI BAR ( s ) */
2868	bnxt_down_pci ( bp );
2869
2870	/* Stop network device */
2872
2873	/* Drop reference to network device */
2874	netdev_put ( netdev );
2875	DBGP ( "%s - Done\n", __func__ );
2876}
2877
2878/* Broadcom NXE PCI driver */
/* PCI driver descriptor: binds the supported device-ID table to the
 * probe/remove entry points via iPXE's __pci_driver linker table. */
2879struct pci_driver bnxt_pci_driver __pci_driver = {
2880	.ids = bnxt_nics,
2881	.id_count = ARRAY_SIZE ( bnxt_nics ),
2882	.probe = bnxt_init_one,
2883	.remove = bnxt_remove_one,
2884};
#define NULL
NULL pointer (VOID *)
Definition Base.h:322
struct golan_eqe_cmd cmd
Definition CIB_PRM.h:1
uint32_t flag
Flag number.
Definition aqc1xx.h:2
struct arbelprm_rc_send_wqe rc
Definition arbel.h:3
unsigned long physaddr_t
Definition stdint.h:20
static const void * src
Definition string.h:48
#define assert(condition)
Assert a condition at run-time.
Definition assert.h:50
#define MAX_TX_DESC_CNT
Definition bnx2.h:3881
#define MAX_RX_DESC_CNT
Definition bnx2.h:3885
static int bnxt_hwrm_set_rx_mask(struct bnxt *bp, u32 rx_mask)
Definition bnxt.c:1276
static int bnxt_hwrm_error_recovery_req(struct bnxt *bp)
Definition bnxt.c:1028
static int bnxt_hwrm_ring_alloc_nq(struct bnxt *bp)
Definition bnxt.c:1964
static int bnxt_hwrm_ring_free_grp(struct bnxt *bp)
Definition bnxt.c:1756
static u32 bnxt_tx_avail(struct bnxt *bp)
Definition bnxt.c:286
void bnxt_rst_er_registers(struct bnxt *bp)
Definition bnxt.c:2362
int bnxt_alloc_rings_mem(struct bnxt *bp)
Definition bnxt.c:636
static void bnxt_poll(struct net_device *dev)
Definition bnxt.c:2743
static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
Definition bnxt.c:1733
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp)
Definition bnxt.c:1989
static int bnxt_alloc_rx_iob(struct bnxt *bp, u16 cons_id, u16 iob_idx)
Definition bnxt.c:364
static void bnxt_close(struct net_device *dev)
Definition bnxt.c:2758
u32 bnxt_er_reg_read(struct bnxt *bp, u32 reg_addr)
Definition bnxt.c:2299
static int bnxt_hwrm_ring_free_cq(struct bnxt *bp)
Definition bnxt.c:1919
hwrm_func_t bring_down_nic[]
Definition bnxt.c:2096
static void bnxt_db_nq(struct bnxt *bp)
Definition bnxt.c:216
int bnxt_free_rx_iob(struct bnxt *bp)
Definition bnxt.c:331
hwrm_func_t bring_down_chip[]
Definition bnxt.c:2091
static int bnxt_hwrm_ring_alloc_grp(struct bnxt *bp)
Definition bnxt.c:1779
#define BNXT_FW_HEALTH_WIN_OFF
Definition bnxt.c:2279
static int bnxt_get_pci_info(struct bnxt *bp)
Definition bnxt.c:131
static int bnxt_open(struct net_device *dev)
Definition bnxt.c:2170
static void bnxt_db_cq(struct bnxt *bp)
Definition bnxt.c:229
static int bnxt_hwrm_ring_alloc_rx(struct bnxt *bp)
Definition bnxt.c:1913
static int bnxt_tx(struct net_device *dev, struct io_buffer *iob)
Definition bnxt.c:2218
void bnxt_mm_init_hwrm(struct bnxt *bp, const char *func)
Definition bnxt.c:511
hwrm_func_t bring_up_chip[]
Definition bnxt.c:2111
#define BNXT_GRC_BASE_MASK
Definition bnxt.c:2281
static int bnxt_get_link_state(struct bnxt *bp)
Definition bnxt.c:2070
static int bnxt_reset_rx_mask(struct bnxt *bp)
Definition bnxt.c:2065
void bnxt_er_task(struct bnxt *bp, u8 hb_task)
Definition bnxt.c:2378
static int bnxt_hwrm_cfa_l2_filter_alloc(struct bnxt *bp)
Definition bnxt.c:1192
static void bnxt_service_cq(struct net_device *dev)
Definition bnxt.c:2576
static void bnxt_tx_adjust_pkt(struct bnxt *bp, struct io_buffer *iob)
Definition bnxt.c:2204
void bnxt_set_txq(struct bnxt *bp, int entry, physaddr_t mapping, int len)
Definition bnxt.c:298
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
Definition bnxt.c:1091
static int bnxt_hwrm_func_qcfg_req(struct bnxt *bp)
Definition bnxt.c:925
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
Definition bnxt.c:1465
static int bnxt_hwrm_cfa_l2_filter_free(struct bnxt *bp)
Definition bnxt.c:1233
hwrm_func_t bring_up_nic[]
Definition bnxt.c:2129
static void bnxt_down_pci(struct bnxt *bp)
Definition bnxt.c:105
void bnxt_link_speed_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2531
static int bnxt_hwrm_backing_store_qcfg(struct bnxt *bp)
Definition bnxt.c:1433
static int bnxt_hwrm_backing_store_cfg(struct bnxt *bp)
Definition bnxt.c:1448
#define BNXT_GRC_OFFSET_MASK
Definition bnxt.c:2282
u8 bnxt_is_pci_vf(struct pci_device *pdev)
Check if Virtual Function.
Definition bnxt.c:97
static int bnxt_hwrm_ring_alloc_tx(struct bnxt *bp)
Definition bnxt.c:1907
static int bnxt_hwrm_ver_get(struct bnxt *bp)
Definition bnxt.c:727
static int bnxt_get_link_speed(struct bnxt *bp)
Definition bnxt.c:1349
static int bnxt_hwrm_func_resource_qcaps(struct bnxt *bp)
Definition bnxt.c:775
static void bnxt_db_rx(struct bnxt *bp, u32 idx)
Definition bnxt.c:243
int bnxt_hwrm_ring_free(struct bnxt *bp, u16 ring_id, u8 ring_type)
Definition bnxt.c:1810
static int wait_resp(struct bnxt *bp, u32 tmo, u16 len, const char *func)
Definition bnxt.c:696
static void bnxt_er_task_timer(struct retry_timer *timer, int over __unused)
Definition bnxt.c:2722
static int bnxt_hwrm_ring_free_nq(struct bnxt *bp)
Definition bnxt.c:1971
u32 bnxt_er_get_reg_val(struct bnxt *bp, u32 reg_addr, u32 reg_type, u32 mask)
Definition bnxt.c:2315
static void bnxt_set_link(struct bnxt *bp)
Definition bnxt.c:179
static u32 bnxt_set_ring_info(struct bnxt *bp)
Definition bnxt.c:837
int bnxt_post_rx_buffers(struct bnxt *bp)
Definition bnxt.c:381
void bnxt_free_rings_mem(struct bnxt *bp)
Definition bnxt.c:570
static int bnxt_hwrm_vnic_free(struct bnxt *bp)
Definition bnxt.c:2012
static int bnxt_hwrm_port_mac_cfg(struct bnxt *bp)
Definition bnxt.c:1489
void bnxt_add_vlan(struct io_buffer *iob, u16 vlan)
Definition bnxt.c:265
void bnxt_rst_reg_val(struct bnxt *bp, u32 reg_addr, u32 reg_val)
Definition bnxt.c:2339
static void * bnxt_pci_base(struct pci_device *pdev, unsigned int reg)
Definition bnxt.c:122
static int bnxt_hwrm_nvm_get_variable_req(struct bnxt *bp, u16 data_len, u16 option_num, u16 dimensions, u16 index_0)
Definition bnxt.c:1330
static void bnxt_db_tx(struct bnxt *bp, u32 idx)
Definition bnxt.c:254
void bnxt_link_speed_chg_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2547
int bnxt_alloc_hwrm_mem(struct bnxt *bp)
Definition bnxt.c:616
void bnxt_mm_init_rings(struct bnxt *bp, const char *func)
Definition bnxt.c:522
static struct pci_device_id bnxt_nics[]
Definition bnxt.c:28
#define bnxt_up_chip(bp)
Definition bnxt.c:2165
static int bnxt_set_rx_mask(struct bnxt *bp)
Definition bnxt.c:2060
void bnxt_process_er_event(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2469
#define bnxt_up_init(bp)
Definition bnxt.c:2168
static struct net_device_operations bnxt_netdev_ops
Definition bnxt.c:2772
static void bnxt_set_rx_desc(u8 *buf, struct io_buffer *iob, u16 cid, u32 idx)
Definition bnxt.c:351
static int bnxt_hwrm_ring_free_tx(struct bnxt *bp)
Definition bnxt.c:1934
static void bnxt_hwrm_assign_resources(struct bnxt *bp)
Definition bnxt.c:872
int(* hwrm_func_t)(struct bnxt *bp)
Definition bnxt.c:2080
static int bnxt_init_one(struct pci_device *pci)
Definition bnxt.c:2779
#define BNXT_REG_WINDOW_BASE
Definition bnxt.c:2280
hwrm_func_t bring_up_init[]
Definition bnxt.c:2082
void bnxt_port_phy_chg_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2561
static void bnxt_adv_nq_index(struct bnxt *bp, u16 cnt)
Definition bnxt.c:2254
void bnxt_rx_process(struct net_device *dev, struct bnxt *bp, struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi)
Definition bnxt.c:466
static int bnxt_get_device_address(struct bnxt *bp)
Definition bnxt.c:165
#define bnxt_down_nic(bp)
Definition bnxt.c:2166
static int bnxt_hwrm_vnic_cfg(struct bnxt *bp)
Definition bnxt.c:2035
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
Definition bnxt.c:1133
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp)
Definition bnxt.c:1504
static void short_hwrm_cmd_req(struct bnxt *bp, u16 len, u16 req_type)
Definition bnxt.c:679
static int bnxt_hwrm_func_cfg_req(struct bnxt *bp)
Definition bnxt.c:1004
static void dev_p7_db(struct bnxt *bp, u32 idx, u32 xid, u32 flag, u32 epoch, u32 toggle)
Definition bnxt.c:202
void bnxt_mm_nic(struct bnxt *bp)
Definition bnxt.c:541
static int bnxt_hwrm_ring_alloc_cq(struct bnxt *bp)
Definition bnxt.c:1901
static int bnxt_hwrm_func_qcaps_req(struct bnxt *bp)
Definition bnxt.c:890
static void bnxt_tx_complete(struct net_device *dev, u16 hw_idx)
Definition bnxt.c:318
void bnxt_link_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2268
static void hwrm_init(struct bnxt *bp, struct input *req, u16 cmd, u16 len)
Definition bnxt.c:658
static void bnxt_er_wait_timer(struct retry_timer *timer, int over __unused)
Definition bnxt.c:2733
#define bnxt_up_nic(bp)
Definition bnxt.c:2167
static int bnxt_hwrm_port_phy_qcaps_req(struct bnxt *bp)
Definition bnxt.c:966
u32 set_rx_mask(u32 rx_mask)
Definition bnxt.c:1257
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, u16 idx)
Definition bnxt.c:1291
static void bnxt_service_nq(struct net_device *dev)
Definition bnxt.c:2653
static int bnxt_hwrm_set_async_event(struct bnxt *bp)
Definition bnxt.c:1154
static int bnxt_hwrm_ring_free_rx(struct bnxt *bp)
Definition bnxt.c:1949
static void hwrm_write_req(struct bnxt *bp, void *req, u32 cnt)
Definition bnxt.c:668
static int bnxt_get_phy_link(struct bnxt *bp)
Definition bnxt.c:1686
static int bnxt_rx_complete(struct net_device *dev, struct rx_pkt_cmpl *rx)
Definition bnxt.c:491
static int bnxt_hwrm_ring_alloc(struct bnxt *bp, u8 type)
Definition bnxt.c:1823
static int bnxt_hwrm_func_reset_req(struct bnxt *bp)
Definition bnxt.c:990
static void bnxt_remove_one(struct pci_device *pci)
Definition bnxt.c:2855
static int bnxt_query_phy_link(struct bnxt *bp)
Definition bnxt.c:1653
u32 bnxt_er_reg_write(struct bnxt *bp, u32 reg_addr, u32 reg_val)
Definition bnxt.c:2284
static void dev_p5_db(struct bnxt *bp, u32 idx, u32 xid, u32 flag)
Definition bnxt.c:187
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
Definition bnxt.c:1711
static void bnxt_adv_cq_index(struct bnxt *bp, u16 cnt)
Definition bnxt.c:452
u8 bnxt_rx_drop(struct bnxt *bp, struct io_buffer *iob, struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len)
Definition bnxt.c:411
static u16 bnxt_get_pkt_vlan(char *src)
Definition bnxt.c:279
void bnxt_free_hwrm_mem(struct bnxt *bp)
Definition bnxt.c:596
void bnxt_process_reset_notify_event(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition bnxt.c:2509
int bnxt_hwrm_run(hwrm_func_t cmds[], struct bnxt *bp)
Definition bnxt.c:2147
#define DEFAULT_NUMBER_OF_STAT_CTXS
Definition bnxt.h:145
#define LINK_SPEED_FW_40G
Definition bnxt.h:309
#define MAX_CQ_DESC_CNT
Definition bnxt.h:149
#define LINK_SPEED_FW_1G
Definition bnxt.h:303
#define SET_MEDIUM_SPEED(bp, s)
Definition bnxt.h:117
#define STATUS_TIMEOUT
Definition bnxt.h:79
#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM
Definition bnxt.h:358
#define LINK_SPEED_FW_100G_PAM4
Definition bnxt.h:319
#define NQ_DMA_ADDR(bp)
Definition bnxt.h:178
#define NQ_CN_TYPE_MASK
Definition bnxt.h:568
#define GET_MEDIUM_SPEED(m)
Definition bnxt.h:116
#define MEDIUM_SPEED_25GBPS
Definition bnxt.h:101
#define CMPL_BASE_TYPE_TX_L2
Definition bnxt.h:522
#define IPXE_VERSION_UPDATE
Definition bnxt.h:33
#define STATUS_FAILURE
Definition bnxt.h:56
#define VALID_RING_NQ
Definition bnxt.h:908
#define RING_FREE(bp, rid, flag)
Definition bnxt.h:1081
#define ETHERTYPE_VLAN
Definition bnxt.h:191
#define MEDIA_AUTO_DETECT_MASK
Definition bnxt.h:1090
#define STAT_CTX_ID
Definition bnxt.h:175
#define D3_SPEED_FW_SHIFT
Definition bnxt.h:1089
#define SET_MEDIUM_DUPLEX(bp, d)
Definition bnxt.h:122
#define FW_STATUS_REG_CODE_READY
Definition bnxt.h:942
#define NQ_CN_TOGGLE_MASK
Definition bnxt.h:570
#define HWRM_CMD_DEFAULT_TIMEOUT
Definition bnxt.h:134
#define VALID_RING_GRP
Definition bnxt.h:904
#define CHIP_NUM_57608
Definition bnxt.h:1106
#define PORT_PHY_FLAGS
Definition bnxt.h:1078
#define MEDIUM_SPEED_400PAM4_112GBPS
Definition bnxt.h:111
#define MEDIUM_SPEED_50PAM4GBPS
Definition bnxt.h:106
#define CMPL_BASE_TYPE_RX_L2_V3
Definition bnxt.h:527
#define TX_RING_BUFFER_SIZE
Definition bnxt.h:150
#define SERVICE_NEXT_NQ_BD
Definition bnxt.h:186
#define SHORT_CMD_SUPPORTED
Definition bnxt.h:1062
#define RX_MASK
Definition bnxt.h:168
#define STATUS_LINK_DOWN
Definition bnxt.h:60
#define BD_NOW(bd, entry, len)
Definition bnxt.h:162
#define LINK_SPEED_DRV_NUM
Definition bnxt.h:220
#define STATUS_LINK_ACTIVE
Definition bnxt.h:59
#define DEFAULT_NUMBER_OF_RING_GRPS
Definition bnxt.h:144
#define DB_OFFSET_VF
Definition bnxt.h:196
#define RX_PKT_V3_CMPL_TYPE_RX_L2_V3
Definition bnxt.h:726
#define DMA_ALIGN_4K
Definition bnxt.h:155
#define DEFAULT_NUMBER_OF_TX_RINGS
Definition bnxt.h:142
#define BNXT_CQ_INTR_MODE(vf)
Definition bnxt.h:163
#define DBC_DBC_TYPE_SQ
Definition bnxt.h:432
#define IPXE_VERSION_MINOR
Definition bnxt.h:32
#define TX_BD_FLAGS
Definition bnxt.h:1073
#define LINK_SPEED_FW_AUTONEG
Definition bnxt.h:301
#define FLAG_RESET(f, b)
Definition bnxt.h:40
#define TX_BD_SHORT_FLAGS_LHINT_GTE2K
Definition bnxt.h:468
#define DBC_DBC_TYPE_SRQ
Definition bnxt.h:434
#define FLAG_TEST(f, b)
Definition bnxt.h:39
#define RX_RING_BUFFER_SIZE
Definition bnxt.h:151
#define DB_OFFSET_PF
Definition bnxt.h:195
#define MEDIUM_SPEED_200PAM4_112GBPS
Definition bnxt.h:109
#define RESP_BUFFER_SIZE
Definition bnxt.h:157
#define SET_LINK(p, m, s)
Definition bnxt.h:1082
#define RX_MASK_ACCEPT_ALL_MULTICAST
Definition bnxt.h:86
#define NQ_RING_BUFFER_SIZE
Definition bnxt.h:173
#define FLAG_SET(f, b)
Definition bnxt.h:38
#define BNXT_RX_STD_DMA_SZ
Definition bnxt.h:160
#define PHY_STATUS
Definition bnxt.h:206
#define DBC_MSG_XID(xid, flg)
Definition bnxt.h:199
#define VLAN_HDR_SIZE
Definition bnxt.h:190
#define LINK_SPEED_FW_10G
Definition bnxt.h:305
#define TX_DOORBELL_KEY_TX
Definition bnxt.h:384
#define MEDIUM_SPEED_100PAM4_112GBPS
Definition bnxt.h:108
#define RX_DMA_ADDR(bp)
Definition bnxt.h:181
#define REQ_BUFFER_SIZE
Definition bnxt.h:156
#define MEDIUM_SPEED_400PAM4GBPS
Definition bnxt.h:110
#define RX_MASK_ACCEPT_MULTICAST
Definition bnxt.h:85
#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT
Definition bnxt.h:532
#define CMPL_BASE_TYPE_MASK
Definition bnxt.h:520
#define BNXT_ER_TIMER_INTERVAL(x)
Definition bnxt.h:1107
#define CQ_DOORBELL_KEY_IDX(a)
Definition bnxt.h:1069
#define LINK_SPEED_FW_50G
Definition bnxt.h:311
#define PCICFG_ME_REGISTER
Definition bnxt.h:125
#define ER_DFLT_FW_RST_MAX_DSECS
Definition bnxt.h:941
#define DMA_DMA_ADDR(bp)
Definition bnxt.h:184
#define CQ_DMA_ADDR(bp)
Definition bnxt.h:179
#define VALID_STAT_CTX
Definition bnxt.h:900
#define CMPL_BASE_TYPE_RX_L2
Definition bnxt.h:523
#define SPEED_DRV_SHIFT
Definition bnxt.h:1085
#define LINK_SPEED_FW_MASK
Definition bnxt.h:299
#define MAX_ETHERNET_PACKET_BUFFER_SIZE
Definition bnxt.h:140
#define CHIP_NUM_57502
Definition bnxt.h:1104
#define LINK_SPEED_FW_2_5G
Definition bnxt.h:329
#define TX_BD_SHORT_FLAGS_LHINT_LT512
Definition bnxt.h:465
#define LINK_SPEED_FW_50G_PAM4
Definition bnxt.h:317
#define TX_BD_SHORT_FLAGS_LHINT_LT2K
Definition bnxt.h:467
#define NO_MORE_CQ_BD_TO_SERVICE
Definition bnxt.h:187
#define DEFAULT_NUMBER_OF_RX_RINGS
Definition bnxt.h:143
#define DETECT_MEDIA
Definition bnxt.h:208
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT
Definition bnxt.h:674
#define DBC_DBC_TYPE_CQ
Definition bnxt.h:436
#define NUM_RX_BUFFERS
Definition bnxt.h:146
#define VALID_L2_FILTER
Definition bnxt.h:907
#define BNXT_FLAG_IS_CHIP_P5_PLUS
Definition bnxt.h:50
#define VALID_RING_RX
Definition bnxt.h:903
#define D3_LINK_SPEED_FW_NUM
Definition bnxt.h:334
#define LINK_SPEED_FW_NUM
Definition bnxt.h:298
#define MEDIUM_SPEED_100GBPS
Definition bnxt.h:104
#define SUPPORT_SPEEDS
Definition bnxt.h:209
#define MEDIUM_FULL_DUPLEX
Definition bnxt.h:119
#define MAX_NQ_DESC_CNT
Definition bnxt.h:172
#define NQ_CN_TYPE_CQ_NOTIFICATION
Definition bnxt.h:573
#define BNXT_FLAG_NPAR_MODE
Definition bnxt.h:45
#define NO_MORE_NQ_BD_TO_SERVICE
Definition bnxt.h:185
#define MEDIUM_SPEED_40GBPS
Definition bnxt.h:102
#define RX_PKT_CMPL_V2
Definition bnxt.h:670
#define REQ_DMA_ADDR(bp)
Definition bnxt.h:182
#define NEXT_IDX(N, S)
Definition bnxt.h:161
#define TX_IN_USE(a, b, c)
Definition bnxt.h:177
#define VF_CFG_ENABLE_FLAGS
Definition bnxt.h:1096
#define LINK_POLL_WAIT_TIME
Definition bnxt.h:167
#define DBC_MSG_EPCH(idx)
Definition bnxt.h:202
#define RX_MASK_PROMISCUOUS_MODE
Definition bnxt.h:89
#define BNXT_FLAG_RESOURCE_QCAPS_SUPPORT
Definition bnxt.h:43
#define SERVICE_NEXT_CQ_BD
Definition bnxt.h:188
#define RX_PKT_V3_CMPL_TYPE_MASK
Definition bnxt.h:718
#define VALID_RX_IOB
Definition bnxt.h:906
#define SUPPORT_SPEEDS2
Definition bnxt.h:210
#define BYTE_SWAP_S(w)
Definition bnxt.h:192
#define VALID_DRIVER_REG
Definition bnxt.h:899
#define HWRM_CMD_FLASH_MULTIPLAYER(a)
Definition bnxt.h:137
#define SPEED_FW_SHIFT
Definition bnxt.h:1087
#define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT
Definition bnxt.h:870
#define TX_DMA_ADDR(bp)
Definition bnxt.h:180
#define CHIP_NUM_57504
Definition bnxt.h:1103
#define HWRM_CMD_WAIT(b)
Definition bnxt.h:139
#define RESP_DMA_ADDR(bp)
Definition bnxt.h:183
#define DBC_MSG_TOGGLE(idx)
Definition bnxt.h:204
#define SHORT_CMD_REQUIRED
Definition bnxt.h:1063
#define VALID_RING_TX
Definition bnxt.h:902
#define TX_BD_SHORT_FLAGS_LHINT_LT1K
Definition bnxt.h:466
#define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_SFT
Definition bnxt.h:822
#define MEDIUM_SPEED_200GBPS
Definition bnxt.h:105
#define MEDIUM_SPEED_100PAM4GBPS
Definition bnxt.h:107
#define LINK_SPEED_FW_400G_PAM4
Definition bnxt.h:325
#define MEDIUM_SPEED_50GBPS
Definition bnxt.h:103
#define D3_SPEED_FW_MASK
Definition bnxt.h:1088
#define HWRM_CMD_POLL_WAIT_TIME
Definition bnxt.h:135
#define LINK_SPEED_FW_100G
Definition bnxt.h:313
#define LINK_SPEED_FW_200G
Definition bnxt.h:315
#define BNXT_FLAG_PCI_VF
Definition bnxt.h:47
#define VALID_VNIC_ID
Definition bnxt.h:905
#define SPEED_DRV_MASK
Definition bnxt.h:1084
#define LINK_DEFAULT_TIMEOUT
Definition bnxt.h:166
#define LINK_SPEED_FW_200G_PAM4_112
Definition bnxt.h:323
#define LINK_SPEED_FW_100G_PAM4_112
Definition bnxt.h:321
#define PHY_SPEED
Definition bnxt.h:207
#define BNXT_FLAG_IS_CHIP_P5
Definition bnxt.h:49
#define DBC_DBC_TYPE_NQ_ARM
Definition bnxt.h:443
#define CMPL_BASE_V
Definition bnxt.h:542
#define CQ_RING_BUFFER_SIZE
Definition bnxt.h:153
#define DMA_BUFFER_SIZE
Definition bnxt.h:158
#define BNXT_FLAG_IS_CHIP_P7
Definition bnxt.h:51
#define CMPL_DOORBELL_KEY_CMPL
Definition bnxt.h:406
#define DBC_MSG_IDX(idx)
Definition bnxt.h:197
#define RX_DOORBELL_KEY_RX
Definition bnxt.h:394
#define BNXT_FLAG_LINK_SPEEDS2
Definition bnxt.h:48
#define CMPL_BASE_TYPE_STAT_EJECT
Definition bnxt.h:528
#define MEDIUM_SPEED_1000MBPS
Definition bnxt.h:97
#define MEDIUM_SPEED_10GBPS
Definition bnxt.h:99
#define BNXT_DMA_ALIGNMENT
Definition bnxt.h:154
#define MEDIUM_SPEED_2500MBPS
Definition bnxt.h:98
#define BNXT_FLAG_MULTI_HOST
Definition bnxt.h:44
#define ER_DFLT_FW_RST_MIN_DSECS
Definition bnxt.h:940
#define MEDIUM_SPEED_AUTONEG
Definition bnxt.h:93
#define LINK_SPEED_FW_400G_PAM4_112
Definition bnxt.h:327
#define GRC_COM_CHAN_BASE
Definition bnxt.h:126
#define RX_MASK_ACCEPT_NONE
Definition bnxt.h:83
#define NQ_CN_TOGGLE_SFT
Definition bnxt.h:571
#define HWRM_CMD_DEFAULT_MULTIPLAYER(a)
Definition bnxt.h:136
#define STATUS_SUCCESS
Definition bnxt.h:55
#define MAC_HDR_SIZE
Definition bnxt.h:189
#define QCFG_PHY_ALL
Definition bnxt.h:211
#define VALID_RING_CQ
Definition bnxt.h:901
#define BNXT_FLAG_HWRM_SHORT_CMD_SUPP
Definition bnxt.h:41
#define LM_PAGE_BITS(a)
Definition bnxt.h:159
#define LINK_SPEED_FW_25G
Definition bnxt.h:307
#define IPXE_VERSION_MAJOR
Definition bnxt.h:31
#define DEFAULT_NUMBER_OF_CMPL_RINGS
Definition bnxt.h:141
#define BNXT_ER_WAIT_TIMER_INTERVAL(x)
Definition bnxt.h:1108
#define SPEED_FW_MASK
Definition bnxt.h:1086
#define NQ_CN_V
Definition bnxt.h:587
#define TX_AVAIL(r)
Definition bnxt.h:176
#define RX_RING_QID
Definition bnxt.h:174
#define GRC_COM_CHAN_TRIG
Definition bnxt.h:127
#define MEDIA_AUTO_DETECT_SHIFT
Definition bnxt.h:1091
#define BNXT_FLAG_HWRM_SHORT_CMD_REQ
Definition bnxt.h:42
#define CHIP_NUM_57508
Definition bnxt.h:1102
#define dbg_rx_cid(idx, cid)
Definition bnxt_dbg.h:463
#define dump_evt(cq, ty, id, ring)
Definition bnxt_dbg.h:660
#define dbg_alloc_rx_iob_fail(iob_idx, cons_id)
Definition bnxt_dbg.h:464
#define dbg_hw_cmd(bp, func, cmd_len, resp_len, cmd_tmo, err)
Definition bnxt_dbg.h:378
#define dbg_pci(bp, func, creg)
Definition bnxt_dbg.h:140
#define dbg_alloc_rx_iob(iob, id, cid)
Definition bnxt_dbg.h:462
#define dbg_tx_avail(bp, a, u)
Definition bnxt_dbg.h:563
#define dbg_rx_stat(bp)
Definition bnxt_dbg.h:466
#define dbg_tx_done(pkt, len, idx)
Definition bnxt_dbg.h:569
#define dbg_fw_ver(resp, tmo)
Definition bnxt_dbg.h:319
#define dbg_func_qcaps(bp)
Definition bnxt_dbg.h:321
#define dump_cq(cq, id, toggle)
Definition bnxt_dbg.h:508
#define dbg_link_state(bp, tmo)
Definition bnxt_dbg.h:663
#define dbg_tx_vlan(bp, src, plen, len)
Definition bnxt_dbg.h:564
#define dbg_func_qcfg(bp)
Definition bnxt_dbg.h:322
#define prn_set_speed(speed)
Definition bnxt_dbg.h:323
#define dbg_mem(bp, func)
Definition bnxt_dbg.h:175
#define dbg_rxp(iob, rx_len, drop)
Definition bnxt_dbg.h:465
#define dbg_link_info(bp)
Definition bnxt_dbg.h:662
#define dbg_flags(func, flags)
Definition bnxt_dbg.h:326
#define dbg_func_resource_qcaps(bp)
Definition bnxt_dbg.h:320
#define dbg_short_cmd(sreq, func, len)
Definition bnxt_dbg.h:398
#define dbg_link_status(bp)
Definition bnxt_dbg.h:661
#define dbg_num_rings(bp)
Definition bnxt_dbg.h:325
#define dump_tx_pkt(pkt, len, idx)
Definition bnxt_dbg.h:567
#define dump_rx_bd(rx_cmp, rx_cmp_hi, desc_idx)
Definition bnxt_dbg.h:461
#define dump_tx_stat(bp)
Definition bnxt_dbg.h:566
#define dump_nq(nq, id)
Definition bnxt_dbg.h:509
#define dbg_chip_info(bp)
Definition bnxt_dbg.h:324
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_SUPPORTED_LINK_SPEEDS_CHANGE
Definition bnxt_hsi.h:786
#define ASYNC_EVENT_CMPL_ER_EVENT_DATA1_MASTER_FUNC
Definition bnxt_hsi.h:588
#define HWRM_VNIC_FREE
Definition bnxt_hsi.h:150
#define VNIC_ALLOC_REQ_FLAGS_DEFAULT
Definition bnxt_hsi.h:5769
#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_FORCE
Definition bnxt_hsi.h:664
#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT
Definition bnxt_hsi.h:6562
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE
Definition bnxt_hsi.h:560
#define PORT_MAC_CFG_REQ_LPBK_NONE
Definition bnxt_hsi.h:3646
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX
Definition bnxt_hsi.h:3110
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS
Definition bnxt_hsi.h:1556
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB
Definition bnxt_hsi.h:3168
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY
Definition bnxt_hsi.h:563
#define HWRM_RING_ALLOC
Definition bnxt_hsi.h:160
#define FUNC_CFG_REQ_ENABLES_NUM_MSIX
Definition bnxt_hsi.h:1558
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1753
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56
Definition bnxt_hsi.h:3186
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2
Definition bnxt_hsi.h:3080
#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY
Definition bnxt_hsi.h:3042
#define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_FEC_CFG_CHANGE
Definition bnxt_hsi.h:810
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR1
Definition bnxt_hsi.h:1758
#define HWRM_RING_FREE
Definition bnxt_hsi.h:161
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_PCIE_CFG
Definition bnxt_hsi.h:1755
#define HWRM_VERSION_MINOR
Definition bnxt_hsi.h:370
#define HWRM_MAX_REQ_LEN
Definition bnxt_hsi.h:364
#define HWRM_CFA_L2_FILTER_FREE
Definition bnxt_hsi.h:173
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112
Definition bnxt_hsi.h:3190
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX
Definition bnxt_hsi.h:3068
#define RING_FREE_REQ_RING_TYPE_NQ
Definition bnxt_hsi.h:6301
#define HWRM_PORT_PHY_CFG
Definition bnxt_hsi.h:119
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112
Definition bnxt_hsi.h:3191
#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST
Definition bnxt_hsi.h:6683
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST
Definition bnxt_hsi.h:1417
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56
Definition bnxt_hsi.h:3188
#define HWRM_VNIC_ALLOC
Definition bnxt_hsi.h:149
#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE
Definition bnxt_hsi.h:561
#define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_PAUSE_CFG_CHANGE
Definition bnxt_hsi.h:812
#define RING_ALLOC_REQ_INT_MODE_POLL
Definition bnxt_hsi.h:6270
#define HWRM_PORT_PHY_QCAPS
Definition bnxt_hsi.h:129
#define HWRM_VNIC_CFG
Definition bnxt_hsi.h:151
#define ER_QCFG_RESET_REG_ADDR_MASK
Definition bnxt_hsi.h:1803
#define RING_ALLOC_REQ_RING_TYPE_RX
Definition bnxt_hsi.h:6216
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB
Definition bnxt_hsi.h:3184
#define ASYNC_EVENT_CMPL_EVENT_DATA1_REASON_CODE_MASK
Definition bnxt_hsi.h:591
#define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY
Definition bnxt_hsi.h:562
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK
Definition bnxt_hsi.h:6530
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56
Definition bnxt_hsi.h:3187
#define HWRM_FUNC_RESOURCE_QCAPS
Definition bnxt_hsi.h:281
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP
Definition bnxt_hsi.h:5821
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB
Definition bnxt_hsi.h:3090
#define ASYNC_EVENT_CMPL_ER_EVENT_DATA1_RECOVERY_ENABLED
Definition bnxt_hsi.h:589
#define ER_QCFG_FW_HEALTH_REG_ADDR_MASK
Definition bnxt_hsi.h:1760
#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS
Definition bnxt_hsi.h:1542
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB
Definition bnxt_hsi.h:3085
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB
Definition bnxt_hsi.h:3091
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX
Definition bnxt_hsi.h:3111
#define ER_QCFG_FW_HB_REG_ADDR_MASK
Definition bnxt_hsi.h:1770
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB
Definition bnxt_hsi.h:3183
#define HWRM_FUNC_QCAPS
Definition bnxt_hsi.h:108
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID
Definition bnxt_hsi.h:5827
#define RING_FREE_REQ_RING_TYPE_L2_CMPL
Definition bnxt_hsi.h:6296
#define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT
Definition bnxt_hsi.h:1835
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56
Definition bnxt_hsi.h:3189
#define SHORT_REQ_SIGNATURE_SHORT_CMD
Definition bnxt_hsi.h:88
#define FUNC_CFG_REQ_EVB_MODE_NO_EVB
Definition bnxt_hsi.h:1614
#define PORT_PHY_QCFG_RESP_LINK_LINK
Definition bnxt_hsi.h:3251
#define FUNC_DRV_RGTR_REQ_ENABLES_VER
Definition bnxt_hsi.h:1838
#define HWRM_CFA_L2_SET_RX_MASK
Definition bnxt_hsi.h:175
#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN
Definition bnxt_hsi.h:1889
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST
Definition bnxt_hsi.h:6521
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE
Definition bnxt_hsi.h:2162
#define HWRM_NVM_GET_VARIABLE
Definition bnxt_hsi.h:309
#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME
Definition bnxt_hsi.h:1181
#define HWRM_FUNC_QCFG
Definition bnxt_hsi.h:109
#define HWRM_FUNC_BACKING_STORE_QCFG
Definition bnxt_hsi.h:285
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0
Definition bnxt_hsi.h:1433
#define HWRM_RING_GRP_ALLOC
Definition bnxt_hsi.h:166
#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST
Definition bnxt_hsi.h:6681
#define RING_ALLOC_REQ_RING_TYPE_NQ
Definition bnxt_hsi.h:6219
#define HWRM_QUEUE_QPORTCFG
Definition bnxt_hsi.h:135
#define HWRM_FUNC_DRV_UNRGTR
Definition bnxt_hsi.h:113
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE
Definition bnxt_hsi.h:3069
#define HWRM_VER_GET
Definition bnxt_hsi.h:98
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK
Definition bnxt_hsi.h:3071
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE
Definition bnxt_hsi.h:3067
#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
Definition bnxt_hsi.h:6684
#define HWRM_FUNC_DRV_RGTR
Definition bnxt_hsi.h:116
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
Definition bnxt_hsi.h:3192
#define HWRM_PORT_MAC_CFG
Definition bnxt_hsi.h:120
#define HWRM_VERSION_MAJOR
Definition bnxt_hsi.h:369
#define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EEE_CFG_CHANGE
Definition bnxt_hsi.h:811
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB
Definition bnxt_hsi.h:3093
#define ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1783
#define RING_FREE_REQ_RING_TYPE_TX
Definition bnxt_hsi.h:6297
#define RING_ALLOC_REQ_RING_TYPE_TX
Definition bnxt_hsi.h:6215
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
Definition bnxt_hsi.h:3170
#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD
Definition bnxt_hsi.h:1841
#define HWRM_FUNC_VF_CFG
Definition bnxt_hsi.h:102
#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR
Definition bnxt_hsi.h:1551
#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER
Definition bnxt_hsi.h:1844
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
Definition bnxt_hsi.h:6517
#define FUNC_CFG_REQ_ENABLES_EVB_MODE
Definition bnxt_hsi.h:1554
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_GRC
Definition bnxt_hsi.h:1756
#define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT
Definition bnxt_hsi.h:1834
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB
Definition bnxt_hsi.h:3181
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
Definition bnxt_hsi.h:3107
#define HWRM_VERSION_UPDATE
Definition bnxt_hsi.h:371
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL
Definition bnxt_hsi.h:6214
#define HWRM_PORT_PHY_QCFG
Definition bnxt_hsi.h:126
#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS
Definition bnxt_hsi.h:1545
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB
Definition bnxt_hsi.h:3092
#define HWRM_CFA_L2_FILTER_ALLOC
Definition bnxt_hsi.h:172
#define ER_QCFG_RESET_INPRG_REG_ADDR_MASK
Definition bnxt_hsi.h:1790
#define HWRM_ER_QCFG
Definition bnxt_hsi.h:99
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB
Definition bnxt_hsi.h:3182
#define FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE
Definition bnxt_hsi.h:1366
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK
Definition bnxt_hsi.h:3081
#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_MASK
Definition bnxt_hsi.h:665
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_ILLEGAL_LINK_SPEED_CFG
Definition bnxt_hsi.h:787
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE
Definition bnxt_hsi.h:554
#define HWRM_FUNC_BACKING_STORE_CFG
Definition bnxt_hsi.h:284
#define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE
Definition bnxt_hsi.h:1831
#define HWRM_NA_SIGNATURE
Definition bnxt_hsi.h:363
#define VNIC_CFG_REQ_ENABLES_MRU
Definition bnxt_hsi.h:5825
#define ER_QCFG_FW_HB_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1763
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR
Definition bnxt_hsi.h:6529
#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID
Definition bnxt_hsi.h:5826
#define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
Definition bnxt_hsi.h:3102
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED
Definition bnxt_hsi.h:3078
#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
Definition bnxt_hsi.h:6682
#define FUNC_CFG_REQ_ENABLES_NUM_VNICS
Definition bnxt_hsi.h:1544
#define ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1808
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB
Definition bnxt_hsi.h:3185
#define ASYNC_EVENT_CMPL_EVENT_DATA1_REASON_CODE_FATAL
Definition bnxt_hsi.h:590
#define PORT_PHY_CFG_REQ_FLAGS_FORCE
Definition bnxt_hsi.h:3044
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID
Definition bnxt_hsi.h:6211
#define ER_QCFG_RESET_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1796
#define RING_FREE_REQ_RING_TYPE_RX
Definition bnxt_hsi.h:6298
#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS
Definition bnxt_hsi.h:1541
#define ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_MASK
Definition bnxt_hsi.h:1773
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID
Definition bnxt_hsi.h:6544
#define HWRM_STAT_CTX_ALLOC
Definition bnxt_hsi.h:191
#define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED
Definition bnxt_hsi.h:4237
#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS
Definition bnxt_hsi.h:1540
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB
Definition bnxt_hsi.h:3169
#define HWRM_FUNC_RESET
Definition bnxt_hsi.h:104
#define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR0
Definition bnxt_hsi.h:1757
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE
Definition bnxt_hsi.h:556
#define ER_QCFG_RCVRY_CNT_REG_ADDR_MASK
Definition bnxt_hsi.h:1815
#define HWRM_FUNC_CFG
Definition bnxt_hsi.h:110
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE
Definition bnxt_hsi.h:1837
#define HWRM_STAT_CTX_FREE
Definition bnxt_hsi.h:192
#define HWRM_RING_GRP_FREE
Definition bnxt_hsi.h:167
#define ER_QCFG_FW_RESET_CNT_REG_ADDR_MASK
Definition bnxt_hsi.h:1780
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID
Definition bnxt_hsi.h:6212
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB
Definition bnxt_hsi.h:3088
struct bofm_section_header done
Definition bofm_test.c:46
ring len
Length.
Definition dwmac.h:226
#define ARRAY_SIZE(x)
Definition efx_common.h:43
uint32_t type
Operating system type.
Definition ena.h:1
uint8_t flags
Flags.
Definition ena.h:7
struct ena_llq_option desc
Descriptor counts.
Definition ena.h:9
Error codes.
struct net_device * alloc_etherdev(size_t priv_size)
Allocate Ethernet device.
Definition ethernet.c:265
Ethernet protocol.
static int is_valid_ether_addr(const void *addr)
Check if Ethernet address is valid.
Definition ethernet.h:78
static struct net_device * netdev
Definition gdbudp.c:53
#define __unused
Declare a variable or data structure as unused.
Definition compiler.h:598
#define DBGP(...)
Definition compiler.h:557
#define FILE_LICENCE(_licence)
Declare a particular licence as applying to a file.
Definition compiler.h:921
#define EINVAL
Invalid argument.
Definition errno.h:429
#define ENOMEM
Not enough space.
Definition errno.h:535
#define EBUSY
Device or resource busy.
Definition errno.h:339
#define ENOBUFS
No buffer space available.
Definition errno.h:499
#define ETH_ALEN
Definition if_ether.h:9
#define u8
Definition igbvf_osdep.h:40
#define barrier()
Optimisation barrier.
Definition compiler.h:658
void mb(void)
Memory barrier.
#define wmb()
Definition io.h:546
#define writeq(data, io_addr)
Definition io.h:273
void iounmap(volatile const void *io_addr)
Unmap I/O address.
int pci_read_config_dword(struct pci_device *pci, unsigned int where, uint32_t *value)
Read 32-bit dword from PCI configuration space.
int pci_read_config_word(struct pci_device *pci, unsigned int where, uint16_t *value)
Read 16-bit word from PCI configuration space.
void * pci_ioremap(struct pci_device *pci, unsigned long bus_addr, size_t len)
Map PCI bus address as an I/O address.
int pci_write_config_word(struct pci_device *pci, unsigned int where, uint16_t value)
Write 16-bit word to PCI configuration space.
int pci_read_config_byte(struct pci_device *pci, unsigned int where, uint8_t *value)
Read byte from PCI configuration space.
int pci_write_config_dword(struct pci_device *pci, unsigned int where, uint32_t value)
Write 32-bit dword to PCI configuration space.
iPXE timers
void __asmcall int val
Definition setjmp.h:12
uint64_t u64
Definition stdint.h:26
String functions.
void * memcpy(void *dest, const void *src, size_t len) __nonnull
void * memset(void *dest, int character, size_t len) __nonnull
void * memmove(void *dest, const void *src, size_t len) __nonnull
struct io_buffer * alloc_rx_iob(size_t len, struct dma_device *dma)
Allocate and map I/O buffer for receive DMA.
Definition iobuf.c:188
void free_rx_iob(struct io_buffer *iobuf)
Unmap and free I/O buffer for receive DMA.
Definition iobuf.c:215
I/O buffers.
#define iob_put(iobuf, len)
Definition iobuf.h:125
static __always_inline physaddr_t iob_dma(struct io_buffer *iobuf)
Get I/O buffer DMA address.
Definition iobuf.h:268
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition iobuf.h:160
DMA mappings.
void dma_free(struct dma_mapping *map, void *addr, size_t len)
Unmap and free DMA-coherent buffer.
void * dma_alloc(struct dma_device *dma, struct dma_mapping *map, size_t len, size_t align)
Allocate and map DMA-coherent buffer.
Dynamic memory allocation.
Media Independent Interface constants.
static unsigned int unsigned int reg
Definition myson.h:162
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition netdevice.c:231
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition netdevice.c:549
void unregister_netdev(struct net_device *netdev)
Unregister network device.
Definition netdevice.c:946
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition netdevice.c:587
int register_netdev(struct net_device *netdev)
Register network device.
Definition netdevice.c:760
Network device management.
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition netdevice.h:792
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
Definition netdevice.h:522
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition netdevice.h:535
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition netdevice.h:579
static void netdev_tx_complete(struct net_device *netdev, struct io_buffer *iobuf)
Complete network transmission.
Definition netdevice.h:770
unsigned long pci_bar_size(struct pci_device *pci, unsigned int reg)
Get the size of a PCI BAR.
Definition pci.c:164
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition pci.c:241
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition pci.c:97
PCI bus.
#define __pci_driver
Declare a PCI driver.
Definition pci.h:278
#define PCI_SUBSYSTEM_ID
PCI subsystem ID.
Definition pci.h:79
#define PCI_COMMAND_MASTER
Bus master.
Definition pci.h:29
#define PCI_BASE_ADDRESS_2
Definition pci.h:65
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition pci.h:366
#define PCI_ROM(_vendor, _device, _name, _description, _data)
Definition pci.h:308
#define PCI_COMMAND_INTX_DISABLE
Interrupt disable.
Definition pci.h:33
#define PCI_BASE_ADDRESS_0
Definition pci.h:63
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition pci.h:376
#define PCI_COMMAND
PCI command.
Definition pci.h:26
#define PCI_BASE_ADDRESS_4
Definition pci.h:67
#define PCI_SUBSYSTEM_VENDOR_ID
PCI subsystem vendor ID.
Definition pci.h:76
uint16_t bp
Definition registers.h:9
void start_timer_fixed(struct retry_timer *timer, unsigned long timeout)
Start timer with a specified timeout.
Definition retry.c:65
void stop_timer(struct retry_timer *timer)
Stop timer.
Definition retry.c:118
#define container_of(ptr, type, field)
Get containing structure.
Definition stddef.h:36
Definition bnxt.h:949
struct retry_timer wait_timer
Definition bnxt.h:987
struct net_device * dev
Definition bnxt.h:967
struct retry_timer task_timer
Definition bnxt.h:986
u16 type
Definition bnxt.h:519
u32 info3_v
Definition bnxt.h:541
__le16 req_type
Definition bnxt_hsi.h:86
__le16 signature
Definition bnxt_hsi.h:87
__le64 req_addr
Definition bnxt_hsi.h:92
__le16 default_rx_ring_id
Definition bnxt_hsi.h:5834
__le16 default_cmpl_ring_id
Definition bnxt_hsi.h:5835
__le16 seq_id
Definition bnxt_hsi.h:71
__le16 req_type
Definition bnxt_hsi.h:69
__le16 target_id
Definition bnxt_hsi.h:72
__le64 resp_addr
Definition bnxt_hsi.h:73
__le16 cmpl_ring
Definition bnxt_hsi.h:70
A persistent I/O buffer.
Definition iobuf.h:38
void * data
Start of data.
Definition iobuf.h:53
Network device operations.
Definition netdevice.h:214
A network device.
Definition netdevice.h:353
void * priv
Driver private data.
Definition netdevice.h:432
struct device * dev
Underlying hardware device.
Definition netdevice.h:365
u16 type
Definition bnxt.h:560
u32 v
Definition bnxt.h:581
__le16 error_code
Definition bnxt_hsi.h:78
__le16 req_type
Definition bnxt_hsi.h:79
__le16 resp_len
Definition bnxt_hsi.h:81
__le16 seq_id
Definition bnxt_hsi.h:80
A PCI device ID list entry.
Definition pci.h:175
unsigned long driver_data
Arbitrary driver data.
Definition pci.h:183
A PCI device.
Definition pci.h:211
struct device dev
Generic device.
Definition pci.h:213
struct pci_device_id * id
Driver device ID.
Definition pci.h:248
struct dma_device dma
DMA device.
Definition pci.h:215
A PCI driver.
Definition pci.h:252
A retry timer.
Definition retry.h:22
u16 errors_v2
Definition bnxt.h:669
u32 opaque
Definition bnxt.h:635
u16 len
Definition bnxt.h:634
u16 flags_type
Definition bnxt.h:717
A timer.
Definition timer.h:29
physaddr_t dma
Definition bnxt.h:473
u32 opaque
Definition bnxt.h:472
u16 len
Definition bnxt.h:471
u16 flags_type
Definition bnxt.h:452
void mdelay(unsigned long msecs)
Delay for a fixed number of milliseconds.
Definition timer.c:79
void udelay(unsigned long usecs)
Delay for a fixed number of microseconds.
Definition timer.c:61
uint32_t data_len
Microcode data size (or 0 to indicate 2000 bytes)
Definition ucode.h:15
#define u16
Definition vga.h:20
#define u32
Definition vga.h:21
#define readl
Definition w89c840.c:157
#define writel
Definition w89c840.c:160
u8 tx[WPA_TKIP_MIC_KEY_LEN]
MIC key for packets to the AP.
Definition wpa.h:4
u8 rx[WPA_TKIP_MIC_KEY_LEN]
MIC key for packets from the AP.
Definition wpa.h:1