iPXE
bnxt.c
Go to the documentation of this file.
1 
2 FILE_LICENCE ( GPL2_ONLY );
3 
4 #include <mii.h>
5 #include <stdio.h>
6 #include <errno.h>
7 #include <unistd.h>
8 #include <byteswap.h>
9 #include <ipxe/pci.h>
10 #include <ipxe/iobuf.h>
11 #include <ipxe/timer.h>
12 #include <ipxe/malloc.h>
13 #include <ipxe/if_ether.h>
14 #include <ipxe/ethernet.h>
15 #include <ipxe/netdevice.h>
16 #include "bnxt.h"
17 #include "bnxt_dbg.h"
18 
19 static void bnxt_service_cq ( struct net_device *dev );
20 static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx );
21 static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt );
22 static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt );
23 static int bnxt_rx_complete ( struct net_device *dev, struct rx_pkt_cmpl *rx );
24 void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt );
25 
/**
 * PCI device ID table for supported Broadcom NetXtreme-C/E devices.
 *
 * The last PCI_ROM argument becomes pci_device_id::driver_data;
 * entries flagged BNXT_FLAG_PCI_VF are SR-IOV virtual functions
 * (see bnxt_is_pci_vf()).
 */
static struct pci_device_id bnxt_nics[] = {
	PCI_ROM( 0x14e4, 0x1604, "14e4-1604", "14e4-1604", 0 ),
	PCI_ROM( 0x14e4, 0x1605, "14e4-1605", "14e4-1605", 0 ),
	PCI_ROM( 0x14e4, 0x1606, "14e4-1606", "14e4-1606", 0 ),
	PCI_ROM( 0x14e4, 0x1609, "14e4-1609", "14e4-1609", 0 ),
	PCI_ROM( 0x14e4, 0x1614, "14e4-1614", "14e4-1614", 0 ),
	PCI_ROM( 0x14e4, 0x16c0, "14e4-16C0", "14e4-16C0", 0 ),
	PCI_ROM( 0x14e4, 0x16c1, "14e4-16C1", "14e4-16C1", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x16c8, "14e4-16C8", "14e4-16C8", 0 ),
	PCI_ROM( 0x14e4, 0x16c9, "14e4-16C9", "14e4-16C9", 0 ),
	PCI_ROM( 0x14e4, 0x16ca, "14e4-16CA", "14e4-16CA", 0 ),
	PCI_ROM( 0x14e4, 0x16cc, "14e4-16CC", "14e4-16CC", 0 ),
	PCI_ROM( 0x14e4, 0x16cd, "14e4-16CD", "14e4-16CD", 0 ),
	PCI_ROM( 0x14e4, 0x16ce, "14e4-16CE", "14e4-16CE", 0 ),
	PCI_ROM( 0x14e4, 0x16cf, "14e4-16CF", "14e4-16CF", 0 ),
	PCI_ROM( 0x14e4, 0x16d0, "14e4-16D0", "14e4-16D0", 0 ),
	PCI_ROM( 0x14e4, 0x16d1, "14e4-16D1", "14e4-16D1", 0 ),
	PCI_ROM( 0x14e4, 0x16d2, "14e4-16D2", "14e4-16D2", 0 ),
	PCI_ROM( 0x14e4, 0x16d4, "14e4-16D4", "14e4-16D4", 0 ),
	PCI_ROM( 0x14e4, 0x16d5, "14e4-16D5", "14e4-16D5", 0 ),
	PCI_ROM( 0x14e4, 0x16d6, "14e4-16D6", "14e4-16D6", 0 ),
	PCI_ROM( 0x14e4, 0x16d7, "14e4-16D7", "14e4-16D7", 0 ),
	PCI_ROM( 0x14e4, 0x16d8, "14e4-16D8", "14e4-16D8", 0 ),
	PCI_ROM( 0x14e4, 0x16d9, "14e4-16D9", "14e4-16D9", 0 ),
	PCI_ROM( 0x14e4, 0x16da, "14e4-16DA", "14e4-16DA", 0 ),
	PCI_ROM( 0x14e4, 0x16db, "14e4-16DB", "14e4-16DB", 0 ),
	PCI_ROM( 0x14e4, 0x16dc, "14e4-16DC", "14e4-16DC", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x16de, "14e4-16DE", "14e4-16DE", 0 ),
	PCI_ROM( 0x14e4, 0x16df, "14e4-16DF", "14e4-16DF", 0 ),
	PCI_ROM( 0x14e4, 0x16e0, "14e4-16E0", "14e4-16E0", 0 ),
	PCI_ROM( 0x14e4, 0x16e2, "14e4-16E2", "14e4-16E2", 0 ),
	PCI_ROM( 0x14e4, 0x16e3, "14e4-16E3", "14e4-16E3", 0 ),
	PCI_ROM( 0x14e4, 0x16e4, "14e4-16E4", "14e4-16E4", 0 ),
	PCI_ROM( 0x14e4, 0x16e7, "14e4-16E7", "14e4-16E7", 0 ),
	PCI_ROM( 0x14e4, 0x16e8, "14e4-16E8", "14e4-16E8", 0 ),
	PCI_ROM( 0x14e4, 0x16e9, "14e4-16E9", "14e4-16E9", 0 ),
	PCI_ROM( 0x14e4, 0x16ea, "14e4-16EA", "14e4-16EA", 0 ),
	PCI_ROM( 0x14e4, 0x16eb, "14e4-16EB", "14e4-16EB", 0 ),
	PCI_ROM( 0x14e4, 0x16ec, "14e4-16EC", "14e4-16EC", 0 ),
	PCI_ROM( 0x14e4, 0x16ed, "14e4-16ED", "14e4-16ED", 0 ),
	PCI_ROM( 0x14e4, 0x16ee, "14e4-16EE", "14e4-16EE", 0 ),
	PCI_ROM( 0x14e4, 0x16ef, "14e4-16EF", "14e4-16EF", 0 ),
	PCI_ROM( 0x14e4, 0x16f0, "14e4-16F0", "14e4-16F0", 0 ),
	PCI_ROM( 0x14e4, 0x16f1, "14e4-16F1", "14e4-16F1", 0 ),
	PCI_ROM( 0x14e4, 0x1750, "14e4-1750", "14e4-1750", 0 ),
	PCI_ROM( 0x14e4, 0x1751, "14e4-1751", "14e4-1751", 0 ),
	PCI_ROM( 0x14e4, 0x1752, "14e4-1752", "14e4-1752", 0 ),
	PCI_ROM( 0x14e4, 0x1760, "14e4-1760", "14e4-1760", 0 ),
	PCI_ROM( 0x14e4, 0x1800, "14e4-1800", "14e4-1800", 0 ),
	PCI_ROM( 0x14e4, 0x1801, "14e4-1801", "14e4-1801", 0 ),
	PCI_ROM( 0x14e4, 0x1802, "14e4-1802", "14e4-1802", 0 ),
	PCI_ROM( 0x14e4, 0x1803, "14e4-1803", "14e4-1803", 0 ),
	PCI_ROM( 0x14e4, 0x1804, "14e4-1804", "14e4-1804", 0 ),
	PCI_ROM( 0x14e4, 0x1805, "14e4-1805", "14e4-1805", 0 ),
	PCI_ROM( 0x14e4, 0x1806, "14e4-1806", "14e4-1806", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1807, "14e4-1807", "14e4-1807", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1808, "14e4-1808", "14e4-1808", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0x1809, "14e4-1809", "14e4-1809", BNXT_FLAG_PCI_VF ),
	PCI_ROM( 0x14e4, 0xd802, "14e4-D802", "14e4-D802", 0 ),
	PCI_ROM( 0x14e4, 0xd804, "14e4-D804", "14e4-D804", 0 ),
};
87 
88 /**
89  * Check if Virtual Function
90  */
91 u8 bnxt_is_pci_vf ( struct pci_device *pdev )
92 {
93  if ( FLAG_TEST ( pdev->id->driver_data, BNXT_FLAG_PCI_VF ) ) {
94  return 1;
95  }
96  return 0;
97 }
98 
99 static void bnxt_down_pci ( struct bnxt *bp )
100 {
101  DBGP ( "%s\n", __func__ );
102  if ( bp->bar2 ) {
103  iounmap ( bp->bar2 );
104  bp->bar2 = NULL;
105  }
106  if ( bp->bar1 ) {
107  iounmap ( bp->bar1 );
108  bp->bar1 = NULL;
109  }
110  if ( bp->bar0 ) {
111  iounmap ( bp->bar0 );
112  bp->bar0 = NULL;
113  }
114 }
115 
/**
 * Map one PCI BAR into the driver's address space.
 *
 * @v pdev	PCI device
 * @v reg	BAR configuration-space register (e.g. PCI_BASE_ADDRESS_0)
 * @ret		Mapped virtual address (NULL semantics per pci_ioremap)
 */
static void *bnxt_pci_base ( struct pci_device *pdev, unsigned int reg )
{
	unsigned long start = pci_bar_start ( pdev, reg );
	unsigned long size = pci_bar_size ( pdev, reg );

	return pci_ioremap ( pdev, start, size );
}
124 
/**
 * Gather PCI configuration data: disable INTx, read subsystem IDs and
 * function number, map BARs 0/2/4, and record whether this is a VF.
 *
 * Always returns STATUS_SUCCESS.
 */
static int bnxt_get_pci_info ( struct bnxt *bp )
{
	u16 cmd_reg = 0;

	DBGP ( "%s\n", __func__ );
	/* Disable Interrupt */
	pci_read_word16 ( bp->pdev, PCI_COMMAND, &bp->cmd_reg );
	cmd_reg = bp->cmd_reg | PCI_COMMAND_INTX_DISABLE;
	pci_write_word ( bp->pdev, PCI_COMMAND, cmd_reg );
	pci_read_word16 ( bp->pdev, PCI_COMMAND, &cmd_reg );

	/* SSVID */
	/* NOTE(review): the config-space offset argument of the next three
	 * reads appears to be missing from this view of the file — verify
	 * against the upstream source before changing anything here. */
	pci_read_word16 ( bp->pdev,
			&bp->subsystem_vendor );

	/* SSDID */
	pci_read_word16 ( bp->pdev,
			&bp->subsystem_device );

	/* Function Number */
	pci_read_byte ( bp->pdev,
			&bp->pf_num );

	/* Get Bar Address */
	bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 );
	bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 );
	bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 );

	/* Virtual function */
	bp->vf = bnxt_is_pci_vf ( bp->pdev );

	dbg_pci ( bp, __func__, cmd_reg );
	return STATUS_SUCCESS;
}
162 
163 static int bnxt_get_device_address ( struct bnxt *bp )
164 {
165  struct net_device *dev = bp->dev;
166 
167  DBGP ( "%s\n", __func__ );
168  memcpy ( &dev->hw_addr[0], ( char * )&bp->mac_addr[0], ETH_ALEN );
169  if ( !is_valid_ether_addr ( &dev->hw_addr[0] ) ) {
170  DBGP ( "- %s ( ): Failed\n", __func__ );
171  return -EINVAL;
172  }
173 
174  return STATUS_SUCCESS;
175 }
176 
177 static void bnxt_set_link ( struct bnxt *bp )
178 {
179  if ( bp->link_status == STATUS_LINK_ACTIVE )
180  netdev_link_up ( bp->dev );
181  else
182  netdev_link_down ( bp->dev );
183 }
184 
185 static void dev_p5_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag )
186 {
187  void *off;
188  u64 val;
189 
190  if ( bp->vf )
191  off = ( void * ) ( bp->bar1 + DB_OFFSET_VF );
192  else
193  off = ( void * ) ( bp->bar1 + DB_OFFSET_PF );
194 
195  val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) |
196  ( u64 )DBC_MSG_IDX ( idx );
197  write64 ( val, off );
198 }
199 
200 static void dev_p7_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag, u32 epoch, u32 toggle )
201 {
202  void *off;
203  u64 val;
204 
205  off = ( void * ) ( bp->bar1 );
206 
207  val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) |
208  ( u64 )DBC_MSG_IDX ( idx ) |
209  ( u64 )DBC_MSG_EPCH ( epoch ) |
210  ( u64 )DBC_MSG_TOGGLE ( toggle );
211  write64 ( val, off );
212 }
213 
214 static void bnxt_db_nq ( struct bnxt *bp )
215 {
216  if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
217  dev_p7_db ( bp, ( u32 )bp->nq.cons_id,
218  ( u32 )bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM,
219  ( u32 )bp->nq.epoch, 0 );
220  else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
221  dev_p5_db ( bp, ( u32 )bp->nq.cons_id,
222  ( u32 )bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM );
223  else
224  write32 ( CMPL_DOORBELL_KEY_CMPL, ( bp->bar1 + 0 ) );
225 }
226 
227 static void bnxt_db_cq ( struct bnxt *bp )
228 {
229  if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
230  dev_p7_db ( bp, ( u32 )bp->cq.cons_id,
231  ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ_ARMALL,
232  ( u32 )bp->cq.epoch, ( u32 )bp->nq.toggle );
233  else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
234  dev_p5_db ( bp, ( u32 )bp->cq.cons_id,
235  ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ_ARMALL );
236  else
237  write32 ( CQ_DOORBELL_KEY_IDX ( bp->cq.cons_id ),
238  ( bp->bar1 + 0 ) );
239 }
240 
241 static void bnxt_db_rx ( struct bnxt *bp, u32 idx )
242 {
243  if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
244  dev_p7_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ,
245  ( u32 )bp->rx.epoch, 0 );
246  else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
247  dev_p5_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ );
248  else
249  write32 ( RX_DOORBELL_KEY_RX | idx, ( bp->bar1 + 0 ) );
250 }
251 
252 static void bnxt_db_tx ( struct bnxt *bp, u32 idx )
253 {
254  if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
255  dev_p7_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ,
256  ( u32 )bp->tx.epoch, 0 );
257  else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
258  dev_p5_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ );
259  else
260  write32 ( ( u32 ) ( TX_DOORBELL_KEY_TX | idx ),
261  ( bp->bar1 + 0 ) );
262 }
263 
264 void bnxt_add_vlan ( struct io_buffer *iob, u16 vlan )
265 {
266  char *src = ( char * )iob->data;
267  u16 len = iob_len ( iob );
268 
269  memmove ( ( char * )&src[MAC_HDR_SIZE + VLAN_HDR_SIZE],
270  ( char * )&src[MAC_HDR_SIZE],
271  ( len - MAC_HDR_SIZE ) );
272 
273  * ( u16 * ) ( &src[MAC_HDR_SIZE] ) = BYTE_SWAP_S ( ETHERTYPE_VLAN );
274  * ( u16 * ) ( &src[MAC_HDR_SIZE + 2] ) = BYTE_SWAP_S ( vlan );
275  iob_put ( iob, VLAN_HDR_SIZE );
276 }
277 
278 static u16 bnxt_get_pkt_vlan ( char *src )
279 {
280  if ( * ( ( u16 * )&src[MAC_HDR_SIZE] ) == BYTE_SWAP_S ( ETHERTYPE_VLAN ) )
281  return BYTE_SWAP_S ( * ( ( u16 * )&src[MAC_HDR_SIZE + 2] ) );
282  return 0;
283 }
284 
/**
 * Extract the VLAN ID from an RX completion record, handling both the
 * v3 (P7) and legacy completion layouts.
 */
static u16 bnxt_get_rx_vlan ( struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi )
{
	struct rx_pkt_v3_cmpl *rx_cmp_v3 = ( struct rx_pkt_v3_cmpl * )rx_cmp;
	struct rx_pkt_v3_cmpl_hi *rx_cmp_hi_v3 = ( struct rx_pkt_v3_cmpl_hi * )rx_cmp_hi;
	u16 rx_vlan;

	/* Get VLAN ID from RX completion ring */
	/* NOTE(review): parts of the v3 branch (the RHS of the type
	 * comparison, the metadata mask, and the legacy metadata mask)
	 * appear to be missing from this view of the file — restore from
	 * upstream before editing. */
	if ( ( rx_cmp_v3->flags_type & RX_PKT_V3_CMPL_TYPE_MASK ) ==
		rx_vlan = ( rx_cmp_hi_v3->metadata0 &
	else
		rx_vlan = 0;
	} else {
		if ( rx_cmp_hi->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN )
			rx_vlan = ( rx_cmp_hi->metadata &
		else
			rx_vlan = 0;
	}

	return rx_vlan;
}
309 
310 int bnxt_vlan_drop ( struct bnxt *bp, u16 rx_vlan )
311 {
312  if ( rx_vlan ) {
313  if ( bp->vlan_tx ) {
314  if ( rx_vlan == bp->vlan_tx )
315  return 0;
316  } else {
317  if ( rx_vlan == bp->vlan_id )
318  return 0;
319  if ( rx_vlan && !bp->vlan_id )
320  return 0;
321  }
322  } else {
323  if ( !bp->vlan_tx && !bp->vlan_id )
324  return 0;
325  }
326 
327  return 1;
328 }
329 
330 static inline u32 bnxt_tx_avail ( struct bnxt *bp )
331 {
332  u32 avail;
333  u32 use;
334 
335  barrier ( );
336  avail = TX_AVAIL ( bp->tx.ring_cnt );
337  use = TX_IN_USE ( bp->tx.prod_id, bp->tx.cons_id, bp->tx.ring_cnt );
338  dbg_tx_avail ( bp, avail, use );
339  return ( avail-use );
340 }
341 
/**
 * Fill in the TX buffer descriptor at the given ring entry with the
 * packet's DMA address and length.
 *
 * @v entry	TX ring slot index
 * @v mapping	DMA address of the packet data
 * @v len	Packet length in bytes
 */
void bnxt_set_txq ( struct bnxt *bp, int entry, dma_addr_t mapping, int len )
{
	struct tx_bd_short *prod_bd;

	prod_bd = ( struct tx_bd_short * )BD_NOW ( bp->tx.bd_virt,
		entry, sizeof ( struct tx_bd_short ) );
	/* NOTE(review): the per-length-bucket flags_type assignments
	 * (length-hint values for <512, <1024, <2048 and larger frames)
	 * appear to be missing from this view of the file. */
	if ( len < 512 )
	else if ( len < 1024 )
	else if ( len < 2048 )
	else
	prod_bd->flags_type |= TX_BD_FLAGS;
	prod_bd->dma.addr = mapping;
	prod_bd->len = len;
	/* Echoed back in the TX completion to identify this slot */
	prod_bd->opaque = ( u32 )entry;
}
361 
362 static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx )
363 {
364  struct bnxt *bp = dev->priv;
365  struct io_buffer *iob;
366 
367  iob = bp->tx.iob[hw_idx];
368  dbg_tx_done ( iob->data, iob_len ( iob ), hw_idx );
369  netdev_tx_complete ( dev, iob );
370  bp->tx.cons_id = NEXT_IDX ( hw_idx, bp->tx.ring_cnt );
371  bp->tx.cnt++;
372  dump_tx_stat ( bp );
373 }
374 
375 int bnxt_free_rx_iob ( struct bnxt *bp )
376 {
377  unsigned int i;
378 
379  DBGP ( "%s\n", __func__ );
380  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RX_IOB ) ) )
381  return STATUS_SUCCESS;
382 
383  for ( i = 0; i < bp->rx.buf_cnt; i++ ) {
384  if ( bp->rx.iob[i] ) {
385  free_iob ( bp->rx.iob[i] );
386  bp->rx.iob[i] = NULL;
387  }
388  }
389  bp->rx.iob_cnt = 0;
390 
391  FLAG_RESET ( bp->flag_hwrm, VALID_RX_IOB );
392  return STATUS_SUCCESS;
393 }
394 
/**
 * Fill in the RX producer buffer descriptor for one ring slot.
 *
 * @v buf	Base of the RX descriptor ring
 * @v iob	I/O buffer whose data area receives the packet
 * @v cid	Ring slot index (selects the descriptor)
 * @v idx	Opaque value echoed back in the RX completion
 */
static void bnxt_set_rx_desc ( u8 *buf, struct io_buffer *iob,
		u16 cid, u32 idx )
{
	struct rx_prod_pkt_bd *desc;
	u16 off = cid * sizeof ( struct rx_prod_pkt_bd );

	desc = ( struct rx_prod_pkt_bd * )&buf[off];
	/* NOTE(review): the descriptor flags_type and len assignments
	 * appear to be missing from this view of the file. */
	desc->opaque = idx;
	desc->dma.addr = virt_to_bus ( iob->data );
}
407 
408 static int bnxt_alloc_rx_iob ( struct bnxt *bp, u16 cons_id, u16 iob_idx )
409 {
410  struct io_buffer *iob;
411 
412  iob = alloc_iob ( BNXT_RX_STD_DMA_SZ );
413  if ( !iob ) {
414  DBGP ( "- %s ( ): alloc_iob Failed\n", __func__ );
415  return -ENOMEM;
416  }
417 
418  dbg_alloc_rx_iob ( iob, iob_idx, cons_id );
419  bnxt_set_rx_desc ( ( u8 * )bp->rx.bd_virt, iob, cons_id,
420  ( u32 ) iob_idx );
421  bp->rx.iob[iob_idx] = iob;
422  return 0;
423 }
424 
425 int bnxt_post_rx_buffers ( struct bnxt *bp )
426 {
427  u16 cons_id = ( bp->rx.cons_id % bp->rx.ring_cnt );
428  u16 iob_idx;
429 
430  while ( bp->rx.iob_cnt < bp->rx.buf_cnt ) {
431  iob_idx = ( cons_id % bp->rx.buf_cnt );
432  if ( !bp->rx.iob[iob_idx] ) {
433  if ( bnxt_alloc_rx_iob ( bp, cons_id, iob_idx ) < 0 ) {
434  dbg_alloc_rx_iob_fail ( iob_idx, cons_id );
435  break;
436  }
437  }
438  cons_id = NEXT_IDX ( cons_id, bp->rx.ring_cnt );
439  /* If the ring has wrapped, flip the epoch bit */
440  if ( iob_idx > cons_id )
441  bp->rx.epoch ^= 1;
442  bp->rx.iob_cnt++;
443  }
444 
445  if ( cons_id != bp->rx.cons_id ) {
446  dbg_rx_cid ( bp->rx.cons_id, cons_id );
447  bp->rx.cons_id = cons_id;
448  bnxt_db_rx ( bp, ( u32 )cons_id );
449  }
450 
451  FLAG_SET ( bp->flag_hwrm, VALID_RX_IOB );
452  return STATUS_SUCCESS;
453 }
454 
/**
 * Validate a received frame and decide its fate.
 *
 * Checks hardware error flags, drops looped-back frames (source MAC
 * equals our own), and applies VLAN filtering.  On acceptance the I/O
 * buffer length is set and a VLAN tag is re-inserted if needed.
 *
 * @ret		0 = keep; 1 = HW error; 2 = loopback; 3 = VLAN filtered
 */
u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob,
		struct rx_pkt_cmpl *rx_cmp,
		struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len )
{
	struct rx_pkt_v3_cmpl *rx_cmp_v3 = ( struct rx_pkt_v3_cmpl * )rx_cmp;
	struct rx_pkt_v3_cmpl_hi *rx_cmp_hi_v3 = ( struct rx_pkt_v3_cmpl_hi * )rx_cmp_hi;
	u8 *rx_buf = ( u8 * )iob->data;
	u16 err_flags, rx_vlan;
	u8 ignore_chksum_err = 0;
	int i;

	/* NOTE(review): the RHS of the v3 type comparison appears to be
	 * missing from this view of the file — verify against upstream. */
	if ( ( rx_cmp_v3->flags_type & RX_PKT_V3_CMPL_TYPE_MASK ) ==
		err_flags = rx_cmp_hi_v3->errors_v2 >> RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_SFT;
	} else
		err_flags = rx_cmp_hi->errors_v2 >> RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT;
	/* Checksum-only error codes are tolerated */
	if ( rx_cmp_hi->errors_v2 == 0x20 || rx_cmp_hi->errors_v2 == 0x21 )
		ignore_chksum_err = 1;

	if ( err_flags && !ignore_chksum_err ) {
		bp->rx.drop_err++;
		return 1;
	}

	/* Compare the frame's source MAC (bytes 6..11) with our own */
	for ( i = 0; i < 6; i++ ) {
		if ( rx_buf[6 + i] != bp->mac_addr[i] )
			break;
	}

	/* Drop the loopback packets */
	if ( i == 6 ) {
		bp->rx.drop_lb++;
		return 2;
	}

	rx_vlan = bnxt_get_rx_vlan ( rx_cmp, rx_cmp_hi );
	dbg_rx_vlan ( bp, rx_cmp_hi->metadata, rx_cmp_hi->flags2, rx_vlan );
	if ( bnxt_vlan_drop ( bp, rx_vlan ) ) {
		bp->rx.drop_vlan++;
		return 3;
	}
	iob_put ( iob, rx_len );

	if ( rx_vlan )
		bnxt_add_vlan ( iob, rx_vlan );

	bp->rx.good++;
	return 0;
}
504 
505 static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt )
506 {
507  u16 cons_id;
508 
509  cons_id = bp->cq.cons_id + cnt;
510  if ( cons_id >= bp->cq.ring_cnt) {
511  /* Toggle completion bit when the ring wraps. */
512  bp->cq.completion_bit ^= 1;
513  bp->cq.epoch ^= 1;
514  cons_id = cons_id - bp->cq.ring_cnt;
515  }
516  bp->cq.cons_id = cons_id;
517 }
518 
519 void bnxt_rx_process ( struct net_device *dev, struct bnxt *bp,
520  struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi )
521 {
522  u32 desc_idx = rx_cmp->opaque;
523  struct io_buffer *iob = bp->rx.iob[desc_idx];
524  u8 drop;
525 
526  dump_rx_bd ( rx_cmp, rx_cmp_hi, desc_idx );
527  assert ( !iob );
528  drop = bnxt_rx_drop ( bp, iob, rx_cmp, rx_cmp_hi, rx_cmp->len );
529  dbg_rxp ( iob->data, rx_cmp->len, drop );
530  if ( drop )
531  netdev_rx_err ( dev, iob, -EINVAL );
532  else
533  netdev_rx ( dev, iob );
534 
535  bp->rx.cnt++;
536  bp->rx.iob[desc_idx] = NULL;
537  bp->rx.iob_cnt--;
539  bnxt_adv_cq_index ( bp, 2 ); /* Rx completion is 2 entries. */
540  dbg_rx_stat ( bp );
541 }
542 
/**
 * Check whether the RX completion pair at the CQ consumer index is
 * valid (phase bit matches) and, if so, process it.
 *
 * The second half of the two-entry completion may wrap to the start
 * of the ring, in which case the expected phase bit is flipped.
 */
static int bnxt_rx_complete ( struct net_device *dev,
		struct rx_pkt_cmpl *rx_cmp )
{
	struct bnxt *bp = dev->priv;
	struct rx_pkt_cmpl_hi *rx_cmp_hi;
	u8 cmpl_bit = bp->cq.completion_bit;

	if ( bp->cq.cons_id == ( bp->cq.ring_cnt - 1 ) ) {
		rx_cmp_hi = ( struct rx_pkt_cmpl_hi * )bp->cq.bd_virt;
		cmpl_bit ^= 0x1; /* Ring has wrapped. */
	} else
		rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) ( rx_cmp+1 );

	if ( ! ( ( rx_cmp_hi->errors_v2 & RX_PKT_CMPL_V2 ) ^ cmpl_bit ) ) {
		bnxt_rx_process ( dev, bp, rx_cmp, rx_cmp_hi );
		return SERVICE_NEXT_CQ_BD;
	} else
	/* NOTE(review): the else-branch return statement appears to be
	 * missing from this view of the file. */
}
562 
/**
 * One-time memory initialisation: zero the HWRM buffers, record their
 * bus addresses, and set default link/ring parameters.
 *
 * @v func	Caller name, forwarded to dbg_mem() for tracing
 */
void bnxt_mm_init ( struct bnxt *bp, const char *func )
{
	DBGP ( "%s\n", __func__ );
	memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE );
	memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE );
	memset ( bp->hwrm_addr_dma, 0, DMA_BUFFER_SIZE );
	bp->req_addr_mapping = virt_to_bus ( bp->hwrm_addr_req );
	bp->resp_addr_mapping = virt_to_bus ( bp->hwrm_addr_resp );
	bp->dma_addr_mapping = virt_to_bus ( bp->hwrm_addr_dma );
	bp->link_status = STATUS_LINK_DOWN;
	bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
	/* NOTE(review): one initialisation line appears to be missing
	 * from this view of the file at this point. */
	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bp->nq.ring_cnt = MAX_NQ_DESC_CNT;
	bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
	bp->tx.ring_cnt = MAX_TX_DESC_CNT;
	bp->rx.ring_cnt = MAX_RX_DESC_CNT;
	bp->rx.buf_cnt = NUM_RX_BUFFERS;
	dbg_mem ( bp, func );
}
583 
/**
 * Re-initialise all ring state for (re)opening the NIC: zero the ring
 * buffers and reset producer/consumer indices, phase bits and defaults.
 */
void bnxt_mm_nic ( struct bnxt *bp )
{
	DBGP ( "%s\n", __func__ );
	memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE );
	memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE );
	memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE );
	memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE );
	bp->nq.cons_id = 0;
	bp->nq.completion_bit = 0x1;
	bp->nq.epoch = 0;
	bp->nq.toggle = 0;
	bp->cq.cons_id = 0;
	bp->cq.completion_bit = 0x1;
	bp->cq.epoch = 0;
	bp->tx.prod_id = 0;
	bp->tx.cons_id = 0;
	bp->tx.epoch = 0;
	bp->rx.cons_id = 0;
	bp->rx.iob_cnt = 0;
	bp->rx.epoch = 0;

	bp->link_status = STATUS_LINK_DOWN;
	bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
	/* NOTE(review): one initialisation line appears to be missing
	 * from this view of the file at this point. */
	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bp->nq.ring_cnt = MAX_NQ_DESC_CNT;
	bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
	bp->tx.ring_cnt = MAX_TX_DESC_CNT;
	bp->rx.ring_cnt = MAX_RX_DESC_CNT;
	bp->rx.buf_cnt = NUM_RX_BUFFERS;
}
615 
616 void bnxt_free_mem ( struct bnxt *bp )
617 {
618  DBGP ( "%s\n", __func__ );
619  if ( bp->nq.bd_virt ) {
620  free_phys ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
621  bp->nq.bd_virt = NULL;
622  }
623 
624  if ( bp->cq.bd_virt ) {
625  free_phys ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
626  bp->cq.bd_virt = NULL;
627  }
628 
629  if ( bp->rx.bd_virt ) {
630  free_phys ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
631  bp->rx.bd_virt = NULL;
632  }
633 
634  if ( bp->tx.bd_virt ) {
635  free_phys ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
636  bp->tx.bd_virt = NULL;
637  }
638 
639  if ( bp->hwrm_addr_dma ) {
640  free_phys ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
641  bp->dma_addr_mapping = 0;
642  bp->hwrm_addr_dma = NULL;
643  }
644 
645  if ( bp->hwrm_addr_resp ) {
646  free_phys ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
647  bp->resp_addr_mapping = 0;
648  bp->hwrm_addr_resp = NULL;
649  }
650 
651  if ( bp->hwrm_addr_req ) {
652  free_phys ( bp->hwrm_addr_req, REQ_BUFFER_SIZE );
653  bp->req_addr_mapping = 0;
654  bp->hwrm_addr_req = NULL;
655  }
656  DBGP ( "- %s ( ): - Done\n", __func__ );
657 }
658 
/**
 * Allocate all driver physical memory: HWRM request/response/DMA
 * buffers and the descriptor rings.  On any failure everything is
 * released again via bnxt_free_mem().
 *
 * @ret		STATUS_SUCCESS or -ENOMEM
 */
int bnxt_alloc_mem ( struct bnxt *bp )
{
	DBGP ( "%s\n", __func__ );
	bp->hwrm_addr_req = malloc_phys ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
	/* NOTE(review): the alignment argument of the next call and the
	 * nq/cq ring allocations appear to be missing from this view of
	 * the file. */
	bp->hwrm_addr_resp = malloc_phys ( RESP_BUFFER_SIZE,
	bp->hwrm_addr_dma = malloc_phys ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
	bp->tx.bd_virt = malloc_phys ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
	bp->rx.bd_virt = malloc_phys ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
	test_if ( bp->hwrm_addr_req &&
		bp->hwrm_addr_resp &&
		bp->hwrm_addr_dma &&
		bp->tx.bd_virt &&
		bp->rx.bd_virt &&
		bp->nq.bd_virt &&
		bp->cq.bd_virt ) {
		bnxt_mm_init ( bp, __func__ );
		return STATUS_SUCCESS;
	}

	DBGP ( "- %s ( ): Failed\n", __func__ );
	bnxt_free_mem ( bp );
	return -ENOMEM;
}
685 
686 static void hwrm_init ( struct bnxt *bp, struct input *req, u16 cmd, u16 len )
687 {
688  memset ( req, 0, len );
689  req->req_type = cmd;
690  req->cmpl_ring = ( u16 )HWRM_NA_SIGNATURE;
691  req->target_id = ( u16 )HWRM_NA_SIGNATURE;
692  req->resp_addr = bp->resp_addr_mapping;
693  req->seq_id = bp->seq_id++;
694 }
695 
696 static void hwrm_write_req ( struct bnxt *bp, void *req, u32 cnt )
697 {
698  u32 i = 0;
699 
700  for ( i = 0; i < cnt; i++ ) {
701  write32 ( ( ( u32 * )req )[i],
702  ( bp->bar0 + GRC_COM_CHAN_BASE + ( i * 4 ) ) );
703  }
704  write32 ( 0x1, ( bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG ) );
705 }
706 
/**
 * Send a request using the HWRM "short command" format: a small
 * descriptor referencing the full request by bus address.
 *
 * @v len	Length of the full request in the request buffer
 */
static void short_hwrm_cmd_req ( struct bnxt *bp, u16 len )
{
	struct hwrm_short_input sreq;

	memset ( &sreq, 0, sizeof ( struct hwrm_short_input ) );
	sreq.req_type = ( u16 ) ( ( struct input * )bp->hwrm_addr_req )->req_type;
	/* NOTE(review): one field assignment (likely the short-command
	 * signature) appears to be missing from this view of the file. */
	sreq.size = len;
	sreq.req_addr = bp->req_addr_mapping;
	mdelay ( 100 );
	dbg_short_cmd ( ( u8 * )&sreq, __func__,
		sizeof ( struct hwrm_short_input ) );
	hwrm_write_req ( bp, &sreq, sizeof ( struct hwrm_short_input ) / 4 );
}
721 
/**
 * Issue the prepared HWRM request and poll for its response.
 *
 * Matches the response by sequence id and request type, and treats the
 * last response byte reaching 1 as the completion marker.  Returns the
 * firmware error code, or STATUS_TIMEOUT if no response arrives.
 *
 * @v tmo	Timeout base, scaled by HWRM_CMD_DEFAULT_MULTIPLAYER
 * @v len	Request length in bytes
 * @v func	Caller name for debug tracing
 */
static int wait_resp ( struct bnxt *bp, u32 tmo, u16 len, const char *func )
{
	struct input *req = ( struct input * )bp->hwrm_addr_req;
	struct output *resp = ( struct output * )bp->hwrm_addr_resp;
	u8 *ptr = ( u8 * )resp;
	u32 idx;
	u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER ( ( u32 )tmo );
	u16 resp_len = 0;
	u16 ret = STATUS_TIMEOUT;

	/* NOTE(review): the oversized-request branch body (presumably the
	 * short-command path) appears to be missing from this view. */
	if ( len > bp->hwrm_max_req_len )
	else
		hwrm_write_req ( bp, req, ( u32 ) ( len / 4 ) );

	for ( idx = 0; idx < wait_cnt; idx++ ) {
		resp_len = resp->resp_len;
		test_if ( resp->seq_id == req->seq_id &&
			resp->req_type == req->req_type &&
			ptr[resp_len - 1] == 1 ) {
			bp->last_resp_code = resp->error_code;
			ret = resp->error_code;
			break;
		}
		/* NOTE(review): the per-iteration delay appears to be
		 * missing from this view of the file. */
	}
	dbg_hw_cmd ( bp, func, len, resp_len, tmo, ret );
	return ( int )ret;
}
751 
/**
 * HWRM_VER_GET: query firmware/interface versions and derive the chip
 * identity, command timeout, maximum request length, and the
 * P5/P7 chip-generation flags used throughout the driver.
 */
static int bnxt_hwrm_ver_get ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_ver_get_input );
	struct hwrm_ver_get_input *req;
	struct hwrm_ver_get_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_ver_get_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_ver_get_output * )bp->hwrm_addr_resp;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VER_GET, cmd_len );
	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req->hwrm_intf_min = HWRM_VERSION_MINOR;
	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
	rc = wait_resp ( bp, HWRM_CMD_DEFAULT_TIMEOUT, cmd_len, __func__ );
	if ( rc )
		return STATUS_FAILURE;

	/* Pack interface version as 0x00MMmmuu */
	bp->hwrm_spec_code =
		resp->hwrm_intf_maj_8b << 16 |
		resp->hwrm_intf_min_8b << 8 |
		resp->hwrm_intf_upd_8b;
	bp->hwrm_cmd_timeout = ( u32 )resp->def_req_timeout;
	if ( !bp->hwrm_cmd_timeout )
		bp->hwrm_cmd_timeout = ( u32 )HWRM_CMD_DEFAULT_TIMEOUT;
	if ( resp->hwrm_intf_maj_8b >= 1 )
		bp->hwrm_max_req_len = resp->max_req_win_len;
	bp->chip_id =
		resp->chip_rev << 24 |
		resp->chip_metal << 16 |
		resp->chip_bond_id << 8 |
		resp->chip_platform_type;
	bp->chip_num = resp->chip_num;
	/* NOTE(review): the body of this short-command test and two
	 * FLAG_SET lines in the branches below appear to be missing from
	 * this view of the file. */
	test_if ( ( resp->dev_caps_cfg & SHORT_CMD_SUPPORTED ) &&
		( resp->dev_caps_cfg & SHORT_CMD_REQUIRED ) )
	bp->hwrm_max_ext_req_len = resp->max_ext_req_len;
	if ( ( bp->chip_num == CHIP_NUM_57508 ) ||
		( bp->chip_num == CHIP_NUM_57504 ) ||
		( bp->chip_num == CHIP_NUM_57502 ) ) {
		FLAG_SET ( bp->flags, BNXT_FLAG_IS_CHIP_P5 );
	}
	if ( bp->chip_num == CHIP_NUM_57608 ) {
		FLAG_SET ( bp->flags, BNXT_FLAG_IS_CHIP_P7 );
	}
	dbg_fw_ver ( resp, bp->hwrm_cmd_timeout );
	return STATUS_SUCCESS;
}
802 
/**
 * HWRM_FUNC_RESOURCE_QCAPS: read the min/max resource limits (rings,
 * vNICs, contexts, MSI-X, VFs) granted to this function and cache them
 * in the driver state.
 *
 * A command failure is treated as non-fatal (returns STATUS_SUCCESS
 * either way).
 */
static int bnxt_hwrm_func_resource_qcaps ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_resource_qcaps_input );
	struct hwrm_func_resource_qcaps_input *req;
	struct hwrm_func_resource_qcaps_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_func_resource_qcaps_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_func_resource_qcaps_output * )bp->hwrm_addr_resp;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESOURCE_QCAPS,
		cmd_len );
	req->fid = ( u16 )HWRM_NA_SIGNATURE;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc != STATUS_SUCCESS )
		return STATUS_SUCCESS;

	/* NOTE(review): one or two lines (likely a flag set recording
	 * resource-qcaps support) appear to be missing from this view. */

	// VFs
	if ( !bp->vf ) {
		bp->max_vfs = resp->max_vfs;
		bp->vf_res_strategy = resp->vf_reservation_strategy;
	}

	// vNICs
	bp->min_vnics = resp->min_vnics;
	bp->max_vnics = resp->max_vnics;

	// MSI-X
	bp->max_msix = resp->max_msix;

	// Ring Groups
	bp->min_hw_ring_grps = resp->min_hw_ring_grps;
	bp->max_hw_ring_grps = resp->max_hw_ring_grps;

	// TX Rings
	bp->min_tx_rings = resp->min_tx_rings;
	bp->max_tx_rings = resp->max_tx_rings;

	// RX Rings
	bp->min_rx_rings = resp->min_rx_rings;
	bp->max_rx_rings = resp->max_rx_rings;

	// Completion Rings
	bp->min_cp_rings = resp->min_cmpl_rings;
	bp->max_cp_rings = resp->max_cmpl_rings;

	// RSS Contexts
	bp->min_rsscos_ctxs = resp->min_rsscos_ctx;
	bp->max_rsscos_ctxs = resp->max_rsscos_ctx;

	// L2 Contexts
	bp->min_l2_ctxs = resp->min_l2_ctxs;
	bp->max_l2_ctxs = resp->max_l2_ctxs;

	// Statistic Contexts
	bp->min_stat_ctxs = resp->min_stat_ctx;
	bp->max_stat_ctxs = resp->max_stat_ctx;
	return STATUS_SUCCESS;
}
865 
/**
 * Choose the ring counts to request from firmware: the driver
 * defaults, clamped down to the function's minimum guarantees when
 * those minimums are at or below the defaults.
 *
 * @ret		FUNC_CFG enables bitmask for the chosen fields
 */
static u32 bnxt_set_ring_info ( struct bnxt *bp )
{
	u32 enables = 0;

	DBGP ( "%s\n", __func__ );
	bp->num_cmpl_rings = DEFAULT_NUMBER_OF_CMPL_RINGS;
	bp->num_tx_rings = DEFAULT_NUMBER_OF_TX_RINGS;
	bp->num_rx_rings = DEFAULT_NUMBER_OF_RX_RINGS;
	bp->num_hw_ring_grps = DEFAULT_NUMBER_OF_RING_GRPS;
	bp->num_stat_ctxs = DEFAULT_NUMBER_OF_STAT_CTXS;

	if ( bp->min_cp_rings <= DEFAULT_NUMBER_OF_CMPL_RINGS )
		bp->num_cmpl_rings = bp->min_cp_rings;

	if ( bp->min_tx_rings <= DEFAULT_NUMBER_OF_TX_RINGS )
		bp->num_tx_rings = bp->min_tx_rings;

	if ( bp->min_rx_rings <= DEFAULT_NUMBER_OF_RX_RINGS )
		bp->num_rx_rings = bp->min_rx_rings;

	if ( bp->min_hw_ring_grps <= DEFAULT_NUMBER_OF_RING_GRPS )
		bp->num_hw_ring_grps = bp->min_hw_ring_grps;

	if ( bp->min_stat_ctxs <= DEFAULT_NUMBER_OF_STAT_CTXS )
		bp->num_stat_ctxs = bp->min_stat_ctxs;

	dbg_num_rings ( bp );
	/* NOTE(review): the lines that OR the FUNC_CFG enable bits into
	 * 'enables' appear to be missing from this view of the file. */
	return enables;
}
900 
/**
 * Populate the pending HWRM_FUNC_CFG request with the ring counts
 * selected by bnxt_set_ring_info().
 */
static void bnxt_hwrm_assign_resources ( struct bnxt *bp )
{
	struct hwrm_func_cfg_input *req;
	u32 enables = 0;

	DBGP ( "%s\n", __func__ );
	/* NOTE(review): the lines computing 'enables' (presumably via
	 * bnxt_set_ring_info()) appear to be missing from this view. */

	req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
	req->num_cmpl_rings = bp->num_cmpl_rings;
	req->num_tx_rings = bp->num_tx_rings;
	req->num_rx_rings = bp->num_rx_rings;
	req->num_stat_ctxs = bp->num_stat_ctxs;
	req->num_hw_ring_grps = bp->num_hw_ring_grps;
	req->enables = enables;
}
918 
919 static int bnxt_hwrm_func_qcaps_req ( struct bnxt *bp )
920 {
921  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcaps_input );
922  struct hwrm_func_qcaps_input *req;
923  struct hwrm_func_qcaps_output *resp;
924  int rc;
925 
926  DBGP ( "%s\n", __func__ );
927  if ( bp->vf )
928  return STATUS_SUCCESS;
929 
930  req = ( struct hwrm_func_qcaps_input * )bp->hwrm_addr_req;
931  resp = ( struct hwrm_func_qcaps_output * )bp->hwrm_addr_resp;
932  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCAPS, cmd_len );
933  req->fid = ( u16 )HWRM_NA_SIGNATURE;
934  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
935  if ( rc ) {
936  DBGP ( "- %s ( ): Failed\n", __func__ );
937  return STATUS_FAILURE;
938  }
939 
940  bp->fid = resp->fid;
941  bp->port_idx = ( u8 )resp->port_id;
942 
943  /* Get MAC address for this PF */
944  memcpy ( &bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN );
945  dbg_func_qcaps ( bp );
946  return STATUS_SUCCESS;
947 }
948 
/**
 * HWRM_FUNC_QCFG: query the function's current configuration.  Sets
 * the multi-host / NPAR flags, PCI ordinal and stat context id; for
 * VFs it additionally supplies fid, port, VLAN and MAC address.
 */
static int bnxt_hwrm_func_qcfg_req ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcfg_input );
	struct hwrm_func_qcfg_input *req;
	struct hwrm_func_qcfg_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_func_qcfg_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_func_qcfg_output * )bp->hwrm_addr_resp;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCFG, cmd_len );
	req->fid = ( u16 )HWRM_NA_SIGNATURE;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc ) {
		DBGP ( "- %s ( ): Failed\n", __func__ );
		return STATUS_FAILURE;
	}

	/* NOTE(review): the condition guarding this FLAG_SET and the
	 * constant tested below appear to be missing from this view. */
	FLAG_SET ( bp->flags, BNXT_FLAG_MULTI_HOST );

	if ( resp->port_partition_type &
		FLAG_SET ( bp->flags, BNXT_FLAG_NPAR_MODE );

	bp->ordinal_value = ( u8 )resp->pci_id & 0x0F;
	bp->stat_ctx_id = resp->stat_ctx_id;

	/* If VF is set to TRUE, then use some data from func_qcfg ( ). */
	if ( bp->vf ) {
		bp->fid = resp->fid;
		bp->port_idx = ( u8 )resp->port_id;
		bp->vlan_id = resp->vlan;

		/* Get MAC address for this VF */
		memcpy ( bp->mac_addr, resp->mac_address, ETH_ALEN );
	}
	dbg_func_qcfg ( bp );
	return STATUS_SUCCESS;
}
989 
/**
 * HWRM_PORT_PHY_QCAPS: query PHY capabilities and set the
 * BNXT_FLAG_LINK_SPEEDS2 flag when supported.
 */
static int bnxt_hwrm_port_phy_qcaps_req ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_qcaps_input );
	struct hwrm_port_phy_qcaps_input *req;
	struct hwrm_port_phy_qcaps_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );

	req = ( struct hwrm_port_phy_qcaps_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_port_phy_qcaps_output * )bp->hwrm_addr_resp;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCAPS, cmd_len );
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc ) {
		DBGP ( "-s %s ( ): Failed\n", __func__ );
		return STATUS_FAILURE;
	}

	/* NOTE(review): the condition guarding this FLAG_SET appears to
	 * be missing from this view of the file. */
	FLAG_SET ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 );

	return STATUS_SUCCESS;
}
1013 
/**
 * HWRM_FUNC_RESET: ask firmware to reset this function.  Uses an
 * extended (6x) command timeout.
 */
static int bnxt_hwrm_func_reset_req ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_reset_input );
	struct hwrm_func_reset_input *req;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_func_reset_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESET, cmd_len );
	/* NOTE(review): the PF-only body of this test (likely the
	 * func_reset_level assignment) appears to be missing from this
	 * view of the file. */
	if ( !bp->vf )

	return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
}
1027 
/* Configure the function ( HWRM_FUNC_CFG ). PF-only; VFs skip this. */
static int bnxt_hwrm_func_cfg_req ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input );
	struct hwrm_func_cfg_input *req;

	DBGP ( "%s\n", __func__ );
	if ( bp->vf )
		return STATUS_SUCCESS;

	req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
	/* HWRM_NA_SIGNATURE fid means "this function" */
	req->fid = ( u16 )HWRM_NA_SIGNATURE;
	/* NOTE(review): lines elided in extraction here and inside the
	 * P5+ branch ( req->enables setup and additional resource counts,
	 * per upstream iPXE ) — restore before building. */
	if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) {
		req->num_msix = 1;
		req->num_vnics = 1;
	}
	return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
}
1051 
/* Register this driver with firmware ( HWRM_FUNC_DRV_RGTR ) and enable
 * forwarding of async event completions; records VALID_DRIVER_REG on
 * success so teardown knows to unregister. */
static int bnxt_hwrm_func_drv_rgtr ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_rgtr_input );
	struct hwrm_func_drv_rgtr_input *req;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_func_drv_rgtr_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_RGTR, cmd_len );

	/* Register with HWRM */
	/* NOTE(review): lines elided in extraction around here
	 * ( req->enables and req->os_type setup, per upstream iPXE ) —
	 * restore before building. */
	req->async_event_fwd[0] |= 0x01;
	req->ver_maj = IPXE_VERSION_MAJOR;
	req->ver_min = IPXE_VERSION_MINOR;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc ) {
		DBGP ( "- %s ( ): Failed\n", __func__ );
		return STATUS_FAILURE;
	}

	FLAG_SET ( bp->flag_hwrm, VALID_DRIVER_REG );
	return STATUS_SUCCESS;
}
1080 
/* Unregister the driver ( HWRM_FUNC_DRV_UNRGTR ); no-op unless a prior
 * registration set VALID_DRIVER_REG. */
static int bnxt_hwrm_func_drv_unrgtr ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_unrgtr_input );
	struct hwrm_func_drv_unrgtr_input *req;
	int rc;

	DBGP ( "%s\n", __func__ );
	if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_DRIVER_REG ) ) )
		return STATUS_SUCCESS;

	req = ( struct hwrm_func_drv_unrgtr_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_UNRGTR, cmd_len );
	/* NOTE(review): one line elided in extraction here ( a req->flags
	 * assignment, per upstream iPXE ) — restore before building. */
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc )
		return STATUS_FAILURE;

	FLAG_RESET ( bp->flag_hwrm, VALID_DRIVER_REG );
	return STATUS_SUCCESS;
}
1101 
/* Point firmware async event completions at our ring: the NQ ring on
 * P5+ chips, otherwise the CQ ring. VFs use HWRM_FUNC_VF_CFG ( which
 * also sets MTU, VLAN and MAC ); PFs use HWRM_FUNC_CFG. */
static int bnxt_hwrm_set_async_event ( struct bnxt *bp )
{
	int rc;
	u16 idx;

	DBGP ( "%s\n", __func__ );
	if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) )
		idx = bp->nq_ring_id;
	else
		idx = bp->cq_ring_id;
	if ( bp->vf ) {
		u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_vf_cfg_input );
		struct hwrm_func_vf_cfg_input *req;

		req = ( struct hwrm_func_vf_cfg_input * )bp->hwrm_addr_req;
		hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_VF_CFG,
			cmd_len );
		/* NOTE(review): a req->enables assignment was elided in
		 * extraction here — restore before building. */
		req->async_event_cr = idx;
		req->mtu = bp->mtu;
		req->guest_vlan = bp->vlan_id;
		memcpy ( ( char * )&req->dflt_mac_addr[0], bp->mac_addr,
			ETH_ALEN );
		rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	} else {
		u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input );
		struct hwrm_func_cfg_input *req;

		req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
		hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
		req->fid = ( u16 )HWRM_NA_SIGNATURE;
		/* NOTE(review): a req->enables assignment was elided in
		 * extraction here — restore before building. */
		req->async_event_cr = idx;
		rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	}
	return rc;
}
1139 
/* Allocate an L2 MAC filter ( HWRM_CFA_L2_FILTER_ALLOC ) for our unicast
 * address and attach it to the VNIC; saves the returned filter id and
 * sets VALID_L2_FILTER for later teardown. */
static int bnxt_hwrm_cfa_l2_filter_alloc ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_alloc_input );
	struct hwrm_cfa_l2_filter_alloc_input *req;
	struct hwrm_cfa_l2_filter_alloc_output *resp;
	int rc;
	/* NOTE(review): the 'u32 flags' declaration and the VF/PF-specific
	 * flags/enables setup lines were elided in extraction ( 'flags' is
	 * used below but never declared here ) — restore from upstream
	 * before building. */
	u32 enables;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_cfa_l2_filter_alloc_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_cfa_l2_filter_alloc_output * )bp->hwrm_addr_resp;
	if ( bp->vf )

	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_ALLOC,
		cmd_len );
	req->flags = flags;
	req->enables = enables;
	memcpy ( ( char * )&req->l2_addr[0], ( char * )&bp->mac_addr[0],
		ETH_ALEN );
	memset ( ( char * )&req->l2_addr_mask[0], 0xff, ETH_ALEN );
	/* PF also programs the tunnel L2 address/mask */
	if ( !bp->vf ) {
		memcpy ( ( char * )&req->t_l2_addr[0], bp->mac_addr, ETH_ALEN );
		memset ( ( char * )&req->t_l2_addr_mask[0], 0xff, ETH_ALEN );
	}
	req->src_id = ( u32 )bp->port_idx;
	req->dst_id = bp->vnic_id;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc )
		return STATUS_FAILURE;

	FLAG_SET ( bp->flag_hwrm, VALID_L2_FILTER );
	bp->l2_filter_id = resp->l2_filter_id;
	return STATUS_SUCCESS;
}
1180 
1181 static int bnxt_hwrm_cfa_l2_filter_free ( struct bnxt *bp )
1182 {
1183  u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_free_input );
1184  struct hwrm_cfa_l2_filter_free_input *req;
1185  int rc;
1186 
1187  DBGP ( "%s\n", __func__ );
1188  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_L2_FILTER ) ) )
1189  return STATUS_SUCCESS;
1190 
1191  req = ( struct hwrm_cfa_l2_filter_free_input * )bp->hwrm_addr_req;
1192  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_FREE,
1193  cmd_len );
1194  req->l2_filter_id = bp->l2_filter_id;
1195  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1196  if ( rc ) {
1197  DBGP ( "- %s ( ): Failed\n", __func__ );
1198  return STATUS_FAILURE;
1199  }
1200 
1201  FLAG_RESET ( bp->flag_hwrm, VALID_L2_FILTER );
1202  return STATUS_SUCCESS;
1203 }
1204 
/* Translate driver RX_MASK_* bits into the HWRM rx-mask encoding.
 * Returns 0 for an empty mask or for RX_MASK_ACCEPT_NONE. */
u32 set_rx_mask ( u32 rx_mask )
{
	u32 mask = 0;

	if ( !rx_mask )
		return mask;

	/* NOTE(review): the 'mask |= ...' statements under each of the
	 * three conditions below ( and an unconditional base mask line )
	 * were elided in extraction — as written every 'if' is empty.
	 * Restore from upstream before building. */
	if ( rx_mask != RX_MASK_ACCEPT_NONE ) {
		if ( rx_mask & RX_MASK_ACCEPT_MULTICAST )
		if ( rx_mask & RX_MASK_ACCEPT_ALL_MULTICAST )
		if ( rx_mask & RX_MASK_PROMISCUOUS_MODE )
	}
	return mask;
}
1223 
1224 static int bnxt_hwrm_set_rx_mask ( struct bnxt *bp, u32 rx_mask )
1225 {
1226  u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_set_rx_mask_input );
1227  struct hwrm_cfa_l2_set_rx_mask_input *req;
1228  u32 mask = set_rx_mask ( rx_mask );
1229 
1230  req = ( struct hwrm_cfa_l2_set_rx_mask_input * )bp->hwrm_addr_req;
1231  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_SET_RX_MASK,
1232  cmd_len );
1233  req->vnic_id = bp->vnic_id;
1234  req->mask = mask;
1235 
1236  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1237 }
1238 
1239 static int bnxt_hwrm_port_phy_qcfg ( struct bnxt *bp, u16 idx )
1240 {
1241  u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_qcfg_input );
1242  struct hwrm_port_phy_qcfg_input *req;
1243  struct hwrm_port_phy_qcfg_output *resp;
1244  int rc;
1245 
1246  DBGP ( "%s\n", __func__ );
1247  req = ( struct hwrm_port_phy_qcfg_input * )bp->hwrm_addr_req;
1248  resp = ( struct hwrm_port_phy_qcfg_output * )bp->hwrm_addr_resp;
1249  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCFG, cmd_len );
1250  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1251  if ( rc ) {
1252  DBGP ( "- %s ( ): Failed\n", __func__ );
1253  return STATUS_FAILURE;
1254  }
1255 
1256  if ( idx & SUPPORT_SPEEDS )
1257  bp->support_speeds = resp->support_speeds;
1258 
1259  if ( idx & SUPPORT_SPEEDS2 )
1260  if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) )
1261  bp->auto_link_speeds2_mask = resp->auto_link_speeds2;
1262 
1263  if ( idx & DETECT_MEDIA )
1264  bp->media_detect = resp->module_status;
1265 
1266  if ( idx & PHY_SPEED )
1267  bp->current_link_speed = resp->link_speed;
1268 
1269  if ( idx & PHY_STATUS ) {
1270  if ( resp->link == PORT_PHY_QCFG_RESP_LINK_LINK )
1271  bp->link_status = STATUS_LINK_ACTIVE;
1272  else
1273  bp->link_status = STATUS_LINK_DOWN;
1274  }
1275  return STATUS_SUCCESS;
1276 }
1277 
/* NOTE(review): the opening line of this definition ( the function name
 * and first parameter, "static int bnxt_hwrm_nvm_get_variable_req
 * ( struct bnxt *bp," per upstream iPXE ) was lost in extraction —
 * restore before building. Reads an NVM option into the DMA buffer. */
	u16 data_len, u16 option_num, u16 dimensions, u16 index_0 )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_nvm_get_variable_input );
	struct hwrm_nvm_get_variable_input *req;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_nvm_get_variable_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_NVM_GET_VARIABLE, cmd_len );
	/* Result is DMA'd into the driver's scratch buffer */
	req->dest_data_addr = bp->dma_addr_mapping;
	req->data_len = data_len;
	req->option_num = option_num;
	req->dimensions = dimensions;
	req->index_0 = index_0;
	/* NVM access is slow; scale the timeout */
	return wait_resp ( bp,
		HWRM_CMD_FLASH_MULTIPLAYER ( bp->hwrm_cmd_timeout ),
		cmd_len, __func__ );
}
1296 
/* Read configured link speed settings from NVM and translate them into
 * the driver's medium/speed encoding.
 * NOTE(review): many lines were elided in extraction throughout this
 * function — the bnxt_hwrm_nvm_get_variable_req call headers, a shift
 * constant, several 'case' labels and their SET_MEDIUM_SPEED bodies.
 * The remaining fragments below are kept verbatim; restore the full
 * function from upstream iPXE before building. */
static int bnxt_get_link_speed ( struct bnxt *bp )
{
	u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma;

	DBGP ( "%s\n", __func__ );
	if ( ! ( FLAG_TEST (bp->flags, BNXT_FLAG_IS_CHIP_P7 ) ) ) {
		1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
		return STATUS_FAILURE;
	bp->link_set = SET_LINK ( *ptr32, SPEED_DRV_MASK, SPEED_DRV_SHIFT );
		( u16 )D3_LINK_SPEED_FW_NUM, 1,
		( u16 )bp->port_idx ) != STATUS_SUCCESS )
		return STATUS_FAILURE;
	bp->link_set |= SET_LINK ( *ptr32, D3_SPEED_FW_MASK,
	}
		1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
		return STATUS_FAILURE;
	bp->link_set |= SET_LINK ( *ptr32, SPEED_FW_MASK, SPEED_FW_SHIFT );
		1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
		return STATUS_FAILURE;
	bp->link_set |= SET_LINK ( *ptr32,

	/* Use LINK_SPEED_FW_xxx which is valid for CHIP_P7 and earlier devices */
	switch ( bp->link_set & LINK_SPEED_FW_MASK ) {
	case LINK_SPEED_FW_1G:
		break;
	case LINK_SPEED_FW_2_5G:
		break;
	case LINK_SPEED_FW_10G:
		bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_10GBPS );
		break;
	case LINK_SPEED_FW_25G:
		bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_25GBPS );
		break;
	case LINK_SPEED_FW_40G:
		bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_40GBPS );
		break;
	case LINK_SPEED_FW_50G:
		bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_50GBPS );
		break;
		break;
	case LINK_SPEED_FW_100G:
		bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_100GBPS );
		break;
		break;
		break;
	case LINK_SPEED_FW_200G:
		bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_200GBPS );
		break;
		break;
		break;
		break;
	case LINK_SPEED_FW_AUTONEG:
		bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_AUTONEG );
		break;
	default:
		bp->medium = SET_MEDIUM_DUPLEX ( bp, MEDIUM_FULL_DUPLEX );
		break;
	}
	prn_set_speed ( bp->link_set );
	return STATUS_SUCCESS;
}
1381 
/* Read the pre-boot MBA VLAN configuration from NVM into bp->vlan_id.
 * NOTE(review): the two bnxt_hwrm_nvm_get_variable_req call headers in
 * this function were elided in extraction — restore from upstream
 * before building. */
static int bnxt_get_vlan ( struct bnxt *bp )
{
	u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma;

	/* If VF is set to TRUE, Do not issue this command */
	if ( bp->vf )
		return STATUS_SUCCESS;

	if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) ) ) {
		( u16 )bp->ordinal_value ) != STATUS_SUCCESS )
		return STATUS_FAILURE;

	bp->mba_cfg2 = SET_MBA ( *ptr32, VLAN_MASK, VLAN_SHIFT );
		( u16 )bp->ordinal_value ) != STATUS_SUCCESS )
		return STATUS_FAILURE;

	bp->mba_cfg2 |= SET_MBA ( *ptr32, VLAN_VALUE_MASK, VLAN_VALUE_SHIFT );
	/* VLAN id only honoured when the MBA VLAN-enable bit is set */
	if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED )
		bp->vlan_id = bp->mba_cfg2 & VLAN_VALUE_MASK;
	else
		bp->vlan_id = 0;

	if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED )
		DBGP ( "VLAN MBA Enabled ( %d )\n",
			( bp->mba_cfg2 & VLAN_VALUE_MASK ) );

	}
	return STATUS_SUCCESS;
}
1415 
/* Query backing store configuration ( P5+ chips only ).
 * NOTE(review): the 'req' variable declaration was elided in
 * extraction — 'req' is used below but never declared. Restore from
 * upstream before building. */
static int bnxt_hwrm_backing_store_qcfg ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_qcfg_input );

	DBGP ( "%s\n", __func__ );
	if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
		return STATUS_SUCCESS;

	req = ( struct hwrm_func_backing_store_qcfg_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_QCFG,
		cmd_len );
	return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
}
1430 
/* Configure backing store ( P5+ chips only ); uses an extended timeout.
 * NOTE(review): the 'req' variable declaration and a req->flags
 * assignment were elided in extraction — restore from upstream before
 * building. */
static int bnxt_hwrm_backing_store_cfg ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_cfg_input );

	DBGP ( "%s\n", __func__ );
	if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
		return STATUS_SUCCESS;

	req = ( struct hwrm_func_backing_store_cfg_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_CFG,
		cmd_len );
	req->enables = 0;
	return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
}
1447 
1448 static int bnxt_hwrm_queue_qportcfg ( struct bnxt *bp )
1449 {
1450  u16 cmd_len = ( u16 )sizeof ( struct hwrm_queue_qportcfg_input );
1451  struct hwrm_queue_qportcfg_input *req;
1452  struct hwrm_queue_qportcfg_output *resp;
1453  int rc;
1454 
1455  DBGP ( "%s\n", __func__ );
1456  if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
1457  return STATUS_SUCCESS;
1458 
1459  req = ( struct hwrm_queue_qportcfg_input * )bp->hwrm_addr_req;
1460  resp = ( struct hwrm_queue_qportcfg_output * )bp->hwrm_addr_resp;
1461  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_QUEUE_QPORTCFG, cmd_len );
1462  req->flags = 0;
1463  req->port_id = 0;
1464  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1465  if ( rc ) {
1466  DBGP ( "- %s ( ): Failed\n", __func__ );
1467  return STATUS_FAILURE;
1468  }
1469 
1470  bp->queue_id = resp->queue_id0;
1471  return STATUS_SUCCESS;
1472 }
1473 
/* Configure port MAC settings ( HWRM_PORT_MAC_CFG ). PF-only.
 * NOTE(review): one line was elided in extraction between hwrm_init and
 * wait_resp ( a req field assignment, per upstream iPXE ) — restore
 * before building. */
static int bnxt_hwrm_port_mac_cfg ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_mac_cfg_input );
	struct hwrm_port_mac_cfg_input *req;

	DBGP ( "%s\n", __func__ );
	if ( bp->vf )
		return STATUS_SUCCESS;

	req = ( struct hwrm_port_mac_cfg_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_MAC_CFG, cmd_len );
	return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
}
1488 
/* Program PHY link settings ( HWRM_PORT_PHY_CFG ) according to the
 * medium speed selected in bp->medium, choosing between the legacy and
 * "speeds2" encodings per BNXT_FLAG_LINK_SPEEDS2.
 * NOTE(review): this function was heavily damaged in extraction —
 * several local declarations ( force_pam4_link_speed,
 * auto_link_speed_mask, auto_link_speeds2_mask ), the initial flags
 * setup, most 'case' labels and all per-speed assignment bodies are
 * missing. The skeleton below is kept verbatim; restore the full
 * function from upstream iPXE before building. */
static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_cfg_input );
	struct hwrm_port_phy_cfg_input *req;
	u32 flags;
	u32 enables = 0;
	u16 force_link_speed = 0;
	u16 force_link_speeds2 = 0;
	u8 auto_mode = 0;
	u8 auto_pause = 0;
	u8 auto_duplex = 0;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_port_phy_cfg_input * )bp->hwrm_addr_req;

	switch ( GET_MEDIUM_SPEED ( bp->medium ) ) {
	case MEDIUM_SPEED_1000MBPS:
		break;
	case MEDIUM_SPEED_10GBPS:
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		} else {
		}
		break;
	case MEDIUM_SPEED_25GBPS:
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		} else {
		}
		break;
	case MEDIUM_SPEED_40GBPS:
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		} else {
		}
		break;
	case MEDIUM_SPEED_50GBPS:
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		} else {
		}
		break;
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		} else {
		}
		break;
	case MEDIUM_SPEED_100GBPS:
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		} else {
		}
		break;
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		} else {
		}
		break;
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		}
		break;
	case MEDIUM_SPEED_200GBPS:
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		} else {
		}
		break;
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		}
		break;
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		}
		break;
		if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
		}
		break;
	default:
		if ( FLAG_TEST (bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) )
		else
		auto_link_speed_mask = bp->support_speeds;
		auto_link_speeds2_mask = bp->auto_link_speeds2_mask;
		break;
	}

	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_CFG, cmd_len );
	req->flags = flags;
	req->enables = enables;
	req->port_id = bp->port_idx;
	req->force_link_speed = force_link_speed;
	req->force_pam4_link_speed = force_pam4_link_speed;
	req->force_link_speeds2 = force_link_speeds2;
	req->auto_mode = auto_mode;
	req->auto_duplex = auto_duplex;
	req->auto_pause = auto_pause;
	req->auto_link_speed_mask = auto_link_speed_mask;
	req->auto_link_speeds2_mask = auto_link_speeds2_mask;

	return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
}
1637 
/* Query link state and, if the link is down on a PF that is neither
 * multi-host nor NPAR, read the NVM speed settings and reconfigure the
 * PHY, then refresh the link state.
 * NOTE(review): several lines elided in extraction — the 'flag' local
 * declaration, the initial bnxt_hwrm_port_phy_qcfg call header, and
 * the bnxt_hwrm_port_phy_cfg call near the end. Restore from upstream
 * before building. */
static int bnxt_query_phy_link ( struct bnxt *bp )
{

	DBGP ( "%s\n", __func__ );
	/* Query Link Status */
		return STATUS_FAILURE;
	}

	if ( bp->link_status == STATUS_LINK_ACTIVE )
		return STATUS_SUCCESS;

	/* If VF is set to TRUE, Do not issue the following commands */
	if ( bp->vf )
		return STATUS_SUCCESS;

	/* If multi_host or NPAR, Do not issue bnxt_get_link_speed */
	if ( FLAG_TEST ( bp->flags, PORT_PHY_FLAGS ) ) {
		dbg_flags ( __func__, bp->flags );
		return STATUS_SUCCESS;
	}

	/* HWRM_NVM_GET_VARIABLE - speed */
	if ( bnxt_get_link_speed ( bp ) != STATUS_SUCCESS ) {
		return STATUS_FAILURE;
	}

	/* Configure link if it is not up */

	/* refresh link speed values after bringing link up */
	return bnxt_hwrm_port_phy_qcfg ( bp, flag );
}
1672 
/* Poll for link-up ( in 100ms-ish steps, bounded by wait_link_timeout )
 * and then report the result to the net device layer.
 * NOTE(review): lines elided in extraction — the 'flag' local
 * declaration, the per-iteration bnxt_query_phy_link/port_phy_qcfg call
 * header, and the poll delay at the bottom of the loop. Restore from
 * upstream before building. */
static int bnxt_get_phy_link ( struct bnxt *bp )
{
	u16 i;

	DBGP ( "%s\n", __func__ );
	dbg_chip_info ( bp );
	for ( i = 0; i < ( bp->wait_link_timeout / 100 ); i++ ) {
		break;

		if ( bp->link_status == STATUS_LINK_ACTIVE )
			break;

// if ( bp->media_detect )
// break;
	}
	dbg_link_state ( bp, ( u32 ) ( ( i + 1 ) * 100 ) );
	bnxt_set_link ( bp );
	return STATUS_SUCCESS;
}
1695 
1696 static int bnxt_hwrm_stat_ctx_alloc ( struct bnxt *bp )
1697 {
1698  u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_alloc_input );
1699  struct hwrm_stat_ctx_alloc_input *req;
1700  struct hwrm_stat_ctx_alloc_output *resp;
1701  int rc;
1702 
1703  DBGP ( "%s\n", __func__ );
1704  req = ( struct hwrm_stat_ctx_alloc_input * )bp->hwrm_addr_req;
1705  resp = ( struct hwrm_stat_ctx_alloc_output * )bp->hwrm_addr_resp;
1706  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_ALLOC, cmd_len );
1707  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1708  if ( rc ) {
1709  DBGP ( "- %s ( ): Failed\n", __func__ );
1710  return STATUS_FAILURE;
1711  }
1712 
1713  FLAG_SET ( bp->flag_hwrm, VALID_STAT_CTX );
1714  bp->stat_ctx_id = ( u16 )resp->stat_ctx_id;
1715  return STATUS_SUCCESS;
1716 }
1717 
1718 static int bnxt_hwrm_stat_ctx_free ( struct bnxt *bp )
1719 {
1720  u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_free_input );
1721  struct hwrm_stat_ctx_free_input *req;
1722  int rc;
1723 
1724  DBGP ( "%s\n", __func__ );
1725  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_STAT_CTX ) ) )
1726  return STATUS_SUCCESS;
1727 
1728  req = ( struct hwrm_stat_ctx_free_input * )bp->hwrm_addr_req;
1729  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_FREE, cmd_len );
1730  req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
1731  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1732  if ( rc ) {
1733  DBGP ( "- %s ( ): Failed\n", __func__ );
1734  return STATUS_FAILURE;
1735  }
1736 
1737  FLAG_RESET ( bp->flag_hwrm, VALID_STAT_CTX );
1738  return STATUS_SUCCESS;
1739 }
1740 
1741 static int bnxt_hwrm_ring_free_grp ( struct bnxt *bp )
1742 {
1743  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_free_input );
1744  struct hwrm_ring_grp_free_input *req;
1745  int rc;
1746 
1747  DBGP ( "%s\n", __func__ );
1748  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_GRP ) ) )
1749  return STATUS_SUCCESS;
1750 
1751  req = ( struct hwrm_ring_grp_free_input * )bp->hwrm_addr_req;
1752  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_FREE, cmd_len );
1753  req->ring_group_id = ( u32 )bp->ring_grp_id;
1754  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1755  if ( rc ) {
1756  DBGP ( "- %s ( ): Failed\n", __func__ );
1757  return STATUS_FAILURE;
1758  }
1759 
1760  FLAG_RESET ( bp->flag_hwrm, VALID_RING_GRP );
1761  return STATUS_SUCCESS;
1762 }
1763 
1764 static int bnxt_hwrm_ring_alloc_grp ( struct bnxt *bp )
1765 {
1766  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_alloc_input );
1767  struct hwrm_ring_grp_alloc_input *req;
1768  struct hwrm_ring_grp_alloc_output *resp;
1769  int rc;
1770 
1771  DBGP ( "%s\n", __func__ );
1772  if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) )
1773  return STATUS_SUCCESS;
1774 
1775  req = ( struct hwrm_ring_grp_alloc_input * )bp->hwrm_addr_req;
1776  resp = ( struct hwrm_ring_grp_alloc_output * )bp->hwrm_addr_resp;
1777  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_ALLOC, cmd_len );
1778  req->cr = bp->cq_ring_id;
1779  req->rr = bp->rx_ring_id;
1780  req->ar = ( u16 )HWRM_NA_SIGNATURE;
1781  if ( bp->vf )
1782  req->sc = bp->stat_ctx_id;
1783 
1784  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1785  if ( rc ) {
1786  DBGP ( "- %s ( ): Failed\n", __func__ );
1787  return STATUS_FAILURE;
1788  }
1789 
1790  FLAG_SET ( bp->flag_hwrm, VALID_RING_GRP );
1791  bp->ring_grp_id = ( u16 )resp->ring_group_id;
1792  return STATUS_SUCCESS;
1793 }
1794 
1795 int bnxt_hwrm_ring_free ( struct bnxt *bp, u16 ring_id, u8 ring_type )
1796 {
1797  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_free_input );
1798  struct hwrm_ring_free_input *req;
1799 
1800  DBGP ( "%s\n", __func__ );
1801  req = ( struct hwrm_ring_free_input * )bp->hwrm_addr_req;
1802  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_FREE, cmd_len );
1803  req->ring_type = ring_type;
1804  req->ring_id = ring_id;
1805  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1806 }
1807 
/* Allocate one hardware ring ( NQ, CQ, TX or RX ) and record its id and
 * validity flag in the driver state.
 * NOTE(review): extraction elided the 'case RING_ALLOC_REQ_RING_TYPE_*'
 * labels that introduce each group of assignments below, two
 * req->enables lines, and the leading 'if ( type == ..._L2_CMPL ) {'
 * of the post-allocation id bookkeeping. The groups are annotated from
 * the fields they touch; restore the full switch from upstream iPXE
 * before building. */
static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_alloc_input );
	struct hwrm_ring_alloc_input *req;
	struct hwrm_ring_alloc_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_ring_alloc_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_ring_alloc_output * )bp->hwrm_addr_resp;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_ALLOC, cmd_len );
	req->ring_type = type;
	switch ( type ) {
	/* NOTE(review): 'case ..._NQ:' label elided here */
	req->page_size = LM_PAGE_BITS ( 12 );
	req->int_mode = BNXT_CQ_INTR_MODE ( ( (FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7) ) || bp->vf ) );
	req->length = ( u32 )bp->nq.ring_cnt;
	req->logical_id = 0xFFFF; // Required value for Thor FW?
	req->page_tbl_addr = virt_to_bus ( bp->nq.bd_virt );
	break;
	/* NOTE(review): 'case ..._L2_CMPL:' label elided here */
	req->page_size = LM_PAGE_BITS ( 8 );
	req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf );
	req->length = ( u32 )bp->cq.ring_cnt;
	req->page_tbl_addr = virt_to_bus ( bp->cq.bd_virt );
	if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
	break;
	/* NOTE(review): req->enables line elided here */
	req->nq_ring_id = bp->nq_ring_id;
	req->cq_handle = ( u64 )bp->nq_ring_id;
	break;
	/* NOTE(review): 'case ..._TX:' label elided here */
	req->page_size = LM_PAGE_BITS ( 8 );
	req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL;
	req->length = ( u32 )bp->tx.ring_cnt;
	req->queue_id = TX_RING_QID;
	req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
	req->cmpl_ring_id = bp->cq_ring_id;
	req->page_tbl_addr = virt_to_bus ( bp->tx.bd_virt );
	break;
	/* NOTE(review): 'case ..._RX:' label elided here */
	req->page_size = LM_PAGE_BITS ( 8 );
	req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL;
	req->length = ( u32 )bp->rx.ring_cnt;
	req->stat_ctx_id = ( u32 )STAT_CTX_ID;
	req->cmpl_ring_id = bp->cq_ring_id;
	req->page_tbl_addr = virt_to_bus ( bp->rx.bd_virt );
	if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
	break;
	req->queue_id = ( u16 )RX_RING_QID;
	req->rx_buf_size = MAX_ETHERNET_PACKET_BUFFER_SIZE;
	break;
	default:
	return STATUS_SUCCESS;
	}
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc ) {
		DBGP ( "- %s ( ): Failed, type = %x\n", __func__, type );
		return STATUS_FAILURE;
	}

	/* NOTE(review): 'if ( type == ..._L2_CMPL ) {' elided here */
	FLAG_SET ( bp->flag_hwrm, VALID_RING_CQ );
	bp->cq_ring_id = resp->ring_id;
	} else if ( type == RING_ALLOC_REQ_RING_TYPE_TX ) {
	FLAG_SET ( bp->flag_hwrm, VALID_RING_TX );
	bp->tx_ring_id = resp->ring_id;
	} else if ( type == RING_ALLOC_REQ_RING_TYPE_RX ) {
	FLAG_SET ( bp->flag_hwrm, VALID_RING_RX );
	bp->rx_ring_id = resp->ring_id;
	} else if ( type == RING_ALLOC_REQ_RING_TYPE_NQ ) {
	FLAG_SET ( bp->flag_hwrm, VALID_RING_NQ );
	bp->nq_ring_id = resp->ring_id;
	}
	return STATUS_SUCCESS;
}
1885 
/* Allocate the completion queue ring.
 * NOTE(review): the 'return bnxt_hwrm_ring_alloc ( ... )' statement was
 * elided in extraction — restore before building. */
static int bnxt_hwrm_ring_alloc_cq ( struct bnxt *bp )
{
	DBGP ( "%s\n", __func__ );
}
1891 
/* Allocate the transmit ring.
 * NOTE(review): the 'return bnxt_hwrm_ring_alloc ( ... )' statement was
 * elided in extraction — restore before building. */
static int bnxt_hwrm_ring_alloc_tx ( struct bnxt *bp )
{
	DBGP ( "%s\n", __func__ );
}
1897 
/* Allocate the receive ring.
 * NOTE(review): the 'return bnxt_hwrm_ring_alloc ( ... )' statement was
 * elided in extraction — restore before building. */
static int bnxt_hwrm_ring_alloc_rx ( struct bnxt *bp )
{
	DBGP ( "%s\n", __func__ );
}
1903 
1904 static int bnxt_hwrm_ring_free_cq ( struct bnxt *bp )
1905 {
1906  int ret = STATUS_SUCCESS;
1907 
1908  DBGP ( "%s\n", __func__ );
1909  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_CQ ) ) )
1910  return ret;
1911 
1912  ret = RING_FREE ( bp, bp->cq_ring_id, RING_FREE_REQ_RING_TYPE_L2_CMPL );
1913  if ( ret == STATUS_SUCCESS )
1914  FLAG_RESET ( bp->flag_hwrm, VALID_RING_CQ );
1915 
1916  return ret;
1917 }
1918 
1919 static int bnxt_hwrm_ring_free_tx ( struct bnxt *bp )
1920 {
1921  int ret = STATUS_SUCCESS;
1922 
1923  DBGP ( "%s\n", __func__ );
1924  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_TX ) ) )
1925  return ret;
1926 
1927  ret = RING_FREE ( bp, bp->tx_ring_id, RING_FREE_REQ_RING_TYPE_TX );
1928  if ( ret == STATUS_SUCCESS )
1929  FLAG_RESET ( bp->flag_hwrm, VALID_RING_TX );
1930 
1931  return ret;
1932 }
1933 
1934 static int bnxt_hwrm_ring_free_rx ( struct bnxt *bp )
1935 {
1936  int ret = STATUS_SUCCESS;
1937 
1938  DBGP ( "%s\n", __func__ );
1939  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_RX ) ) )
1940  return ret;
1941 
1942  ret = RING_FREE ( bp, bp->rx_ring_id, RING_FREE_REQ_RING_TYPE_RX );
1943  if ( ret == STATUS_SUCCESS )
1944  FLAG_RESET ( bp->flag_hwrm, VALID_RING_RX );
1945 
1946  return ret;
1947 }
1948 
/* Allocate the notification queue ring ( P5+ chips only ).
 * NOTE(review): the 'return bnxt_hwrm_ring_alloc ( ... )' statement was
 * elided in extraction — restore before building. */
static int bnxt_hwrm_ring_alloc_nq ( struct bnxt *bp )
{
	if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
		return STATUS_SUCCESS;
}
1955 
1956 static int bnxt_hwrm_ring_free_nq ( struct bnxt *bp )
1957 {
1958  int ret = STATUS_SUCCESS;
1959 
1960  if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
1961  return STATUS_SUCCESS;
1962 
1963  DBGP ( "%s\n", __func__ );
1964  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_NQ ) ) )
1965  return ret;
1966 
1967  ret = RING_FREE ( bp, bp->nq_ring_id, RING_FREE_REQ_RING_TYPE_NQ );
1968  if ( ret == STATUS_SUCCESS )
1969  FLAG_RESET ( bp->flag_hwrm, VALID_RING_NQ );
1970 
1971  return ret;
1972 }
1973 
1974 static int bnxt_hwrm_vnic_alloc ( struct bnxt *bp )
1975 {
1976  u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_alloc_input );
1977  struct hwrm_vnic_alloc_input *req;
1978  struct hwrm_vnic_alloc_output *resp;
1979  int rc;
1980 
1981  DBGP ( "%s\n", __func__ );
1982  req = ( struct hwrm_vnic_alloc_input * )bp->hwrm_addr_req;
1983  resp = ( struct hwrm_vnic_alloc_output * )bp->hwrm_addr_resp;
1984  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_ALLOC, cmd_len );
1985  req->flags = VNIC_ALLOC_REQ_FLAGS_DEFAULT;
1986  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1987  if ( rc ) {
1988  DBGP ( "- %s ( ): Failed\n", __func__ );
1989  return STATUS_FAILURE;
1990  }
1991 
1992  FLAG_SET ( bp->flag_hwrm, VALID_VNIC_ID );
1993  bp->vnic_id = resp->vnic_id;
1994  return STATUS_SUCCESS;
1995 }
1996 
1997 static int bnxt_hwrm_vnic_free ( struct bnxt *bp )
1998 {
1999  u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_free_input );
2000  struct hwrm_vnic_free_input *req;
2001  int rc;
2002 
2003  DBGP ( "%s\n", __func__ );
2004  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_VNIC_ID ) ) )
2005  return STATUS_SUCCESS;
2006 
2007  req = ( struct hwrm_vnic_free_input * )bp->hwrm_addr_req;
2008  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_FREE, cmd_len );
2009  req->vnic_id = bp->vnic_id;
2010  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
2011  if ( rc ) {
2012  DBGP ( "- %s ( ): Failed\n", __func__ );
2013  return STATUS_FAILURE;
2014  }
2015 
2016  FLAG_RESET ( bp->flag_hwrm, VALID_VNIC_ID );
2017  return STATUS_SUCCESS;
2018 }
2019 
/* Configure the VNIC ( HWRM_VNIC_CFG ): MRU plus either the default
 * RX/CQ ring ids ( P5+ ) or the ring group id ( older chips ).
 * NOTE(review): the req->enables assignments ( one per branch and one
 * before each field group ) were elided in extraction — restore from
 * upstream before building. */
static int bnxt_hwrm_vnic_cfg ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_cfg_input );
	struct hwrm_vnic_cfg_input *req;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_vnic_cfg_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_CFG, cmd_len );
	req->mru = bp->mtu;

	if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) {
		req->default_rx_ring_id = bp->rx_ring_id;
		req->default_cmpl_ring_id = bp->cq_ring_id;
	} else {
		req->dflt_ring_grp = bp->ring_grp_id;
	}

	req->vnic_id = bp->vnic_id;
	return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
}
2045 
2046 static int bnxt_set_rx_mask ( struct bnxt *bp )
2047 {
2048  return bnxt_hwrm_set_rx_mask ( bp, RX_MASK );
2049 }
2050 
/* Clear the VNIC receive mask ( accept nothing ). */
static int bnxt_reset_rx_mask ( struct bnxt *bp )
{
	return bnxt_hwrm_set_rx_mask ( bp, 0 );
}
2055 
/* Signature of one step in the HWRM bring-up/bring-down sequences. */
typedef int ( *hwrm_func_t ) ( struct bnxt *bp );
2057 
/* Chip teardown sequence.
 * NOTE(review): the array declaration line ( "hwrm_func_t
 * bring_down_chip[] = {" per upstream iPXE ) was lost in extraction —
 * restore before building. */
	bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */
	NULL,
};
2062 
/* NIC teardown sequence, run in reverse order of bring-up.
 * NOTE(review): the array declaration line ( "hwrm_func_t
 * bring_down_nic[] = {" per upstream iPXE ) and one entry after the
 * first were lost in extraction — restore before building. */
	bnxt_hwrm_cfa_l2_filter_free, /* HWRM_CFA_L2_FILTER_FREE */
	bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */
	bnxt_free_rx_iob, /* HWRM_FREE_IOB */
	bnxt_hwrm_vnic_free, /* HWRM_VNIC_FREE */
	bnxt_hwrm_ring_free_grp, /* HWRM_RING_GRP_FREE */
	bnxt_hwrm_ring_free_rx, /* HWRM_RING_FREE - RX Ring */
	bnxt_hwrm_ring_free_tx, /* HWRM_RING_FREE - TX Ring */
	bnxt_hwrm_stat_ctx_free, /* HWRM_STAT_CTX_FREE */
	bnxt_hwrm_ring_free_cq, /* HWRM_RING_FREE - CQ Ring */
	bnxt_hwrm_ring_free_nq, /* HWRM_RING_FREE - NQ Ring */
	NULL,
};
/* Chip bring-up sequence: version handshake, reset, registration,
 * capability/configuration queries, then MAC address retrieval.
 * NOTE(review): the array declaration line ( "hwrm_func_t
 * bring_up_chip[] = {" per upstream iPXE ) was lost in extraction —
 * restore before building. */
	bnxt_hwrm_ver_get, /* HWRM_VER_GET */
	bnxt_hwrm_func_reset_req, /* HWRM_FUNC_RESET */
	bnxt_hwrm_func_drv_rgtr, /* HWRM_FUNC_DRV_RGTR */
	bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */
	bnxt_hwrm_backing_store_cfg, /* HWRM_FUNC_BACKING_STORE_CFG */
	bnxt_hwrm_backing_store_qcfg, /* HWRM_FUNC_BACKING_STORE_QCFG */
	bnxt_hwrm_func_resource_qcaps, /* HWRM_FUNC_RESOURCE_QCAPS */
	bnxt_hwrm_port_phy_qcaps_req, /* HWRM_PORT_PHY_QCAPS */
	bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */
	bnxt_get_vlan, /* HWRM_NVM_GET_VARIABLE - vlan */
	bnxt_hwrm_port_mac_cfg, /* HWRM_PORT_MAC_CFG */
	bnxt_hwrm_func_cfg_req, /* HWRM_FUNC_CFG */
	bnxt_query_phy_link, /* HWRM_PORT_PHY_QCFG */
	bnxt_get_device_address, /* HW MAC address */
	NULL,
};
2094 
/* NIC bring-up sequence: stats context, rings, VNIC, filters, then the
 * receive mask — mirrored by bring_down_nic.
 * NOTE(review): the array declaration line ( "hwrm_func_t
 * bring_up_nic[] = {" per upstream iPXE ) was lost in extraction —
 * restore before building. */
	bnxt_hwrm_stat_ctx_alloc, /* HWRM_STAT_CTX_ALLOC */
	bnxt_hwrm_queue_qportcfg, /* HWRM_QUEUE_QPORTCFG */
	bnxt_hwrm_ring_alloc_nq, /* HWRM_RING_ALLOC - NQ Ring */
	bnxt_hwrm_ring_alloc_cq, /* HWRM_RING_ALLOC - CQ Ring */
	bnxt_hwrm_ring_alloc_tx, /* HWRM_RING_ALLOC - TX Ring */
	bnxt_hwrm_ring_alloc_rx, /* HWRM_RING_ALLOC - RX Ring */
	bnxt_hwrm_ring_alloc_grp, /* HWRM_RING_GRP_ALLOC - Group */
	bnxt_hwrm_vnic_alloc, /* HWRM_VNIC_ALLOC */
	bnxt_post_rx_buffers, /* Post RX buffers */
	bnxt_hwrm_set_async_event, /* ENABLES_ASYNC_EVENT_CR */
	bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */
	bnxt_hwrm_cfa_l2_filter_alloc, /* HWRM_CFA_L2_FILTER_ALLOC */
	bnxt_get_phy_link, /* HWRM_PORT_PHY_QCFG - PhyLink */
	bnxt_set_rx_mask, /* HWRM_CFA_L2_SET_RX_MASK */
	NULL,
};
2112 
2113 int bnxt_hwrm_run ( hwrm_func_t cmds[], struct bnxt *bp )
2114 {
2115  hwrm_func_t *ptr;
2116  int ret;
2117 
2118  for ( ptr = cmds; *ptr; ++ptr ) {
2119  memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE );
2120  memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE );
2121  ret = ( *ptr ) ( bp );
2122  if ( ret ) {
2123  DBGP ( "- %s ( ): Failed\n", __func__ );
2124  return STATUS_FAILURE;
2125  }
2126  }
2127  return STATUS_SUCCESS;
2128 }
2129 
2130 #define bnxt_down_chip( bp ) bnxt_hwrm_run ( bring_down_chip, bp )
2131 #define bnxt_up_chip( bp ) bnxt_hwrm_run ( bring_up_chip, bp )
2132 #define bnxt_down_nic( bp ) bnxt_hwrm_run ( bring_down_nic, bp )
2133 #define bnxt_up_nic( bp ) bnxt_hwrm_run ( bring_up_nic, bp )
2134 
2135 static int bnxt_open ( struct net_device *dev )
2136 {
2137  struct bnxt *bp = dev->priv;
2138 
2139  DBGP ( "%s\n", __func__ );
2140  bnxt_mm_nic ( bp );
2141  return (bnxt_up_nic ( bp ));
2142 }
2143 
2144 static void bnxt_tx_adjust_pkt ( struct bnxt *bp, struct io_buffer *iob )
2145 {
2146  u16 prev_len = iob_len ( iob );
2147 
2148  bp->vlan_tx = bnxt_get_pkt_vlan ( ( char * )iob->data );
2149  if ( !bp->vlan_tx && bp->vlan_id )
2150  bnxt_add_vlan ( iob, bp->vlan_id );
2151 
2152  dbg_tx_vlan ( bp, ( char * )iob->data, prev_len, iob_len ( iob ) );
2153  if ( iob_len ( iob ) != prev_len )
2154  prev_len = iob_len ( iob );
2155 
2156  iob_pad ( iob, ETH_ZLEN );
2157  dbg_tx_pad ( prev_len, iob_len ( iob ) );
2158 }
2159 
2160 static int bnxt_tx ( struct net_device *dev, struct io_buffer *iob )
2161 {
2162  struct bnxt *bp = dev->priv;
2163  u16 len, entry;
2164  dma_addr_t mapping;
2165 
2166  if ( bnxt_tx_avail ( bp ) < 1 ) {
2167  DBGP ( "- %s ( ): Failed no bd's available\n", __func__ );
2168  return -ENOBUFS;
2169  }
2170 
2171  bnxt_tx_adjust_pkt ( bp, iob );
2172  entry = bp->tx.prod_id;
2173  mapping = virt_to_bus ( iob->data );
2174  len = iob_len ( iob );
2175  bp->tx.iob[entry] = iob;
2176  bnxt_set_txq ( bp, entry, mapping, len );
2177  entry = NEXT_IDX ( entry, bp->tx.ring_cnt );
2178  /* If the ring has wrapped, toggle the epoch bit */
2179  if ( bp->tx.prod_id > entry )
2180  bp->tx.epoch ^= 1;
2181  dump_tx_pkt ( ( u8 * )iob->data, len, bp->tx.prod_id );
2182  /* Packets are ready, update Tx producer idx local and on card. */
2183  bnxt_db_tx ( bp, ( u32 )entry );
2184  bp->tx.prod_id = entry;
2185  bp->tx.cnt_req++;
2186  /* memory barrier */
2187  mb ( );
2188  return 0;
2189 }
2190 
2191 static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt )
2192 {
2193  u16 cons_id;
2194 
2195  cons_id = bp->nq.cons_id + cnt;
2196  if ( cons_id >= bp->nq.ring_cnt ) {
2197  /* Toggle completion bit when the ring wraps. */
2198  bp->nq.completion_bit ^= 1;
2199  bp->nq.epoch ^= 1;
2200  cons_id = cons_id - bp->nq.ring_cnt;
2201  }
2202  bp->nq.cons_id = cons_id;
2203 }
2204 
2205 void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt )
2206 {
2207  switch ( evt->event_id ) {
2209  if ( evt->event_data1 & 0x01 )
2210  bp->link_status = STATUS_LINK_ACTIVE;
2211  else
2212  bp->link_status = STATUS_LINK_DOWN;
2213  bnxt_set_link ( bp );
2214  dbg_link_status ( bp );
2215  break;
2216  default:
2217  break;
2218  }
2219 }
2220 
2221 static void bnxt_service_cq ( struct net_device *dev )
2222 {
2223  struct bnxt *bp = dev->priv;
2224  struct cmpl_base *cmp;
2225  struct tx_cmpl *tx;
2226  u16 old_cid = bp->cq.cons_id;
2227  int done = SERVICE_NEXT_CQ_BD;
2228  u32 cq_type;
2229 
2230  while ( done == SERVICE_NEXT_CQ_BD ) {
2231  cmp = ( struct cmpl_base * )BD_NOW ( bp->cq.bd_virt,
2232  bp->cq.cons_id,
2233  sizeof ( struct cmpl_base ) );
2234 
2235  if ( ( cmp->info3_v & CMPL_BASE_V ) ^ bp->cq.completion_bit )
2236  break;
2237 
2238  cq_type = cmp->type & CMPL_BASE_TYPE_MASK;
2239  dump_evt ( ( u8 * )cmp, cq_type, bp->cq.cons_id, 0 );
2240  dump_cq ( cmp, bp->cq.cons_id, bp->nq.toggle );
2241 
2242  switch ( cq_type ) {
2243  case CMPL_BASE_TYPE_TX_L2:
2244  tx = ( struct tx_cmpl * )cmp;
2245  bnxt_tx_complete ( dev, ( u16 )tx->opaque );
2246  /* Fall through */
2248  bnxt_adv_cq_index ( bp, 1 );
2249  break;
2250  case CMPL_BASE_TYPE_RX_L2:
2252  done = bnxt_rx_complete ( dev,
2253  ( struct rx_pkt_cmpl * )cmp );
2254  break;
2256  bnxt_link_evt ( bp,
2257  ( struct hwrm_async_event_cmpl * )cmp );
2258  bnxt_adv_cq_index ( bp, 1 );
2259  break;
2260  default:
2262  break;
2263  }
2264  }
2265 
2266  if ( bp->cq.cons_id != old_cid )
2267  bnxt_db_cq ( bp );
2268 }
2269 
2270 static void bnxt_service_nq ( struct net_device *dev )
2271 {
2272  struct bnxt *bp = dev->priv;
2273  struct nq_base *nqp;
2274  u16 old_cid = bp->nq.cons_id;
2275  int done = SERVICE_NEXT_NQ_BD;
2276  u32 nq_type;
2277 
2278  if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
2279  return;
2280 
2281  while ( done == SERVICE_NEXT_NQ_BD ) {
2282  nqp = ( struct nq_base * )BD_NOW ( bp->nq.bd_virt,
2283  bp->nq.cons_id, sizeof ( struct nq_base ) );
2284  if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit )
2285  break;
2286  nq_type = ( nqp->type & NQ_CN_TYPE_MASK );
2287  bp->nq.toggle = ( ( nqp->type & NQ_CN_TOGGLE_MASK ) >> NQ_CN_TOGGLE_SFT );
2288  dump_evt ( ( u8 * )nqp, nq_type, bp->nq.cons_id, 1 );
2289  dump_nq ( nqp, bp->nq.cons_id );
2290 
2291  switch ( nq_type ) {
2293  bnxt_link_evt ( bp,
2294  ( struct hwrm_async_event_cmpl * )nqp );
2295  /* Fall through */
2297  bnxt_adv_nq_index ( bp, 1 );
2298  break;
2299  default:
2301  break;
2302  }
2303  }
2304 
2305  if ( bp->nq.cons_id != old_cid )
2306  bnxt_db_nq ( bp );
2307 }
2308 
/**
 * Poll for completed transmissions and received packets.
 *
 * @v dev		Network device
 */
static void bnxt_poll ( struct net_device *dev )
{
	/* Ensure device writes are visible before reading the rings. */
	mb ( );
	/* Service the notification queue first (a no-op on pre-P5
	 * chips), then the completion queue.
	 */
	bnxt_service_nq ( dev );
	bnxt_service_cq ( dev );
}
2315 
2316 static void bnxt_close ( struct net_device *dev )
2317 {
2318  struct bnxt *bp = dev->priv;
2319 
2320  DBGP ( "%s\n", __func__ );
2321  bnxt_down_nic (bp);
2322 
2323  /* iounmap PCI BAR ( s ) */
2324  bnxt_down_pci(bp);
2325 
2326  /* Get Bar Address */
2327  bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 );
2328  bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 );
2329  bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 );
2330 
2331 }
2332 
2334  .open = bnxt_open,
2335  .close = bnxt_close,
2336  .poll = bnxt_poll,
2337  .transmit = bnxt_tx,
2338 };
2339 
2340 static int bnxt_init_one ( struct pci_device *pci )
2341 {
2342  struct net_device *netdev;
2343  struct bnxt *bp;
2344  int err = 0;
2345 
2346  DBGP ( "%s\n", __func__ );
2347  /* Allocate network device */
2348  netdev = alloc_etherdev ( sizeof ( *bp ) );
2349  if ( !netdev ) {
2350  DBGP ( "- %s ( ): alloc_etherdev Failed\n", __func__ );
2351  err = -ENOMEM;
2352  goto disable_pdev;
2353  }
2354 
2355  /* Initialise network device */
2357 
2358  /* Driver private area for this device */
2359  bp = netdev->priv;
2360 
2361  /* Set PCI driver private data */
2362  pci_set_drvdata ( pci, netdev );
2363 
2364  /* Clear Private area data */
2365  memset ( bp, 0, sizeof ( *bp ) );
2366  bp->pdev = pci;
2367  bp->dev = netdev;
2368  netdev->dev = &pci->dev;
2369 
2370  /* Enable PCI device */
2371  adjust_pci_device ( pci );
2372 
2373  /* Get PCI Information */
2374  bnxt_get_pci_info ( bp );
2375 
2376  /* Allocate and Initialise device specific parameters */
2377  if ( bnxt_alloc_mem ( bp ) != 0 ) {
2378  DBGP ( "- %s ( ): bnxt_alloc_mem Failed\n", __func__ );
2379  goto err_down_pci;
2380  }
2381 
2382  /* Get device specific information */
2383  if ( bnxt_up_chip ( bp ) != 0 ) {
2384  DBGP ( "- %s ( ): bnxt_up_chip Failed\n", __func__ );
2385  goto err_down_chip;
2386  }
2387 
2388  /* Register Network device */
2389  if ( register_netdev ( netdev ) != 0 ) {
2390  DBGP ( "- %s ( ): register_netdev Failed\n", __func__ );
2391  goto err_down_chip;
2392  }
2393 
2394  return 0;
2395 
2396 err_down_chip:
2397  bnxt_down_chip (bp);
2398  bnxt_free_mem ( bp );
2399 
2400 err_down_pci:
2401  bnxt_down_pci ( bp );
2402  netdev_nullify ( netdev );
2403  netdev_put ( netdev );
2404 
2405 disable_pdev:
2406  pci_set_drvdata ( pci, NULL );
2407  return err;
2408 }
2409 
2410 static void bnxt_remove_one ( struct pci_device *pci )
2411 {
2412  struct net_device *netdev = pci_get_drvdata ( pci );
2413  struct bnxt *bp = netdev->priv;
2414 
2415  DBGP ( "%s\n", __func__ );
2416  /* Unregister network device */
2418 
2419  /* Bring down Chip */
2420  bnxt_down_chip(bp);
2421 
2422  /* Free Allocated resource */
2423  bnxt_free_mem ( bp );
2424 
2425  /* iounmap PCI BAR ( s ) */
2426  bnxt_down_pci ( bp );
2427 
2428  /* Stop network device */
2429  netdev_nullify ( netdev );
2430 
2431  /* Drop refernce to network device */
2432  netdev_put ( netdev );
2433 }
2434 
2435 /* Broadcom NXE PCI driver */
2436 struct pci_driver bnxt_pci_driver __pci_driver = {
2437  .ids = bnxt_nics,
2438  .id_count = ARRAY_SIZE ( bnxt_nics ),
2439  .probe = bnxt_init_one,
2440  .remove = bnxt_remove_one,
2441 };
#define VNIC_CFG_REQ_ENABLES_MRU
Definition: bnxt_hsi.h:5723
#define dump_evt(cq, ty, id, ring)
Definition: bnxt_dbg.h:674
#define RING_ALLOC_REQ_RING_TYPE_NQ
Definition: bnxt_hsi.h:6117
#define u16
Definition: vga.h:20
#define dbg_tx_avail(bp, a, u)
Definition: bnxt_dbg.h:577
uint16_t u16
Definition: stdint.h:21
#define IPXE_VERSION_MAJOR
Definition: bnxt.h:38
#define bnxt_up_chip(bp)
Definition: bnxt.c:2131
#define EINVAL
Invalid argument.
Definition: errno.h:428
#define FLAG_SET(f, b)
Definition: bnxt.h:45
#define SUPPORT_SPEEDS2
Definition: bnxt.h:211
static u32 bnxt_set_ring_info(struct bnxt *bp)
Definition: bnxt.c:866
#define DB_OFFSET_VF
Definition: bnxt.h:197
#define MAX_NQ_DESC_CNT
Definition: bnxt.h:179
#define HWRM_STAT_CTX_FREE
Definition: bnxt_hsi.h:191
#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
Definition: bnxt_hsi.h:6582
#define STATUS_SUCCESS
Definition: bnxt.h:62
struct arbelprm_rc_send_wqe rc
Definition: arbel.h:14
#define DBC_MSG_IDX(idx)
Definition: bnxt.h:198
#define pci_read_byte
Definition: bnxt.h:1021
static void netdev_tx_complete(struct net_device *netdev, struct io_buffer *iobuf)
Complete network transmission.
Definition: netdevice.h:752
static void bnxt_service_cq(struct net_device *dev)
Definition: bnxt.c:2221
#define DETECT_MEDIA
Definition: bnxt.h:209
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
Definition: bnxt_hsi.h:3005
#define MEDIUM_SPEED_100PAM4_112GBPS
Definition: bnxt.h:115
#define iob_put(iobuf, len)
Definition: iobuf.h:120
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB
Definition: bnxt_hsi.h:2991
#define MAC_HDR_SIZE
Definition: bnxt.h:190
static int bnxt_hwrm_ver_get(struct bnxt *bp)
Definition: bnxt.c:752
#define dbg_flags(func, flags)
Definition: bnxt_dbg.h:326
static void bnxt_db_tx(struct bnxt *bp, u32 idx)
Definition: bnxt.c:252
#define RX_PKT_V3_CMPL_TYPE_MASK
Definition: bnxt.h:721
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition: netdevice.c:586
#define MEDIA_AUTO_DETECT_MASK
Definition: bnxt.h:1052
u32 opaque
Definition: bnxt.h:638
#define HWRM_PORT_MAC_CFG
Definition: bnxt_hsi.h:119
#define HWRM_CFA_L2_FILTER_FREE
Definition: bnxt_hsi.h:172
#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST
Definition: bnxt_hsi.h:6581
#define HWRM_FUNC_CFG
Definition: bnxt_hsi.h:109
#define BD_NOW(bd, entry, len)
Definition: bnxt.h:169
#define IPXE_VERSION_UPDATE
Definition: bnxt.h:40
#define CMPL_BASE_V
Definition: bnxt.h:545
A PCI driver.
Definition: pci.h:247
#define SHORT_CMD_SUPPORTED
Definition: bnxt.h:1024
#define dbg_hw_cmd(bp, func, cmd_len, resp_len, cmd_tmo, err)
Definition: bnxt_dbg.h:378
static unsigned int unsigned int reg
Definition: myson.h:162
#define LINK_SPEED_FW_200G_PAM4_112
Definition: bnxt.h:312
#define TX_BD_FLAGS
Definition: bnxt.h:1035
#define VLAN_VALUE_MASK
Definition: bnxt.h:1056
#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN
Definition: bnxt_hsi.h:1787
#define MEDIUM_SPEED_2500MBPS
Definition: bnxt.h:105
#define dbg_alloc_rx_iob(iob, id, cid)
Definition: bnxt_dbg.h:470
__le16 seq_id
Definition: bnxt_hsi.h:71
#define HWRM_CMD_DEFAULT_MULTIPLAYER(a)
Definition: bnxt.h:143
#define DEFAULT_NUMBER_OF_CMPL_RINGS
Definition: bnxt.h:148
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB
Definition: bnxt_hsi.h:3080
static int bnxt_hwrm_ring_alloc_tx(struct bnxt *bp)
Definition: bnxt.c:1892
#define dbg_func_resource_qcaps(bp)
Definition: bnxt_dbg.h:320
#define RESP_BUFFER_SIZE
Definition: bnxt.h:164
int(* open)(struct net_device *netdev)
Open network device.
Definition: netdevice.h:222
#define DEFAULT_NUMBER_OF_RING_GRPS
Definition: bnxt.h:151
#define NQ_CN_TOGGLE_MASK
Definition: bnxt.h:573
#define RX_PKT_CMPL_METADATA_VID_MASK
Definition: bnxt.h:665
__le16 def_req_timeout
Definition: bnxt_hsi.h:440
#define SERVICE_NEXT_NQ_BD
Definition: bnxt.h:187
#define HWRM_VER_GET
Definition: bnxt_hsi.h:98
#define CQ_DOORBELL_KEY_IDX(a)
Definition: bnxt.h:1031
Error codes.
int(* hwrm_func_t)(struct bnxt *bp)
Definition: bnxt.c:2056
#define HWRM_FUNC_RESET
Definition: bnxt_hsi.h:103
#define FUNC_DRV_RGTR_REQ_ENABLES_VER
Definition: bnxt_hsi.h:1736
#define dbg_short_cmd(sreq, func, len)
Definition: bnxt_dbg.h:398
__le16 signature
Definition: bnxt_hsi.h:87
#define CHIP_NUM_57608
Definition: bnxt.h:1068
static int bnxt_get_device_address(struct bnxt *bp)
Definition: bnxt.c:163
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB
Definition: bnxt_hsi.h:2986
#define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
Definition: bnxt.h:660
#define test_if
Definition: bnxt.h:1018
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID
Definition: bnxt_hsi.h:6442
#define dbg_rxp(iob, rx_len, drop)
Definition: bnxt_dbg.h:473
#define HWRM_CMD_FLASH_MULTIPLAYER(a)
Definition: bnxt.h:144
#define RX_MASK_PROMISCUOUS_MODE
Definition: bnxt.h:96
unsigned long driver_data
Arbitrary driver data.
Definition: pci.h:178
#define LINK_SPEED_FW_10G
Definition: bnxt.h:294
I/O buffers.
void free_iob(struct io_buffer *iobuf)
Free I/O buffer.
Definition: iobuf.c:146
#define RX_PKT_V3_CMPL_HI_METADATA0_VID_MASK
Definition: bnxt.h:860
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB
Definition: bnxt_hsi.h:3079
struct pci_device_id * ids
PCI ID table.
Definition: pci.h:249
static void hwrm_init(struct bnxt *bp, struct input *req, u16 cmd, u16 len)
Definition: bnxt.c:686
#define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
Definition: bnxt_hsi.h:3000
u16 flags_type
Definition: bnxt.h:720
#define NQ_RING_BUFFER_SIZE
Definition: bnxt.h:180
int bnxt_hwrm_run(hwrm_func_t cmds[], struct bnxt *bp)
Definition: bnxt.c:2113
static int bnxt_hwrm_func_cfg_req(struct bnxt *bp)
Definition: bnxt.c:1028
#define FLAG_TEST(f, b)
Definition: bnxt.h:46
#define VALID_DRIVER_REG
Definition: bnxt.h:903
#define PCI_COMMAND_INTX_DISABLE
Interrupt disable.
Definition: pci.h:32
static void const void * src
Definition: crypto.h:244
static void bnxt_db_nq(struct bnxt *bp)
Definition: bnxt.c:214
#define dbg_tx_vlan(bp, src, plen, len)
Definition: bnxt_dbg.h:578
#define HWRM_VERSION_MINOR
Definition: bnxt_hsi.h:369
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56
Definition: bnxt_hsi.h:3087
__le16 req_type
Definition: bnxt_hsi.h:86
uint64_t desc
Microcode descriptor list physical address.
Definition: ucode.h:12
#define RING_FREE(bp, rid, flag)
Definition: bnxt.h:1043
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK
Definition: bnxt_hsi.h:2969
#define MEDIUM_SPEED_200PAM4_112GBPS
Definition: bnxt.h:116
#define write32
Definition: bnxt.h:1019
#define LINK_POLL_WAIT_TIME
Definition: bnxt.h:174
#define CMPL_BASE_TYPE_RX_L2_V3
Definition: bnxt.h:530
static int bnxt_hwrm_ring_free_rx(struct bnxt *bp)
Definition: bnxt.c:1934
Definition: bnxt_hsi.h:68
static void * bnxt_pci_base(struct pci_device *pdev, unsigned int reg)
Definition: bnxt.c:116
#define RING_FREE_REQ_RING_TYPE_TX
Definition: bnxt_hsi.h:6195
switch(len)
Definition: string.h:61
static int bnxt_hwrm_ring_alloc_grp(struct bnxt *bp)
Definition: bnxt.c:1764
uint16_t bp
Definition: registers.h:23
static u32 bnxt_tx_avail(struct bnxt *bp)
Definition: bnxt.c:330
#define LINK_SPEED_FW_100G_PAM4_112
Definition: bnxt.h:310
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
Definition: bnxt_hsi.h:6415
static void *__malloc malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition: malloc.h:62
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE
Definition: bnxt_hsi.h:553
#define BNXT_FLAG_IS_CHIP_P5
Definition: bnxt.h:56
#define PCI_BASE_ADDRESS_0
Definition: pci.h:62
unsigned long dma_addr_t
Definition: bnx2.h:20
#define SET_MBA(p, m, s)
Definition: bnxt.h:1045
#define HWRM_FUNC_BACKING_STORE_CFG
Definition: bnxt_hsi.h:283
#define D3_LINK_SPEED_FW_NUM
Definition: bnxt.h:323
#define D3_SPEED_FW_SHIFT
Definition: bnxt.h:1051
static int bnxt_hwrm_ring_alloc(struct bnxt *bp, u8 type)
Definition: bnxt.c:1808
#define write64
Definition: bnxt.h:1020
void bnxt_link_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition: bnxt.c:2205
__le16 default_rx_ring_id
Definition: bnxt_hsi.h:5732
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition: netdevice.c:230
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
Definition: bnxt.c:1052
uint32_t data_len
Microcode data size (or 0 to indicate 2000 bytes)
Definition: ucode.h:26
__le16 max_ext_req_len
Definition: bnxt_hsi.h:466
#define BNXT_RX_STD_DMA_SZ
Definition: bnxt.h:167
iPXE timers
#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS
Definition: bnxt_hsi.h:1533
#define PCI_COMMAND
PCI command.
Definition: pci.h:25
void bnxt_mm_nic(struct bnxt *bp)
Definition: bnxt.c:584
static int bnxt_hwrm_cfa_l2_filter_alloc(struct bnxt *bp)
Definition: bnxt.c:1140
#define LINK_SPEED_DRV_NUM
Definition: bnxt.h:221
#define STATUS_LINK_DOWN
Definition: bnxt.h:67
#define BNXT_FLAG_MULTI_HOST
Definition: bnxt.h:51
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
Definition: bnxt_hsi.h:3068
#define MEDIUM_SPEED_10GBPS
Definition: bnxt.h:106
#define NQ_CN_TOGGLE_SFT
Definition: bnxt.h:574
#define LM_PAGE_BITS(a)
Definition: bnxt.h:166
#define HWRM_FUNC_QCAPS
Definition: bnxt_hsi.h:107
#define LINK_SPEED_FW_50G_PAM4
Definition: bnxt.h:306
static int bnxt_hwrm_func_resource_qcaps(struct bnxt *bp)
Definition: bnxt.c:803
#define GET_MEDIUM_SPEED(m)
Definition: bnxt.h:123
static int bnxt_hwrm_func_reset_req(struct bnxt *bp)
Definition: bnxt.c:1014
#define FUNC_CFG_REQ_EVB_MODE_NO_EVB
Definition: bnxt_hsi.h:1607
#define PORT_PHY_QCFG_RESP_LINK_LINK
Definition: bnxt_hsi.h:3149
#define LINK_SPEED_FW_25G
Definition: bnxt.h:296
static int bnxt_init_one(struct pci_device *pci)
Definition: bnxt.c:2340
#define VLAN_HDR_SIZE
Definition: bnxt.h:191
static void bnxt_adv_cq_index(struct bnxt *bp, u16 cnt)
Definition: bnxt.c:505
static int bnxt_hwrm_nvm_get_variable_req(struct bnxt *bp, u16 data_len, u16 option_num, u16 dimensions, u16 index_0)
Definition: bnxt.c:1278
#define D3_SPEED_FW_MASK
Definition: bnxt.h:1050
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition: pci.c:154
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:129
#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY
Definition: bnxt_hsi.h:2940
#define MEDIUM_SPEED_50GBPS
Definition: bnxt.h:110
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE
Definition: bnxt_hsi.h:2965
#define PCI_SUBSYSTEM_ID
PCI subsystem ID.
Definition: pci.h:78
struct device dev
Generic device.
Definition: pci.h:208
#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER
Definition: bnxt_hsi.h:1742
#define BNXT_FLAG_NPAR_MODE
Definition: bnxt.h:52
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK
Definition: bnxt_hsi.h:6428
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS
Definition: bnxt_hsi.h:1549
u32 v
Definition: bnxt.h:584
u16 len
Definition: bnxt.h:474
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID
Definition: bnxt_hsi.h:6110
void bnxt_add_vlan(struct io_buffer *iob, u16 vlan)
Definition: bnxt.c:264
static void bnxt_close(struct net_device *dev)
Definition: bnxt.c:2316
dma_addr_t addr
Definition: bnxt.h:31
#define MEDIUM_SPEED_200GBPS
Definition: bnxt.h:112
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST
Definition: bnxt_hsi.h:1410
Dynamic memory allocation.
#define HWRM_FUNC_QCFG
Definition: bnxt_hsi.h:108
static int bnxt_hwrm_func_qcaps_req(struct bnxt *bp)
Definition: bnxt.c:919
#define PHY_STATUS
Definition: bnxt.h:207
#define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT
Definition: bnxt.h:873
void bnxt_rx_process(struct net_device *dev, struct bnxt *bp, struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi)
Definition: bnxt.c:519
static struct pci_device_id bnxt_nics[]
Definition: bnxt.c:26
#define RX_MASK_ACCEPT_NONE
Definition: bnxt.h:90
#define MEDIUM_SPEED_25GBPS
Definition: bnxt.h:108
#define CHIP_NUM_57502
Definition: bnxt.h:1066
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
Definition: netdevice.h:515
#define NQ_CN_TYPE_MASK
Definition: bnxt.h:571
#define RX_PKT_V3_CMPL_TYPE_RX_L2_V3
Definition: bnxt.h:729
int bnxt_free_rx_iob(struct bnxt *bp)
Definition: bnxt.c:375
#define dbg_pci(bp, func, creg)
Definition: bnxt_dbg.h:140
#define dump_nq(nq, id)
Definition: bnxt_dbg.h:517
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:359
#define DEFAULT_NUMBER_OF_STAT_CTXS
Definition: bnxt.h:152
#define STATUS_LINK_ACTIVE
Definition: bnxt.h:66
u16 type
Definition: bnxt.h:563
#define HWRM_RING_GRP_ALLOC
Definition: bnxt_hsi.h:165
#define ENOMEM
Not enough space.
Definition: errno.h:534
#define pci_write_word
Definition: bnxt.h:1023
#define LINK_SPEED_FW_NUM
Definition: bnxt.h:287
#define LINK_SPEED_FW_400G_PAM4_112
Definition: bnxt.h:316
u32 opaque
Definition: bnxt.h:475
void * memcpy(void *dest, const void *src, size_t len) __nonnull
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112
Definition: bnxt_hsi.h:3089
#define VALID_RING_CQ
Definition: bnxt.h:905
int bnxt_alloc_mem(struct bnxt *bp)
Definition: bnxt.c:659
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX
Definition: bnxt_hsi.h:3009
#define LINK_SPEED_FW_1G
Definition: bnxt.h:292
#define NO_MORE_NQ_BD_TO_SERVICE
Definition: bnxt.h:186
#define QCFG_PHY_ALL
Definition: bnxt.h:212
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56
Definition: bnxt_hsi.h:3085
#define VALID_VNIC_ID
Definition: bnxt.h:909
static int bnxt_hwrm_port_phy_qcaps_req(struct bnxt *bp)
Definition: bnxt.c:990
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID
Definition: bnxt_hsi.h:5725
#define RX_MASK
Definition: bnxt.h:175
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
#define MAX_RX_DESC_CNT
Definition: bnx2.h:3885
#define VLAN_VALUE_SHIFT
Definition: bnxt.h:1057
static int bnxt_hwrm_set_rx_mask(struct bnxt *bp, u32 rx_mask)
Definition: bnxt.c:1224
static int wait_resp(struct bnxt *bp, u32 tmo, u16 len, const char *func)
Definition: bnxt.c:722
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB
Definition: bnxt_hsi.h:2989
#define DBGP(...)
Definition: compiler.h:532
int bnxt_post_rx_buffers(struct bnxt *bp)
Definition: bnxt.c:425
#define REQ_BUFFER_SIZE
Definition: bnxt.h:163
#define HWRM_QUEUE_QPORTCFG
Definition: bnxt_hsi.h:134
#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR
Definition: bnxt_hsi.h:1544
#define SHORT_CMD_REQUIRED
Definition: bnxt.h:1025
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
#define RX_RING_BUFFER_SIZE
Definition: bnxt.h:158
#define HWRM_CFA_L2_SET_RX_MASK
Definition: bnxt_hsi.h:174
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition: netdevice.h:572
#define DEFAULT_NUMBER_OF_RX_RINGS
Definition: bnxt.h:150
#define dbg_mem(bp, func)
Definition: bnxt_dbg.h:175
Ethernet protocol.
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
Definition: bnxt.c:1081
#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT
Definition: bnxt.h:535
static void bnxt_tx_adjust_pkt(struct bnxt *bp, struct io_buffer *iob)
Definition: bnxt.c:2144
#define HWRM_FUNC_VF_CFG
Definition: bnxt_hsi.h:101
#define BYTE_SWAP_S(w)
Definition: bnxt.h:193
#define dbg_func_qcaps(bp)
Definition: bnxt_dbg.h:321
static int bnxt_reset_rx_mask(struct bnxt *bp)
Definition: bnxt.c:2051
void * priv
Driver private data.
Definition: netdevice.h:431
#define SPEED_FW_SHIFT
Definition: bnxt.h:1049
#define CHIP_NUM_57508
Definition: bnxt.h:1064
#define LINK_SPEED_FW_AUTONEG
Definition: bnxt.h:290
static int bnxt_hwrm_ring_free_nq(struct bnxt *bp)
Definition: bnxt.c:1956
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID
Definition: bnxt_hsi.h:6109
static int bnxt_hwrm_ring_alloc_rx(struct bnxt *bp)
Definition: bnxt.c:1898
#define DB_OFFSET_PF
Definition: bnxt.h:196
#define dbg_rx_vlan(bp, metadata, flags2, rx_vid)
Definition: bnxt_dbg.h:469
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition: netdevice.h:774
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP
Definition: bnxt_hsi.h:5719
#define PCI_BASE_ADDRESS_2
Definition: pci.h:64
static int bnxt_hwrm_cfa_l2_filter_free(struct bnxt *bp)
Definition: bnxt.c:1181
#define pci_read_word16
Definition: bnxt.h:1022
#define HWRM_CFA_L2_FILTER_ALLOC
Definition: bnxt_hsi.h:171
#define LINK_SPEED_FW_40G
Definition: bnxt.h:298
#define u32
Definition: vga.h:21
#define bnxt_down_nic(bp)
Definition: bnxt.c:2132
void udelay(unsigned long usecs)
Delay for a fixed number of microseconds.
Definition: timer.c:60
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED
Definition: bnxt_hsi.h:2976
u32 info3_v
Definition: bnxt.h:544
static int bnxt_hwrm_set_async_event(struct bnxt *bp)
Definition: bnxt.c:1102
#define BNXT_DMA_ALIGNMENT
Definition: bnxt.h:161
static struct net_device * netdev
Definition: gdbudp.c:52
uint64_t u64
Definition: stdint.h:25
#define PORT_PHY_FLAGS
Definition: bnxt.h:1040
#define u8
Definition: igbvf_osdep.h:38
#define LINK_SPEED_FW_MASK
Definition: bnxt.h:288
static int bnxt_hwrm_ring_alloc_cq(struct bnxt *bp)
Definition: bnxt.c:1886
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition: pci.c:96
#define STATUS_TIMEOUT
Definition: bnxt.h:86
#define NQ_CN_TYPE_CQ_NOTIFICATION
Definition: bnxt.h:576
static void bnxt_tx_complete(struct net_device *dev, u16 hw_idx)
Definition: bnxt.c:362
#define CMPL_BASE_TYPE_RX_L2
Definition: bnxt.h:526
#define TX_AVAIL(r)
Definition: bnxt.h:184
static void bnxt_down_pci(struct bnxt *bp)
Definition: bnxt.c:99
#define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED
Definition: bnxt_hsi.h:4135
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56
Definition: bnxt_hsi.h:3086
#define HWRM_NA_SIGNATURE
Definition: bnxt_hsi.h:362
__le16 cmpl_ring
Definition: bnxt_hsi.h:70
#define bnxt_down_chip(bp)
Definition: bnxt.c:2130
#define NUM_RX_BUFFERS
Definition: bnxt.h:153
#define HWRM_FUNC_DRV_RGTR
Definition: bnxt_hsi.h:115
void unregister_netdev(struct net_device *netdev)
Unregister network device.
Definition: netdevice.c:941
static void dev_p5_db(struct bnxt *bp, u32 idx, u32 xid, u32 flag)
Definition: bnxt.c:185
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, u16 idx)
Definition: bnxt.c:1239
static int bnxt_hwrm_ring_free_tx(struct bnxt *bp)
Definition: bnxt.c:1919
static int bnxt_get_pci_info(struct bnxt *bp)
Definition: bnxt.c:125
#define LINK_SPEED_FW_50G
Definition: bnxt.h:300
hwrm_func_t bring_up_chip[]
Definition: bnxt.c:2077
u32 flags2
Definition: bnxt.h:652
static void dev_p7_db(struct bnxt *bp, u32 idx, u32 xid, u32 flag, u32 epoch, u32 toggle)
Definition: bnxt.c:200
__le64 req_addr
Definition: bnxt_hsi.h:92
static void bnxt_remove_one(struct pci_device *pci)
Definition: bnxt.c:2410
#define SUPPORT_SPEEDS
Definition: bnxt.h:210
#define TX_RING_BUFFER_SIZE
Definition: bnxt.h:157
#define TX_BD_SHORT_FLAGS_LHINT_LT512
Definition: bnxt.h:468
#define HWRM_FUNC_RESOURCE_QCAPS
Definition: bnxt_hsi.h:280
#define MAX_CQ_DESC_CNT
Definition: bnxt.h:156
#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID
Definition: bnxt_hsi.h:5724
#define bnxt_up_nic(bp)
Definition: bnxt.c:2133
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB
Definition: bnxt_hsi.h:3067
void bnxt_mm_init(struct bnxt *bp, const char *func)
Definition: bnxt.c:563
#define BNXT_CQ_INTR_MODE(vf)
Definition: bnxt.h:170
#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM
Definition: bnxt.h:361
#define dbg_rx_cid(idx, cid)
Definition: bnxt_dbg.h:471
void bnxt_set_txq(struct bnxt *bp, int entry, dma_addr_t mapping, int len)
Definition: bnxt.c:342
__le16 default_cmpl_ring_id
Definition: bnxt_hsi.h:5733
#define dbg_rx_stat(bp)
Definition: bnxt_dbg.h:474
#define TX_BD_SHORT_FLAGS_LHINT_LT1K
Definition: bnxt.h:469
#define VALID_RING_NQ
Definition: bnxt.h:912
#define STATUS_FAILURE
Definition: bnxt.h:63
static int bnxt_rx_complete(struct net_device *dev, struct rx_pkt_cmpl *rx)
Definition: bnxt.c:543
#define dbg_alloc_rx_iob_fail(iob_idx, cons_id)
Definition: bnxt_dbg.h:472
#define HWRM_STAT_CTX_ALLOC
Definition: bnxt_hsi.h:190
static int bnxt_tx(struct net_device *dev, struct io_buffer *iob)
Definition: bnxt.c:2160
union aes_table_entry entry[256]
Table entries, indexed by S(N)
Definition: aes.c:26
#define DBC_DBC_TYPE_SRQ
Definition: bnxt.h:437
unsigned long pci_bar_size(struct pci_device *pci, unsigned int reg)
Find the size of a PCI BAR.
Definition: pciextra.c:92
#define CMPL_BASE_TYPE_TX_L2
Definition: bnxt.h:525
#define VLAN_SHIFT
Definition: bnxt.h:1055
#define DBC_DBC_TYPE_CQ_ARMALL
Definition: bnxt.h:441
#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS
Definition: bnxt_hsi.h:1535
PCI bus.
static int bnxt_get_link_speed(struct bnxt *bp)
Definition: bnxt.c:1297
A PCI device.
Definition: pci.h:206
int register_netdev(struct net_device *netdev)
Register network device.
Definition: netdevice.c:759
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:155
__le16 req_type
Definition: bnxt_hsi.h:69
#define MEDIUM_SPEED_1000MBPS
Definition: bnxt.h:104
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT
Definition: bnxt.h:677
#define VALID_L2_FILTER
Definition: bnxt.h:911
#define HWRM_CMD_WAIT(b)
Definition: bnxt.h:146
A network device.
Definition: netdevice.h:352
static int bnxt_hwrm_vnic_free(struct bnxt *bp)
Definition: bnxt.c:1997
#define PORT_MAC_CFG_REQ_LPBK_NONE
Definition: bnxt_hsi.h:3544
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition: netdevice.h:528
#define ARRAY_SIZE(x)
Definition: efx_common.h:43
u16 type
Definition: bnxt.h:522
u8 bnxt_rx_drop(struct bnxt *bp, struct io_buffer *iob, struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len)
Definition: bnxt.c:455
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB
Definition: bnxt_hsi.h:2983
#define dump_tx_pkt(pkt, len, idx)
Definition: bnxt_dbg.h:581
#define CQ_RING_BUFFER_SIZE
Definition: bnxt.h:160
hwrm_func_t bring_down_nic[]
Definition: bnxt.c:2063
static void hwrm_write_req(struct bnxt *bp, void *req, u32 cnt)
Definition: bnxt.c:696
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE
Definition: bnxt_hsi.h:2967
#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST
Definition: bnxt_hsi.h:6579
#define PHY_SPEED
Definition: bnxt.h:208
static int bnxt_hwrm_ring_free_cq(struct bnxt *bp)
Definition: bnxt.c:1904
#define PORT_PHY_CFG_REQ_FLAGS_FORCE
Definition: bnxt_hsi.h:2942
#define FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_NUM
Definition: bnxt.h:369
#define dump_tx_stat(bp)
Definition: bnxt_dbg.h:580
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE
Definition: bnxt_hsi.h:1735
#define RX_MASK_ACCEPT_ALL_MULTICAST
Definition: bnxt.h:93
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp)
Definition: bnxt.c:1489
#define ETH_ALEN
Definition: if_ether.h:8
#define ETH_ZLEN
Definition: if_ether.h:10
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
Definition: bnxt.c:1448
#define STAT_CTX_ID
Definition: bnxt.h:183
A PCI device ID list entry.
Definition: pci.h:170
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112
Definition: bnxt_hsi.h:3088
static void bnxt_hwrm_assign_resources(struct bnxt *bp)
Definition: bnxt.c:901
#define SHORT_REQ_SIGNATURE_SHORT_CMD
Definition: bnxt_hsi.h:88
FILE_LICENCE(GPL2_ONLY)
#define TX_BD_SHORT_FLAGS_LHINT_GTE2K
Definition: bnxt.h:471
static int bnxt_hwrm_port_mac_cfg(struct bnxt *bp)
Definition: bnxt.c:1474
#define VF_CFG_ENABLE_FLAGS
Definition: bnxt.h:1058
#define HWRM_VNIC_CFG
Definition: bnxt_hsi.h:150
#define TX_BD_SHORT_FLAGS_LHINT_LT2K
Definition: bnxt.h:470
static int bnxt_hwrm_ring_alloc_nq(struct bnxt *bp)
Definition: bnxt.c:1949
static int is_valid_ether_addr(const void *addr)
Check if Ethernet address is valid.
Definition: ethernet.h:77
void * memmove(void *dest, const void *src, size_t len) __nonnull
static void bnxt_set_link(struct bnxt *bp)
Definition: bnxt.c:177
static int bnxt_query_phy_link(struct bnxt *bp)
Definition: bnxt.c:1638
#define HWRM_FUNC_DRV_UNRGTR
Definition: bnxt_hsi.h:112
#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT
Definition: bnxt_hsi.h:6460
#define ETHERTYPE_VLAN
Definition: bnxt.h:192
#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS
Definition: bnxt_hsi.h:1534
#define dbg_chip_info(bp)
Definition: bnxt_dbg.h:324
u16 flags_type
Definition: bnxt.h:455
void __asmcall int val
Definition: setjmp.h:28
#define FLAG_RESET(f, b)
Definition: bnxt.h:47
Network device operations.
Definition: netdevice.h:213
#define HWRM_RING_FREE
Definition: bnxt_hsi.h:160
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition: netdevice.c:548
static void bnxt_db_rx(struct bnxt *bp, u32 idx)
Definition: bnxt.c:241
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX
Definition: bnxt_hsi.h:3008
struct device * dev
Underlying hardware device.
Definition: netdevice.h:364
#define HWRM_VNIC_ALLOC
Definition: bnxt_hsi.h:148
#define HWRM_PORT_PHY_QCFG
Definition: bnxt_hsi.h:125
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB
Definition: bnxt_hsi.h:2990
#define MEDIUM_SPEED_50PAM4GBPS
Definition: bnxt.h:113
#define TX_IN_USE(a, b, c)
Definition: bnxt.h:185
static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
Definition: bnxt.c:1718
static int bnxt_hwrm_vnic_cfg(struct bnxt *bp)
Definition: bnxt.c:2020
#define MEDIUM_SPEED_AUTONEG
Definition: bnxt.h:100
Network device management.
#define dump_cq(cq, id, toggle)
Definition: bnxt_dbg.h:516
#define CMPL_BASE_TYPE_STAT_EJECT
Definition: bnxt.h:531
#define MEDIUM_SPEED_100PAM4GBPS
Definition: bnxt.h:114
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition: pci.h:369
u8 bnxt_is_pci_vf(struct pci_device *pdev)
Check if Virtual Function.
Definition: bnxt.c:91
#define MAX_TX_DESC_CNT
Definition: bnx2.h:3881
#define dbg_tx_done(pkt, len, idx)
Definition: bnxt_dbg.h:583
void mdelay(unsigned long msecs)
Delay for a fixed number of milliseconds.
Definition: timer.c:78
#define PCICFG_ME_REGISTER
Definition: bnxt.h:132
#define LINK_SPEED_FW_2_5G
Definition: bnxt.h:318
#define BNXT_FLAG_LINK_SPEEDS2
Definition: bnxt.h:55
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB
Definition: bnxt_hsi.h:3081
#define HWRM_NVM_GET_VARIABLE
Definition: bnxt_hsi.h:308
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB
Definition: bnxt_hsi.h:3066
#define HWRM_VERSION_UPDATE
Definition: bnxt_hsi.h:370
struct net_device * dev
Definition: bnxt.h:932
#define LINK_DEFAULT_TIMEOUT
Definition: bnxt.h:173
#define CMPL_DOORBELL_KEY_CMPL
Definition: bnxt.h:409
Definition: bnxt.h:479
#define NO_MORE_CQ_BD_TO_SERVICE
Definition: bnxt.h:188
#define prn_set_speed(speed)
Definition: bnxt_dbg.h:323
#define RING_ALLOC_REQ_RING_TYPE_TX
Definition: bnxt_hsi.h:6113
static void short_hwrm_cmd_req(struct bnxt *bp, u16 len)
Definition: bnxt.c:707
static int bnxt_hwrm_ring_free_grp(struct bnxt *bp)
Definition: bnxt.c:1741
#define RX_MASK_ACCEPT_MULTICAST
Definition: bnxt.h:92
#define RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_ACT_REC_PTR
Definition: bnxt.h:806
#define SPEED_DRV_SHIFT
Definition: bnxt.h:1047
uint32_t len
Length.
Definition: ena.h:14
#define dbg_fw_ver(resp, tmo)
Definition: bnxt_dbg.h:319
uint32_t type
Operating system type.
Definition: ena.h:12
#define ENOBUFS
No buffer space available.
Definition: errno.h:498
#define MEDIUM_FULL_DUPLEX
Definition: bnxt.h:126
#define MEDIUM_SPEED_400PAM4_112GBPS
Definition: bnxt.h:118
static int bnxt_get_vlan(struct bnxt *bp)
Definition: bnxt.c:1382
#define VALID_STAT_CTX
Definition: bnxt.h:904
Media Independent Interface constants.
void bnxt_free_mem(struct bnxt *bp)
Definition: bnxt.c:616
#define VALID_RING_RX
Definition: bnxt.h:907
__le16 num_hw_ring_grps
Definition: bnxt_hsi.h:1562
#define FUNC_CFG_REQ_ENABLES_EVB_MODE
Definition: bnxt_hsi.h:1547
#define RING_ALLOC_REQ_RING_TYPE_RX
Definition: bnxt_hsi.h:6114
static void bnxt_adv_nq_index(struct bnxt *bp, u16 cnt)
Definition: bnxt.c:2191
#define dump_rx_bd(rx_cmp, rx_cmp_hi, desc_idx)
Definition: bnxt_dbg.h:468
__le16 target_id
Definition: bnxt_hsi.h:72
#define RING_FREE_REQ_RING_TYPE_L2_CMPL
Definition: bnxt_hsi.h:6194
static int bnxt_alloc_rx_iob(struct bnxt *bp, u16 cons_id, u16 iob_idx)
Definition: bnxt.c:408
#define RING_FREE_REQ_RING_TYPE_RX
Definition: bnxt_hsi.h:6196
static u16 bnxt_get_rx_vlan(struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi)
Definition: bnxt.c:285
void * data
Start of data.
Definition: iobuf.h:48
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp)
Definition: bnxt.c:1974
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2
Definition: bnxt_hsi.h:2978
#define barrier()
Optimisation barrier.
Definition: compiler.h:655
static int bnxt_hwrm_backing_store_qcfg(struct bnxt *bp)
Definition: bnxt.c:1416
#define NQ_CN_V
Definition: bnxt.h:590
#define FUNC_CFG_REQ_ENABLES_NUM_MSIX
Definition: bnxt_hsi.h:1551
#define HWRM_PORT_PHY_CFG
Definition: bnxt_hsi.h:118
#define TX_RING_QID
Definition: bnxt.h:181
#define LINK_SPEED_FW_100G
Definition: bnxt.h:302
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK
Definition: bnxt_hsi.h:2979
#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD
Definition: bnxt_hsi.h:1739
__le64 resp_addr
Definition: bnxt_hsi.h:73
struct net_device * alloc_etherdev(size_t priv_size)
Allocate Ethernet device.
Definition: ethernet.c:264
u8 rx[WPA_TKIP_MIC_KEY_LEN]
MIC key for packets from the AP.
Definition: wpa.h:234
#define LINK_SPEED_FW_200G
Definition: bnxt.h:304
#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS
Definition: bnxt_hsi.h:1538
struct pci_device_id * id
Driver device ID.
Definition: pci.h:243
u16 errors_v2
Definition: bnxt.h:672
#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
Definition: bnxt_hsi.h:6580
static int bnxt_open(struct net_device *dev)
Definition: bnxt.c:2135
#define FUNC_CFG_REQ_ENABLES_NUM_VNICS
Definition: bnxt_hsi.h:1537
void iounmap(volatile const void *io_addr)
Unmap I/O address.
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
Definition: bnxt.c:1696
static void bnxt_poll(struct net_device *dev)
Definition: bnxt.c:2309
#define MAX_ETHERNET_PACKET_BUFFER_SIZE
Definition: bnxt.h:147
#define DEFAULT_NUMBER_OF_TX_RINGS
Definition: bnxt.h:149
#define BNXT_FLAG_PCI_VF
Definition: bnxt.h:54
#define RING_FREE_REQ_RING_TYPE_NQ
Definition: bnxt_hsi.h:6199
static int bnxt_set_rx_mask(struct bnxt *bp)
Definition: bnxt.c:2046
#define HWRM_RING_ALLOC
Definition: bnxt_hsi.h:159
#define DMA_BUFFER_SIZE
Definition: bnxt.h:165
static int bnxt_hwrm_func_qcfg_req(struct bnxt *bp)
Definition: bnxt.c:949
#define SPEED_FW_MASK
Definition: bnxt.h:1048
#define BNXT_FLAG_IS_CHIP_P7
Definition: bnxt.h:58
#define SERVICE_NEXT_CQ_BD
Definition: bnxt.h:189
#define DBC_MSG_TOGGLE(idx)
Definition: bnxt.h:205
#define SET_MEDIUM_DUPLEX(bp, d)
Definition: bnxt.h:129
int bnxt_vlan_drop(struct bnxt *bp, u16 rx_vlan)
Definition: bnxt.c:310
static void free_phys(void *ptr, size_t size)
Free memory allocated with malloc_phys()
Definition: malloc.h:77
#define SPEED_DRV_MASK
Definition: bnxt.h:1046
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB
Definition: bnxt_hsi.h:3083
#define DBC_DBC_TYPE_SQ
Definition: bnxt.h:435
static u16 bnxt_get_pkt_vlan(char *src)
Definition: bnxt.c:278
void mb(void)
Memory barrier.
#define dbg_link_status(bp)
Definition: bnxt_dbg.h:675
#define DBC_MSG_EPCH(idx)
Definition: bnxt.h:203
#define RX_DOORBELL_KEY_RX
Definition: bnxt.h:397
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL
Definition: bnxt_hsi.h:6112
#define RX_PKT_CMPL_V2
Definition: bnxt.h:673
#define PCI_SUBSYSTEM_VENDOR_ID
PCI subsystem vendor ID.
Definition: pci.h:75
#define NEXT_IDX(N, S)
Definition: bnxt.h:168
#define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_SFT
Definition: bnxt.h:825
#define BNXT_FLAG_HWRM_SHORT_CMD_SUPP
Definition: bnxt.h:48
#define VALID_RING_TX
Definition: bnxt.h:906
#define dbg_num_rings(bp)
Definition: bnxt_dbg.h:325
#define MEDIUM_SPEED_40GBPS
Definition: bnxt.h:109
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56
Definition: bnxt_hsi.h:3084
#define LINK_SPEED_FW_400G_PAM4
Definition: bnxt.h:314
#define TX_DOORBELL_KEY_TX
Definition: bnxt.h:387
#define dbg_link_state(bp, tmo)
Definition: bnxt_dbg.h:676
#define RX_RING_QID
Definition: bnxt.h:182
#define FUNC_CFG_PRE_BOOT_MBA_VLAN_NUM
Definition: bnxt.h:373
#define RING_ALLOC_REQ_INT_MODE_POLL
Definition: bnxt_hsi.h:6168
#define dbg_func_qcfg(bp)
Definition: bnxt_dbg.h:322
u32 set_rx_mask(u32 rx_mask)
Definition: bnxt.c:1205
#define PCI_BASE_ADDRESS_4
Definition: pci.h:66
u32 metadata
Definition: bnxt.h:664
#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE
Definition: bnxt_hsi.h:5712
static void bnxt_service_nq(struct net_device *dev)
Definition: bnxt.c:2270
Definition: bnxt.h:562
#define HWRM_VERSION_MAJOR
Definition: bnxt_hsi.h:368
void * pci_ioremap(struct pci_device *pci, unsigned long bus_addr, size_t len)
Map PCI bus address as an I/O address.
#define LINK_SPEED_FW_100G_PAM4
Definition: bnxt.h:308
hwrm_func_t bring_down_chip[]
Definition: bnxt.c:2058
#define MEDIUM_SPEED_100GBPS
Definition: bnxt.h:111
u16 len
Definition: bnxt.h:637
#define CHIP_NUM_57504
Definition: bnxt.h:1065
hwrm_func_t bring_up_nic[]
Definition: bnxt.c:2095
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE
Definition: bnxt_hsi.h:2060
#define dbg_tx_pad(plen, len)
Definition: bnxt_dbg.h:579
static void bnxt_set_rx_desc(u8 *buf, struct io_buffer *iob, u16 cid, u32 idx)
Definition: bnxt.c:395
#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME
Definition: bnxt_hsi.h:1175
static struct net_device_operations bnxt_netdev_ops
Definition: bnxt.c:2333
#define HWRM_FUNC_BACKING_STORE_QCFG
Definition: bnxt_hsi.h:284
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR
Definition: bnxt_hsi.h:6427
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB
Definition: bnxt_hsi.h:2988
#define HWRM_RING_GRP_FREE
Definition: bnxt_hsi.h:166
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0
Definition: bnxt_hsi.h:1426
#define BNXT_FLAG_RESOURCE_QCAPS_SUPPORT
Definition: bnxt.h:50
#define FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED
Definition: bnxt.h:378
#define NULL
NULL pointer (VOID *)
Definition: Base.h:321
struct golan_eqe_cmd cmd
Definition: CIB_PRM.h:29
#define SET_LINK(p, m, s)
Definition: bnxt.h:1044
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB
Definition: bnxt_hsi.h:3082
__le16 max_req_win_len
Definition: bnxt_hsi.h:438
#define DBC_DBC_TYPE_NQ_ARM
Definition: bnxt.h:446
#define PCI_ROM(_vendor, _device, _name, _description, _data)
Definition: pci.h:303
#define CMPL_BASE_TYPE_MASK
Definition: bnxt.h:523
#define SET_MEDIUM_SPEED(bp, s)
Definition: bnxt.h:124
void iob_pad(struct io_buffer *iobuf, size_t min_len)
Pad I/O buffer.
Definition: iobpad.c:49
static int bnxt_hwrm_backing_store_cfg(struct bnxt *bp)
Definition: bnxt.c:1431
#define VLAN_MASK
Definition: bnxt.h:1054
#define GRC_COM_CHAN_TRIG
Definition: bnxt.h:134
#define MEDIUM_SPEED_400PAM4GBPS
Definition: bnxt.h:117
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST
Definition: bnxt_hsi.h:6419
struct bofm_section_header done
Definition: bofm_test.c:46
#define HWRM_MAX_REQ_LEN
Definition: bnxt_hsi.h:363
#define HWRM_CMD_POLL_WAIT_TIME
Definition: bnxt.h:142
uint8_t u8
Definition: stdint.h:19
#define HWRM_VNIC_FREE
Definition: bnxt_hsi.h:149
uint32_t u32
Definition: stdint.h:23
#define MEDIA_AUTO_DETECT_SHIFT
Definition: bnxt.h:1053
#define VALID_RING_GRP
Definition: bnxt.h:908
#define DMA_ALIGN_4K
Definition: bnxt.h:162
Definition: bnxt.h:914
#define IPXE_VERSION_MINOR
Definition: bnxt.h:39
#define GRC_COM_CHAN_BASE
Definition: bnxt.h:133
uint16_t flag
Flag number.
Definition: hyperv.h:14
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
Definition: bnxt_hsi.h:3090
#define HWRM_PORT_PHY_QCAPS
Definition: bnxt_hsi.h:128
u8 tx[WPA_TKIP_MIC_KEY_LEN]
MIC key for packets to the AP.
Definition: wpa.h:237
#define VNIC_ALLOC_REQ_FLAGS_DEFAULT
Definition: bnxt_hsi.h:5667
#define HWRM_CMD_DEFAULT_TIMEOUT
Definition: bnxt.h:141
static int bnxt_get_phy_link(struct bnxt *bp)
Definition: bnxt.c:1673
if(natsemi->flags &NATSEMI_64BIT) return 1
union dma_addr64_t dma
Definition: bnxt.h:476
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX
Definition: bnxt_hsi.h:2966
#define BNXT_FLAG_IS_CHIP_P5_PLUS
Definition: bnxt.h:57
void * memset(void *dest, int character, size_t len) __nonnull
A persistent I/O buffer.
Definition: iobuf.h:33
uint8_t flags
Flags.
Definition: ena.h:18
#define DBC_MSG_XID(xid, flg)
Definition: bnxt.h:200
struct pci_driver bnxt_pci_driver __pci_driver
Definition: bnxt.c:2436
#define VALID_RX_IOB
Definition: bnxt.h:910
int bnxt_hwrm_ring_free(struct bnxt *bp, u16 ring_id, u8 ring_type)
Definition: bnxt.c:1795
static void bnxt_db_cq(struct bnxt *bp)
Definition: bnxt.c:227