bnxt.c
1 
2 FILE_LICENCE ( GPL2_ONLY );
3 
4 #include <mii.h>
5 #include <stdio.h>
6 #include <errno.h>
7 #include <unistd.h>
8 #include <byteswap.h>
9 #include <ipxe/pci.h>
10 #include <ipxe/iobuf.h>
11 #include <ipxe/timer.h>
12 #include <ipxe/malloc.h>
13 #include <ipxe/if_ether.h>
14 #include <ipxe/ethernet.h>
15 #include <ipxe/netdevice.h>
16 #include "bnxt.h"
17 #include "bnxt_dbg.h"
18 
19 static void bnxt_service_cq ( struct net_device *dev );
20 static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx );
21 static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt );
22 static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt );
23 static int bnxt_rx_complete ( struct net_device *dev, struct rx_pkt_cmpl *rx );
24 void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt );
25 
26 static struct pci_device_id bnxt_nics[] = {
27  PCI_ROM( 0x14e4, 0x16c0, "14e4-16C0", "14e4-16C0", 0 ),
28  PCI_ROM( 0x14e4, 0x16c1, "14e4-16C1", "14e4-16C1", BNXT_FLAG_PCI_VF ),
29  PCI_ROM( 0x14e4, 0x16c8, "14e4-16C8", "14e4-16C8", 0 ),
30  PCI_ROM( 0x14e4, 0x16c9, "14e4-16C9", "14e4-16C9", 0 ),
31  PCI_ROM( 0x14e4, 0x16ca, "14e4-16CA", "14e4-16CA", 0 ),
32  PCI_ROM( 0x14e4, 0x16cc, "14e4-16CC", "14e4-16CC", 0 ),
33  PCI_ROM( 0x14e4, 0x16cd, "14e4-16CD", "14e4-16CD", 0 ),
34  PCI_ROM( 0x14e4, 0x16ce, "14e4-16CE", "14e4-16CE", 0 ),
35  PCI_ROM( 0x14e4, 0x16cf, "14e4-16CF", "14e4-16CF", 0 ),
36  PCI_ROM( 0x14e4, 0x16d0, "14e4-16D0", "14e4-16D0", 0 ),
37  PCI_ROM( 0x14e4, 0x16d1, "14e4-16D1", "14e4-16D1", 0 ),
38  PCI_ROM( 0x14e4, 0x16d2, "14e4-16D2", "14e4-16D2", 0 ),
39  PCI_ROM( 0x14e4, 0x16d4, "14e4-16D4", "14e4-16D4", 0 ),
40  PCI_ROM( 0x14e4, 0x16d5, "14e4-16D5", "14e4-16D5", 0 ),
41  PCI_ROM( 0x14e4, 0x16d6, "14e4-16D6", "14e4-16D6", 0 ),
42  PCI_ROM( 0x14e4, 0x16d7, "14e4-16D7", "14e4-16D7", 0 ),
43  PCI_ROM( 0x14e4, 0x16d8, "14e4-16D8", "14e4-16D8", 0 ),
44  PCI_ROM( 0x14e4, 0x16d9, "14e4-16D9", "14e4-16D9", 0 ),
45  PCI_ROM( 0x14e4, 0x16da, "14e4-16DA", "14e4-16DA", 0 ),
46  PCI_ROM( 0x14e4, 0x16db, "14e4-16DB", "14e4-16DB", 0 ),
47  PCI_ROM( 0x14e4, 0x16dc, "14e4-16DC", "14e4-16DC", BNXT_FLAG_PCI_VF ),
48  PCI_ROM( 0x14e4, 0x16de, "14e4-16DE", "14e4-16DE", 0 ),
49  PCI_ROM( 0x14e4, 0x16df, "14e4-16DF", "14e4-16DF", 0 ),
50  PCI_ROM( 0x14e4, 0x16e0, "14e4-16E0", "14e4-16E0", 0 ),
51  PCI_ROM( 0x14e4, 0x16e2, "14e4-16E2", "14e4-16E2", 0 ),
52  PCI_ROM( 0x14e4, 0x16e3, "14e4-16E3", "14e4-16E3", 0 ),
53  PCI_ROM( 0x14e4, 0x16e4, "14e4-16E4", "14e4-16E4", 0 ),
54  PCI_ROM( 0x14e4, 0x16e7, "14e4-16E7", "14e4-16E7", 0 ),
55  PCI_ROM( 0x14e4, 0x16e8, "14e4-16E8", "14e4-16E8", 0 ),
56  PCI_ROM( 0x14e4, 0x16e9, "14e4-16E9", "14e4-16E9", 0 ),
57  PCI_ROM( 0x14e4, 0x16ea, "14e4-16EA", "14e4-16EA", 0 ),
58  PCI_ROM( 0x14e4, 0x16eb, "14e4-16EB", "14e4-16EB", 0 ),
59  PCI_ROM( 0x14e4, 0x16ec, "14e4-16EC", "14e4-16EC", 0 ),
60  PCI_ROM( 0x14e4, 0x16ed, "14e4-16ED", "14e4-16ED", 0 ),
61  PCI_ROM( 0x14e4, 0x16ee, "14e4-16EE", "14e4-16EE", 0 ),
62  PCI_ROM( 0x14e4, 0x16ef, "14e4-16EF", "14e4-16EF", 0 ),
63  PCI_ROM( 0x14e4, 0x16f0, "14e4-16F0", "14e4-16F0", 0 ),
64  PCI_ROM( 0x14e4, 0x16f1, "14e4-16F1", "14e4-16F1", 0 ),
65  PCI_ROM( 0x14e4, 0x1604, "14e4-1604", "14e4-1604", 0 ),
66  PCI_ROM( 0x14e4, 0x1605, "14e4-1605", "14e4-1605", 0 ),
67  PCI_ROM( 0x14e4, 0x1606, "14e4-1606", "14e4-1606", 0 ),
68  PCI_ROM( 0x14e4, 0x1609, "14e4-1609", "14e4-1609", 0 ),
69  PCI_ROM( 0x14e4, 0x1614, "14e4-1614", "14e4-1614", 0 ),
70  PCI_ROM( 0x14e4, 0xd802, "14e4-D802", "14e4-D802", 0 ),
71  PCI_ROM( 0x14e4, 0xd804, "14e4-D804", "14e4-D804", 0 ),
72  PCI_ROM( 0x14e4, 0x1750, "14e4-1750", "14e4-1750", 0 ),
73  PCI_ROM( 0x14e4, 0x1802, "14e4-1802", "14e4-1802", 0 ),
74  PCI_ROM( 0x14e4, 0x1805, "14e4-1805", "14e4-1805", 0 ),
75  PCI_ROM( 0x14e4, 0x1751, "14e4-1751", "14e4-1751", 0 ),
76  PCI_ROM( 0x14e4, 0x1801, "14e4-1801", "14e4-1801", 0 ),
77  PCI_ROM( 0x14e4, 0x1804, "14e4-1804", "14e4-1804", 0 ),
78  PCI_ROM( 0x14e4, 0x1752, "14e4-1752", "14e4-1752", 0 ),
79  PCI_ROM( 0x14e4, 0x1800, "14e4-1800", "14e4-1800", 0 ),
80  PCI_ROM( 0x14e4, 0x1803, "14e4-1803", "14e4-1803", 0 ),
81  PCI_ROM( 0x14e4, 0x1806, "14e4-1806", "14e4-1806", BNXT_FLAG_PCI_VF ),
82  PCI_ROM( 0x14e4, 0x1807, "14e4-1807", "14e4-1807", BNXT_FLAG_PCI_VF ),
83  PCI_ROM( 0x14e4, 0x1808, "14e4-1808", "14e4-1808", BNXT_FLAG_PCI_VF ),
84  PCI_ROM( 0x14e4, 0x1809, "14e4-1809", "14e4-1809", BNXT_FLAG_PCI_VF ),
85 };
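/* Supported Broadcom NetXtreme-E device IDs.  The driver_data field
 * carries BNXT_FLAG_PCI_VF for the virtual-function IDs, which
 * bnxt_is_pci_vf ( ) tests below.
 */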
86 
87 /**
88  * Check if Virtual Function
89  */
90 u8 bnxt_is_pci_vf ( struct pci_device *pdev )
91 {
92  if ( FLAG_TEST ( pdev->id->driver_data, BNXT_FLAG_PCI_VF ) ) {
93  return 1;
94  }
95  return 0;
96 }
97 
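/**
 * Unmap the BAR regions mapped by bnxt_get_pci_info ( )
 */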
98 static void bnxt_down_pci ( struct bnxt *bp )
99 {
100  DBGP ( "%s\n", __func__ );
101  if ( bp->bar2 ) {
102  iounmap ( bp->bar2 );
103  bp->bar2 = NULL;
104  }
105  if ( bp->bar1 ) {
106  iounmap ( bp->bar1 );
107  bp->bar1 = NULL;
108  }
109  if ( bp->bar0 ) {
110  iounmap ( bp->bar0 );
111  bp->bar0 = NULL;
112  }
113 }
114 
115 static void *bnxt_pci_base ( struct pci_device *pdev, unsigned int reg )
116 {
117  unsigned long reg_base, reg_size;
118 
119  reg_base = pci_bar_start ( pdev, reg );
120  reg_size = pci_bar_size ( pdev, reg );
121  return pci_ioremap ( pdev, reg_base, reg_size );
122 }
123 
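/**
 * Read PCI identifiers, disable INTx, map BARs 0/2/4 and record
 * whether this function is a VF
 */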
124 static int bnxt_get_pci_info ( struct bnxt *bp )
125 {
126  u16 cmd_reg = 0;
127 
128  DBGP ( "%s\n", __func__ );
129  /* Disable Interrupt */
130  pci_read_word16 ( bp->pdev, PCI_COMMAND, &bp->cmd_reg );
131  cmd_reg = bp->cmd_reg | PCI_COMMAND_INTX_DISABLE;
132  pci_write_word ( bp->pdev, PCI_COMMAND, cmd_reg );
133  pci_read_word16 ( bp->pdev, PCI_COMMAND, &cmd_reg );
134 
135  /* SSVID */
136  pci_read_word16 ( bp->pdev,
137  PCI_SUBSYSTEM_VENDOR_ID,
138  &bp->subsystem_vendor );
139 
140  /* SSDID */
141  pci_read_word16 ( bp->pdev,
142  PCI_SUBSYSTEM_ID,
143  &bp->subsystem_device );
144 
145  /* Function Number */
146  pci_read_byte ( bp->pdev,
148  &bp->pf_num );
149 
150  /* Get Bar Address */
151  bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 );
152  bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 );
153  bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 );
154 
155  /* Virtual function */
156  bp->vf = bnxt_is_pci_vf ( bp->pdev );
157 
158  dbg_pci ( bp, __func__, cmd_reg );
159  return STATUS_SUCCESS;
160 }
161 
162 static int bnxt_get_device_address ( struct bnxt *bp )
163 {
164  struct net_device *dev = bp->dev;
165 
166  DBGP ( "%s\n", __func__ );
167  memcpy ( &dev->hw_addr[0], ( char * )&bp->mac_addr[0], ETH_ALEN );
168  if ( !is_valid_ether_addr ( &dev->hw_addr[0] ) ) {
169  DBGP ( "- %s ( ): Failed\n", __func__ );
170  return -EINVAL;
171  }
172 
173  return STATUS_SUCCESS;
174 }
175 
176 static void bnxt_set_link ( struct bnxt *bp )
177 {
178  if ( bp->link_status == STATUS_LINK_ACTIVE )
179  netdev_link_up ( bp->dev );
180  else
181  netdev_link_down ( bp->dev );
182 }
183 
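/**
 * Ring a Thor (57500-series) doorbell: the 64-bit value written is
 * DBC_MSG_XID ( xid, flag ) in the upper 32 bits (ring id and doorbell
 * type) and DBC_MSG_IDX ( idx ) in the lower 32 bits (ring index).
 */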
184 static void thor_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag )
185 {
186  void *off;
187  u64 val;
188 
189  if ( bp->vf )
190  off = ( void * ) ( bp->bar1 + DB_OFFSET_VF );
191  else
192  off = ( void * ) ( bp->bar1 + DB_OFFSET_PF );
193 
194  val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) |
195  ( u64 )DBC_MSG_IDX ( idx );
196  write64 ( val, off );
197 }
198 
199 static void bnxt_db_nq ( struct bnxt *bp )
200 {
201  if ( bp->thor )
202  thor_db ( bp, ( u32 )bp->nq.cons_id,
203  ( u32 )bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM );
204  else
205  write32 ( CMPL_DOORBELL_KEY_CMPL, ( bp->bar1 + 0 ) );
206 }
207 
208 static void bnxt_db_cq ( struct bnxt *bp )
209 {
210  if ( bp->thor )
211  thor_db ( bp, ( u32 )bp->cq.cons_id,
212  ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ_ARMALL );
213  else
214  write32 ( CQ_DOORBELL_KEY_IDX ( bp->cq.cons_id ),
215  ( bp->bar1 + 0 ) );
216 }
217 
218 static void bnxt_db_rx ( struct bnxt *bp, u32 idx )
219 {
220  if ( bp->thor )
221  thor_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ );
222  else
223  write32 ( RX_DOORBELL_KEY_RX | idx, ( bp->bar1 + 0 ) );
224 }
225 
226 static void bnxt_db_tx ( struct bnxt *bp, u32 idx )
227 {
228  if ( bp->thor )
229  thor_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ );
230  else
231  write32 ( ( u32 ) ( TX_DOORBELL_KEY_TX | idx ),
232  ( bp->bar1 + 0 ) );
233 }
234 
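/**
 * Insert an 802.1Q VLAN tag after the Ethernet header of a packet and
 * grow the I/O buffer by VLAN_HDR_SIZE
 */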
235 void bnxt_add_vlan ( struct io_buffer *iob, u16 vlan )
236 {
237  char *src = ( char * )iob->data;
238  u16 len = iob_len ( iob );
239 
240  memmove ( ( char * )&src[MAC_HDR_SIZE + VLAN_HDR_SIZE],
241  ( char * )&src[MAC_HDR_SIZE],
242  ( len - MAC_HDR_SIZE ) );
243 
244  * ( u16 * ) ( &src[MAC_HDR_SIZE] ) = BYTE_SWAP_S ( ETHERTYPE_VLAN );
245  * ( u16 * ) ( &src[MAC_HDR_SIZE + 2] ) = BYTE_SWAP_S ( vlan );
246  iob_put ( iob, VLAN_HDR_SIZE );
247 }
248 
249 static u16 bnxt_get_pkt_vlan ( char *src )
250 {
251  if ( * ( ( u16 * )&src[MAC_HDR_SIZE] ) == BYTE_SWAP_S ( ETHERTYPE_VLAN ) )
252  return BYTE_SWAP_S ( * ( ( u16 * )&src[MAC_HDR_SIZE + 2] ) );
253  return 0;
254 }
255 
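/**
 * Decide whether a received packet should be dropped based on its VLAN
 * tag: returns 0 to accept, 1 to drop, comparing the received tag with
 * the transmit-time tag ( vlan_tx ) and the configured vlan_id
 */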
256 int bnxt_vlan_drop ( struct bnxt *bp, u16 rx_vlan )
257 {
258  if ( rx_vlan ) {
259  if ( bp->vlan_tx ) {
260  if ( rx_vlan == bp->vlan_tx )
261  return 0;
262  } else {
263  if ( rx_vlan == bp->vlan_id )
264  return 0;
265  if ( rx_vlan && !bp->vlan_id )
266  return 0;
267  }
268  } else {
269  if ( !bp->vlan_tx && !bp->vlan_id )
270  return 0;
271  }
272 
273  return 1;
274 }
275 
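/**
 * Number of free TX descriptors, derived from the producer and
 * consumer indices
 */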
276 static inline u32 bnxt_tx_avail ( struct bnxt *bp )
277 {
278  u32 avail;
279  u32 use;
280 
281  barrier ( );
282  avail = TX_AVAIL ( bp->tx.ring_cnt );
283  use = TX_IN_USE ( bp->tx.prod_id, bp->tx.cons_id, bp->tx.ring_cnt );
284  dbg_tx_avail ( bp, avail, use );
285  return ( avail-use );
286 }
287 
288 void bnxt_set_txq ( struct bnxt *bp, int entry, dma_addr_t mapping, int len )
289 {
290  struct tx_bd_short *prod_bd;
291 
292  prod_bd = ( struct tx_bd_short * )BD_NOW ( bp->tx.bd_virt,
293  entry, sizeof ( struct tx_bd_short ) );
294  if ( len < 512 )
295  prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT512;
296  else if ( len < 1024 )
297  prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT1K;
298  else if ( len < 2048 )
299  prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT2K;
300  else
301  prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_GTE2K;
302  prod_bd->flags_type |= TX_BD_FLAGS;
303  prod_bd->dma.addr = mapping;
304  prod_bd->len = len;
305  prod_bd->opaque = ( u32 )entry;
306 }
307 
308 static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx )
309 {
310  struct bnxt *bp = dev->priv;
311  struct io_buffer *iob;
312 
313  iob = bp->tx.iob[hw_idx];
314  dbg_tx_done ( iob->data, iob_len ( iob ), hw_idx );
315  netdev_tx_complete ( dev, iob );
316  bp->tx.cons_id = NEXT_IDX ( hw_idx, bp->tx.ring_cnt );
317  bp->tx.cnt++;
318  dump_tx_stat ( bp );
319 }
320 
321 int bnxt_free_rx_iob ( struct bnxt *bp )
322 {
323  unsigned int i;
324 
325  DBGP ( "%s\n", __func__ );
326  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RX_IOB ) ) )
327  return STATUS_SUCCESS;
328 
329  for ( i = 0; i < bp->rx.buf_cnt; i++ ) {
330  if ( bp->rx.iob[i] ) {
331  free_iob ( bp->rx.iob[i] );
332  bp->rx.iob[i] = NULL;
333  }
334  }
335  bp->rx.iob_cnt = 0;
336 
337  FLAG_RESET ( bp->flag_hwrm, VALID_RX_IOB );
338  return STATUS_SUCCESS;
339 }
340 
341 static void bnxt_set_rx_desc ( u8 *buf, struct io_buffer *iob,
342  u16 cid, u32 idx )
343 {
344  struct rx_prod_pkt_bd *desc;
345  u16 off = cid * sizeof ( struct rx_prod_pkt_bd );
346 
347  desc = ( struct rx_prod_pkt_bd * )&buf[off];
348  desc->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
349  desc->len = MAX_ETHERNET_PACKET_BUFFER_SIZE;
350  desc->opaque = idx;
351  desc->dma.addr = virt_to_bus ( iob->data );
352 }
353 
354 static int bnxt_alloc_rx_iob ( struct bnxt *bp, u16 cons_id, u16 iob_idx )
355 {
356  struct io_buffer *iob;
357 
358  iob = alloc_iob ( BNXT_RX_STD_DMA_SZ );
359  if ( !iob ) {
360  DBGP ( "- %s ( ): alloc_iob Failed\n", __func__ );
361  return -ENOMEM;
362  }
363 
364  dbg_alloc_rx_iob ( iob, iob_idx, cons_id );
365  bnxt_set_rx_desc ( ( u8 * )bp->rx.bd_virt, iob, cons_id,
366  ( u32 ) iob_idx );
367  bp->rx.iob[iob_idx] = iob;
368  return 0;
369 }
370 
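/**
 * Refill the RX ring: allocate I/O buffers for empty slots, advance
 * the consumer index and ring the RX doorbell if anything was posted
 */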
371 int bnxt_post_rx_buffers ( struct bnxt *bp )
372 {
373  u16 cons_id = ( bp->rx.cons_id % bp->rx.ring_cnt );
374  u16 iob_idx;
375 
376  while ( bp->rx.iob_cnt < bp->rx.buf_cnt ) {
377  iob_idx = ( cons_id % bp->rx.buf_cnt );
378  if ( !bp->rx.iob[iob_idx] ) {
379  if ( bnxt_alloc_rx_iob ( bp, cons_id, iob_idx ) < 0 ) {
380  dbg_alloc_rx_iob_fail ( iob_idx, cons_id );
381  break;
382  }
383  }
384  cons_id = NEXT_IDX ( cons_id, bp->rx.ring_cnt );
385  bp->rx.iob_cnt++;
386  }
387 
388  if ( cons_id != bp->rx.cons_id ) {
389  dbg_rx_cid ( bp->rx.cons_id, cons_id );
390  bp->rx.cons_id = cons_id;
391  bnxt_db_rx ( bp, ( u32 )cons_id );
392  }
393 
394  FLAG_SET ( bp->flag_hwrm, VALID_RX_IOB );
395  return STATUS_SUCCESS;
396 }
397 
398 u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob,
399  struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len )
400 {
401  u8 *rx_buf = ( u8 * )iob->data;
402  u16 err_flags, rx_vlan;
403  u8 ignore_chksum_err = 0;
404  int i;
405 
406  err_flags = rx_cmp_hi->errors_v2 >> RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT;
407  if ( rx_cmp_hi->errors_v2 == 0x20 || rx_cmp_hi->errors_v2 == 0x21 )
408  ignore_chksum_err = 1;
409 
410  if ( err_flags && !ignore_chksum_err ) {
411  bp->rx.drop_err++;
412  return 1;
413  }
414 
415  for ( i = 0; i < 6; i++ ) {
416  if ( rx_buf[6 + i] != bp->mac_addr[i] )
417  break;
418  }
419 
420  /* Drop the loopback packets */
421  if ( i == 6 ) {
422  bp->rx.drop_lb++;
423  return 2;
424  }
425 
426  /* Get VLAN ID from RX completion ring */
427  if ( rx_cmp_hi->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN )
428  rx_vlan = ( rx_cmp_hi->metadata &
429  RX_PKT_CMPL_METADATA_VID_MASK );
430  else
431  rx_vlan = 0;
432 
433  dbg_rx_vlan ( bp, rx_cmp_hi->metadata, rx_cmp_hi->flags2, rx_vlan );
434  if ( bnxt_vlan_drop ( bp, rx_vlan ) ) {
435  bp->rx.drop_vlan++;
436  return 3;
437  }
438  iob_put ( iob, rx_len );
439 
440  if ( rx_vlan )
441  bnxt_add_vlan ( iob, rx_vlan );
442 
443  bp->rx.good++;
444  return 0;
445 }
446 
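/**
 * Advance the completion queue consumer index by cnt entries, toggling
 * the expected valid bit whenever the ring wraps
 */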
447 static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt )
448 {
449  u16 cons_id;
450 
451  cons_id = bp->cq.cons_id + cnt;
452  if ( cons_id >= MAX_CQ_DESC_CNT ) {
453  /* Toggle completion bit when the ring wraps. */
454  bp->cq.completion_bit ^= 1;
455  cons_id = cons_id - MAX_CQ_DESC_CNT;
456  }
457  bp->cq.cons_id = cons_id;
458 }
459 
460 void bnxt_rx_process ( struct net_device *dev, struct bnxt *bp,
461  struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi )
462 {
463  u32 desc_idx = rx_cmp->opaque;
464  struct io_buffer *iob = bp->rx.iob[desc_idx];
465  u8 drop;
466 
467  dump_rx_bd ( rx_cmp, rx_cmp_hi, desc_idx );
468  assert ( iob );
469  drop = bnxt_rx_drop ( bp, iob, rx_cmp_hi, rx_cmp->len );
470  dbg_rxp ( iob->data, rx_cmp->len, drop );
471  if ( drop )
472  netdev_rx_err ( dev, iob, -EINVAL );
473  else
474  netdev_rx ( dev, iob );
475 
476  bp->rx.cnt++;
477  bp->rx.iob[desc_idx] = NULL;
478  bp->rx.iob_cnt--;
480  bnxt_adv_cq_index ( bp, 2 ); /* Rx completion is 2 entries. */
481  dbg_rx_stat ( bp );
482 }
483 
484 static int bnxt_rx_complete ( struct net_device *dev,
485  struct rx_pkt_cmpl *rx_cmp )
486 {
487  struct bnxt *bp = dev->priv;
488  struct rx_pkt_cmpl_hi *rx_cmp_hi;
489  u8 cmpl_bit = bp->cq.completion_bit;
490 
491  if ( bp->cq.cons_id == ( bp->cq.ring_cnt - 1 ) ) {
492  rx_cmp_hi = ( struct rx_pkt_cmpl_hi * )bp->cq.bd_virt;
493  cmpl_bit ^= 0x1; /* Ring has wrapped. */
494  } else
495  rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) ( rx_cmp+1 );
496 
497  if ( ! ( ( rx_cmp_hi->errors_v2 & RX_PKT_CMPL_V2 ) ^ cmpl_bit ) ) {
498  bnxt_rx_process ( dev, bp, rx_cmp, rx_cmp_hi );
499  return SERVICE_NEXT_CQ_BD;
500  } else
501  return NO_MORE_CQ_BD_TO_SERVICE;
502 }
503 
504 void bnxt_mm_init ( struct bnxt *bp, const char *func )
505 {
506  DBGP ( "%s\n", __func__ );
507  memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE );
508  memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE );
509  memset ( bp->hwrm_addr_dma, 0, DMA_BUFFER_SIZE );
510  bp->req_addr_mapping = virt_to_bus ( bp->hwrm_addr_req );
511  bp->resp_addr_mapping = virt_to_bus ( bp->hwrm_addr_resp );
512  bp->dma_addr_mapping = virt_to_bus ( bp->hwrm_addr_dma );
513  bp->link_status = STATUS_LINK_DOWN;
514  bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
515  bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE;
516  bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
517  bp->nq.ring_cnt = MAX_NQ_DESC_CNT;
518  bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
519  bp->tx.ring_cnt = MAX_TX_DESC_CNT;
520  bp->rx.ring_cnt = MAX_RX_DESC_CNT;
521  bp->rx.buf_cnt = NUM_RX_BUFFERS;
522  dbg_mem ( bp, func );
523 }
524 
525 void bnxt_mm_nic ( struct bnxt *bp )
526 {
527  DBGP ( "%s\n", __func__ );
528  memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE );
529  memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE );
530  memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE );
531  memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE );
532  bp->nq.cons_id = 0;
533  bp->nq.completion_bit = 0x1;
534  bp->cq.cons_id = 0;
535  bp->cq.completion_bit = 0x1;
536  bp->tx.prod_id = 0;
537  bp->tx.cons_id = 0;
538  bp->rx.cons_id = 0;
539  bp->rx.iob_cnt = 0;
540 
541  bp->link_status = STATUS_LINK_DOWN;
542  bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
543  bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE;
544  bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
545  bp->nq.ring_cnt = MAX_NQ_DESC_CNT;
546  bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
547  bp->tx.ring_cnt = MAX_TX_DESC_CNT;
548  bp->rx.ring_cnt = MAX_RX_DESC_CNT;
549  bp->rx.buf_cnt = NUM_RX_BUFFERS;
550 }
551 
552 void bnxt_free_mem ( struct bnxt *bp )
553 {
554  DBGP ( "%s\n", __func__ );
555  if ( bp->nq.bd_virt ) {
556  free_phys ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
557  bp->nq.bd_virt = NULL;
558  }
559 
560  if ( bp->cq.bd_virt ) {
561  free_phys ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
562  bp->cq.bd_virt = NULL;
563  }
564 
565  if ( bp->rx.bd_virt ) {
566  free_phys ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
567  bp->rx.bd_virt = NULL;
568  }
569 
570  if ( bp->tx.bd_virt ) {
571  free_phys ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
572  bp->tx.bd_virt = NULL;
573  }
574 
575  if ( bp->hwrm_addr_dma ) {
576  free_phys ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
577  bp->dma_addr_mapping = 0;
578  bp->hwrm_addr_dma = NULL;
579  }
580 
581  if ( bp->hwrm_addr_resp ) {
582  free_phys ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
583  bp->resp_addr_mapping = 0;
584  bp->hwrm_addr_resp = NULL;
585  }
586 
587  if ( bp->hwrm_addr_req ) {
588  free_phys ( bp->hwrm_addr_req, REQ_BUFFER_SIZE );
589  bp->req_addr_mapping = 0;
590  bp->hwrm_addr_req = NULL;
591  }
592  DBGP ( "- %s ( ): - Done\n", __func__ );
593 }
594 
595 int bnxt_alloc_mem ( struct bnxt *bp )
596 {
597  DBGP ( "%s\n", __func__ );
598  bp->hwrm_addr_req = malloc_phys ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
599  bp->hwrm_addr_resp = malloc_phys ( RESP_BUFFER_SIZE,
600  BNXT_DMA_ALIGNMENT );
601  bp->hwrm_addr_dma = malloc_phys ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
602  bp->tx.bd_virt = malloc_phys ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
603  bp->rx.bd_virt = malloc_phys ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
604  bp->nq.bd_virt = malloc_phys ( NQ_RING_BUFFER_SIZE, DMA_ALIGN_4K );
605  bp->cq.bd_virt = malloc_phys ( CQ_RING_BUFFER_SIZE, DMA_ALIGN_4K );
606  test_if ( bp->hwrm_addr_req &&
607  bp->hwrm_addr_resp &&
608  bp->hwrm_addr_dma &&
609  bp->tx.bd_virt &&
610  bp->rx.bd_virt &&
611  bp->nq.bd_virt &&
612  bp->cq.bd_virt ) {
613  bnxt_mm_init ( bp, __func__ );
614  return STATUS_SUCCESS;
615  }
616 
617  DBGP ( "- %s ( ): Failed\n", __func__ );
618  bnxt_free_mem ( bp );
619  return -ENOMEM;
620 }
621 
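/**
 * Prepare a HWRM request header: zero the request buffer, set the
 * command type, response DMA address and next sequence number
 */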
622 static void hwrm_init ( struct bnxt *bp, struct input *req, u16 cmd, u16 len )
623 {
624  memset ( req, 0, len );
625  req->req_type = cmd;
626  req->cmpl_ring = ( u16 )HWRM_NA_SIGNATURE;
627  req->target_id = ( u16 )HWRM_NA_SIGNATURE;
628  req->resp_addr = bp->resp_addr_mapping;
629  req->seq_id = bp->seq_id++;
630 }
631 
632 static void hwrm_write_req ( struct bnxt *bp, void *req, u32 cnt )
633 {
634  u32 i = 0;
635 
636  for ( i = 0; i < cnt; i++ ) {
637  write32 ( ( ( u32 * )req )[i],
638  ( bp->bar0 + GRC_COM_CHAN_BASE + ( i * 4 ) ) );
639  }
640  write32 ( 0x1, ( bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG ) );
641 }
642 
643 static void short_hwrm_cmd_req ( struct bnxt *bp, u16 len )
644 {
645  struct hwrm_short_input sreq;
646 
647  memset ( &sreq, 0, sizeof ( struct hwrm_short_input ) );
648  sreq.req_type = ( u16 ) ( ( struct input * )bp->hwrm_addr_req )->req_type;
650  sreq.size = len;
651  sreq.req_addr = bp->req_addr_mapping;
652  mdelay ( 100 );
653  dbg_short_cmd ( ( u8 * )&sreq, __func__,
654  sizeof ( struct hwrm_short_input ) );
655  hwrm_write_req ( bp, &sreq, sizeof ( struct hwrm_short_input ) / 4 );
656 }
657 
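/**
 * Issue the HWRM request in hwrm_addr_req (using the short-command
 * format when it exceeds hwrm_max_req_len) and poll the response
 * buffer until sequence number, request type and valid byte match or
 * the timeout expires; returns the firmware error code
 */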
658 static int wait_resp ( struct bnxt *bp, u32 tmo, u16 len, const char *func )
659 {
660  struct input *req = ( struct input * )bp->hwrm_addr_req;
661  struct output *resp = ( struct output * )bp->hwrm_addr_resp;
662  u8 *ptr = ( u8 * )resp;
663  u32 idx;
664  u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER ( ( u32 )tmo );
665  u16 resp_len = 0;
666  u16 ret = STATUS_TIMEOUT;
667 
668  if ( len > bp->hwrm_max_req_len )
669  short_hwrm_cmd_req ( bp, len );
670  else
671  hwrm_write_req ( bp, req, ( u32 ) ( len / 4 ) );
672 
673  for ( idx = 0; idx < wait_cnt; idx++ ) {
674  resp_len = resp->resp_len;
675  test_if ( resp->seq_id == req->seq_id &&
676  resp->req_type == req->req_type &&
677  ptr[resp_len - 1] == 1 ) {
678  bp->last_resp_code = resp->error_code;
679  ret = resp->error_code;
680  break;
681  }
683  }
684  dbg_hw_cmd ( bp, func, len, resp_len, tmo, ret );
685  return ( int )ret;
686 }
687 
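/**
 * HWRM_VER_GET: query firmware and interface versions, record the
 * command timeout and maximum request length, and detect Thor
 * ( CHIP_NUM_57500 ) devices
 */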
688 static int bnxt_hwrm_ver_get ( struct bnxt *bp )
689 {
690  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ver_get_input );
691  struct hwrm_ver_get_input *req;
692  struct hwrm_ver_get_output *resp;
693  int rc;
694 
695  DBGP ( "%s\n", __func__ );
696  req = ( struct hwrm_ver_get_input * )bp->hwrm_addr_req;
697  resp = ( struct hwrm_ver_get_output * )bp->hwrm_addr_resp;
698  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VER_GET, cmd_len );
699  req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
700  req->hwrm_intf_min = HWRM_VERSION_MINOR;
701  req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
702  rc = wait_resp ( bp, HWRM_CMD_DEFAULT_TIMEOUT, cmd_len, __func__ );
703  if ( rc )
704  return STATUS_FAILURE;
705 
706  bp->hwrm_spec_code =
707  resp->hwrm_intf_maj_8b << 16 |
708  resp->hwrm_intf_min_8b << 8 |
709  resp->hwrm_intf_upd_8b;
710  bp->hwrm_cmd_timeout = ( u32 )resp->def_req_timeout;
711  if ( !bp->hwrm_cmd_timeout )
712  bp->hwrm_cmd_timeout = ( u32 )HWRM_CMD_DEFAULT_TIMEOUT;
713  if ( resp->hwrm_intf_maj_8b >= 1 )
714  bp->hwrm_max_req_len = resp->max_req_win_len;
715  bp->chip_id =
716  resp->chip_rev << 24 |
717  resp->chip_metal << 16 |
718  resp->chip_bond_id << 8 |
719  resp->chip_platform_type;
720  bp->chip_num = resp->chip_num;
721  test_if ( ( resp->dev_caps_cfg & SHORT_CMD_SUPPORTED ) &&
722  ( resp->dev_caps_cfg & SHORT_CMD_REQUIRED ) )
724  bp->hwrm_max_ext_req_len = resp->max_ext_req_len;
725  if ( bp->chip_num == CHIP_NUM_57500 )
726  bp->thor = 1;
727  dbg_fw_ver ( resp, bp->hwrm_cmd_timeout );
728  return STATUS_SUCCESS;
729 }
730 
731 static int bnxt_hwrm_func_resource_qcaps ( struct bnxt *bp )
732 {
733  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_resource_qcaps_input );
734  struct hwrm_func_resource_qcaps_input *req;
735  struct hwrm_func_resource_qcaps_output *resp;
736  int rc;
737 
738  DBGP ( "%s\n", __func__ );
739  req = ( struct hwrm_func_resource_qcaps_input * )bp->hwrm_addr_req;
740  resp = ( struct hwrm_func_resource_qcaps_output * )bp->hwrm_addr_resp;
741  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESOURCE_QCAPS,
742  cmd_len );
743  req->fid = ( u16 )HWRM_NA_SIGNATURE;
744  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
745  if ( rc != STATUS_SUCCESS )
746  return STATUS_SUCCESS;
747 
749 
750  // VFs
751  if ( !bp->vf ) {
752  bp->max_vfs = resp->max_vfs;
753  bp->vf_res_strategy = resp->vf_reservation_strategy;
754  }
755 
756  // vNICs
757  bp->min_vnics = resp->min_vnics;
758  bp->max_vnics = resp->max_vnics;
759 
760  // MSI-X
761  bp->max_msix = resp->max_msix;
762 
763  // Ring Groups
764  bp->min_hw_ring_grps = resp->min_hw_ring_grps;
765  bp->max_hw_ring_grps = resp->max_hw_ring_grps;
766 
767  // TX Rings
768  bp->min_tx_rings = resp->min_tx_rings;
769  bp->max_tx_rings = resp->max_tx_rings;
770 
771  // RX Rings
772  bp->min_rx_rings = resp->min_rx_rings;
773  bp->max_rx_rings = resp->max_rx_rings;
774 
775  // Completion Rings
776  bp->min_cp_rings = resp->min_cmpl_rings;
777  bp->max_cp_rings = resp->max_cmpl_rings;
778 
779  // RSS Contexts
780  bp->min_rsscos_ctxs = resp->min_rsscos_ctx;
781  bp->max_rsscos_ctxs = resp->max_rsscos_ctx;
782 
783  // L2 Contexts
784  bp->min_l2_ctxs = resp->min_l2_ctxs;
785  bp->max_l2_ctxs = resp->max_l2_ctxs;
786 
787  // Statistic Contexts
788  bp->min_stat_ctxs = resp->min_stat_ctx;
789  bp->max_stat_ctxs = resp->max_stat_ctx;
791  return STATUS_SUCCESS;
792 }
793 
794 static u32 bnxt_set_ring_info ( struct bnxt *bp )
795 {
796  u32 enables = 0;
797 
798  DBGP ( "%s\n", __func__ );
799  bp->num_cmpl_rings = DEFAULT_NUMBER_OF_CMPL_RINGS;
800  bp->num_tx_rings = DEFAULT_NUMBER_OF_TX_RINGS;
801  bp->num_rx_rings = DEFAULT_NUMBER_OF_RX_RINGS;
802  bp->num_hw_ring_grps = DEFAULT_NUMBER_OF_RING_GRPS;
803  bp->num_stat_ctxs = DEFAULT_NUMBER_OF_STAT_CTXS;
804 
805  if ( bp->min_cp_rings <= DEFAULT_NUMBER_OF_CMPL_RINGS )
806  bp->num_cmpl_rings = bp->min_cp_rings;
807 
808  if ( bp->min_tx_rings <= DEFAULT_NUMBER_OF_TX_RINGS )
809  bp->num_tx_rings = bp->min_tx_rings;
810 
811  if ( bp->min_rx_rings <= DEFAULT_NUMBER_OF_RX_RINGS )
812  bp->num_rx_rings = bp->min_rx_rings;
813 
814  if ( bp->min_hw_ring_grps <= DEFAULT_NUMBER_OF_RING_GRPS )
815  bp->num_hw_ring_grps = bp->min_hw_ring_grps;
816 
817  if ( bp->min_stat_ctxs <= DEFAULT_NUMBER_OF_STAT_CTXS )
818  bp->num_stat_ctxs = bp->min_stat_ctxs;
819 
820  dbg_num_rings ( bp );
821  enables = ( FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
822  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
823  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
824  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
825  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS );
826  return enables;
827 }
828 
829 static void bnxt_hwrm_assign_resources ( struct bnxt *bp )
830 {
831  struct hwrm_func_cfg_input *req;
832  u32 enables = 0;
833 
834  DBGP ( "%s\n", __func__ );
835  if ( FLAG_TEST ( bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT ) )
836  enables = bnxt_set_ring_info ( bp );
837 
838  req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
839  req->num_cmpl_rings = bp->num_cmpl_rings;
840  req->num_tx_rings = bp->num_tx_rings;
841  req->num_rx_rings = bp->num_rx_rings;
842  req->num_stat_ctxs = bp->num_stat_ctxs;
843  req->num_hw_ring_grps = bp->num_hw_ring_grps;
844  req->enables = enables;
845 }
846 
847 static int bnxt_hwrm_func_qcaps_req ( struct bnxt *bp )
848 {
849  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcaps_input );
850  struct hwrm_func_qcaps_input *req;
851  struct hwrm_func_qcaps_output *resp;
852  int rc;
853 
854  DBGP ( "%s\n", __func__ );
855  if ( bp->vf )
856  return STATUS_SUCCESS;
857 
858  req = ( struct hwrm_func_qcaps_input * )bp->hwrm_addr_req;
859  resp = ( struct hwrm_func_qcaps_output * )bp->hwrm_addr_resp;
860  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCAPS, cmd_len );
861  req->fid = ( u16 )HWRM_NA_SIGNATURE;
862  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
863  if ( rc ) {
864  DBGP ( "- %s ( ): Failed\n", __func__ );
865  return STATUS_FAILURE;
866  }
867 
868  bp->fid = resp->fid;
869  bp->port_idx = ( u8 )resp->port_id;
870 
871  /* Get MAC address for this PF */
872  memcpy ( &bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN );
873  dbg_func_qcaps ( bp );
874  return STATUS_SUCCESS;
875 }
876 
877 static int bnxt_hwrm_func_qcfg_req ( struct bnxt *bp )
878 {
879  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcfg_input );
880  struct hwrm_func_qcfg_input *req;
881  struct hwrm_func_qcfg_output *resp;
882  int rc;
883 
884  DBGP ( "%s\n", __func__ );
885  req = ( struct hwrm_func_qcfg_input * )bp->hwrm_addr_req;
886  resp = ( struct hwrm_func_qcfg_output * )bp->hwrm_addr_resp;
887  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCFG, cmd_len );
888  req->fid = ( u16 )HWRM_NA_SIGNATURE;
889  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
890  if ( rc ) {
891  DBGP ( "- %s ( ): Failed\n", __func__ );
892  return STATUS_FAILURE;
893  }
894 
895  if ( resp->flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST )
896  FLAG_SET ( bp->flags, BNXT_FLAG_MULTI_HOST );
897 
898  if ( resp->port_partition_type &
900  FLAG_SET ( bp->flags, BNXT_FLAG_NPAR_MODE );
901 
902  bp->ordinal_value = ( u8 )resp->pci_id & 0x0F;
903  bp->stat_ctx_id = resp->stat_ctx_id;
904 
905  /* If VF is set to TRUE, then use some data from func_qcfg ( ). */
906  if ( bp->vf ) {
907  bp->fid = resp->fid;
908  bp->port_idx = ( u8 )resp->port_id;
909  bp->vlan_id = resp->vlan;
910 
911  /* Get MAC address for this VF */
912  memcpy ( bp->mac_addr, resp->mac_address, ETH_ALEN );
913  }
914  dbg_func_qcfg ( bp );
915  return STATUS_SUCCESS;
916 }
917 
918 static int bnxt_hwrm_func_reset_req ( struct bnxt *bp )
919 {
920  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_reset_input );
921  struct hwrm_func_reset_input *req;
922 
923  DBGP ( "%s\n", __func__ );
924  req = ( struct hwrm_func_reset_input * )bp->hwrm_addr_req;
925  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESET, cmd_len );
926  if ( !bp->vf )
927  req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME;
928 
929  return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
930 }
931 
932 static int bnxt_hwrm_func_cfg_req ( struct bnxt *bp )
933 {
934  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input );
935  struct hwrm_func_cfg_input *req;
936 
937  DBGP ( "%s\n", __func__ );
938  if ( bp->vf )
939  return STATUS_SUCCESS;
940 
941  req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
942  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
943  req->fid = ( u16 )HWRM_NA_SIGNATURE;
944  bnxt_hwrm_assign_resources ( bp );
945  if ( bp->thor ) {
949  req->num_msix = 1;
950  req->num_vnics = 1;
952  }
953  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
954 }
955 
956 static int bnxt_hwrm_func_drv_rgtr ( struct bnxt *bp )
957 {
958  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_rgtr_input );
959  struct hwrm_func_drv_rgtr_input *req;
960  int rc;
961 
962  DBGP ( "%s\n", __func__ );
963  req = ( struct hwrm_func_drv_rgtr_input * )bp->hwrm_addr_req;
964  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_RGTR, cmd_len );
965 
966  /* Register with HWRM */
970  req->async_event_fwd[0] |= 0x01;
975  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
976  if ( rc ) {
977  DBGP ( "- %s ( ): Failed\n", __func__ );
978  return STATUS_FAILURE;
979  }
980 
981  FLAG_SET ( bp->flag_hwrm, VALID_DRIVER_REG );
982  return STATUS_SUCCESS;
983 }
984 
985 static int bnxt_hwrm_func_drv_unrgtr ( struct bnxt *bp )
986 {
987  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_unrgtr_input );
988  struct hwrm_func_drv_unrgtr_input *req;
989  int rc;
990 
991  DBGP ( "%s\n", __func__ );
992  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_DRIVER_REG ) ) )
993  return STATUS_SUCCESS;
994 
995  req = ( struct hwrm_func_drv_unrgtr_input * )bp->hwrm_addr_req;
996  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_UNRGTR, cmd_len );
998  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
999  if ( rc )
1000  return STATUS_FAILURE;
1001 
1002  FLAG_RESET ( bp->flag_hwrm, VALID_DRIVER_REG );
1003  return STATUS_SUCCESS;
1004 }
1005 
1006 static int bnxt_hwrm_set_async_event ( struct bnxt *bp )
1007 {
1008  int rc;
1009  u16 idx;
1010 
1011  DBGP ( "%s\n", __func__ );
1012  if ( bp->thor )
1013  idx = bp->nq_ring_id;
1014  else
1015  idx = bp->cq_ring_id;
1016  if ( bp->vf ) {
1017  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_vf_cfg_input );
1018  struct hwrm_func_vf_cfg_input *req;
1019 
1020  req = ( struct hwrm_func_vf_cfg_input * )bp->hwrm_addr_req;
1021  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_VF_CFG,
1022  cmd_len );
1024  req->async_event_cr = idx;
1025  req->mtu = bp->mtu;
1026  req->guest_vlan = bp->vlan_id;
1027  memcpy ( ( char * )&req->dflt_mac_addr[0], bp->mac_addr,
1028  ETH_ALEN );
1029  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1030  } else {
1031  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input );
1032  struct hwrm_func_cfg_input *req;
1033 
1034  req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
1035  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
1036  req->fid = ( u16 )HWRM_NA_SIGNATURE;
1037  req->enables = FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR;
1038  req->async_event_cr = idx;
1039  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1040  }
1041  return rc;
1042 }
1043 
1044 static int bnxt_hwrm_cfa_l2_filter_alloc ( struct bnxt *bp )
1045 {
1046  u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_alloc_input );
1047  struct hwrm_cfa_l2_filter_alloc_input *req;
1048  struct hwrm_cfa_l2_filter_alloc_output *resp;
1049  int rc;
1050  u32 flags;
1051  u32 enables;
1052 
1053  DBGP ( "%s\n", __func__ );
1054  req = ( struct hwrm_cfa_l2_filter_alloc_input * )bp->hwrm_addr_req;
1055  resp = ( struct hwrm_cfa_l2_filter_alloc_output * )bp->hwrm_addr_resp;
1056  if ( bp->vf )
1061 
1062  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_ALLOC,
1063  cmd_len );
1064  req->flags = flags;
1065  req->enables = enables;
1066  memcpy ( ( char * )&req->l2_addr[0], ( char * )&bp->mac_addr[0],
1067  ETH_ALEN );
1068  memset ( ( char * )&req->l2_addr_mask[0], 0xff, ETH_ALEN );
1069  if ( !bp->vf ) {
1070  memcpy ( ( char * )&req->t_l2_addr[0], bp->mac_addr, ETH_ALEN );
1071  memset ( ( char * )&req->t_l2_addr_mask[0], 0xff, ETH_ALEN );
1072  }
1074  req->src_id = ( u32 )bp->port_idx;
1075  req->dst_id = bp->vnic_id;
1076  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1077  if ( rc )
1078  return STATUS_FAILURE;
1079 
1080  FLAG_SET ( bp->flag_hwrm, VALID_L2_FILTER );
1081  bp->l2_filter_id = resp->l2_filter_id;
1082  return STATUS_SUCCESS;
1083 }
1084 
1085 static int bnxt_hwrm_cfa_l2_filter_free ( struct bnxt *bp )
1086 {
1087  u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_free_input );
1088  struct hwrm_cfa_l2_filter_free_input *req;
1089  int rc;
1090 
1091  DBGP ( "%s\n", __func__ );
1092  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_L2_FILTER ) ) )
1093  return STATUS_SUCCESS;
1094 
1095  req = ( struct hwrm_cfa_l2_filter_free_input * )bp->hwrm_addr_req;
1096  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_FREE,
1097  cmd_len );
1098  req->l2_filter_id = bp->l2_filter_id;
1099  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1100  if ( rc ) {
1101  DBGP ( "- %s ( ): Failed\n", __func__ );
1102  return STATUS_FAILURE;
1103  }
1104 
1105  FLAG_RESET ( bp->flag_hwrm, VALID_L2_FILTER );
1106  return STATUS_SUCCESS;
1107 }
1108 
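/**
 * Translate the driver RX_MASK_* acceptance flags into the mask value
 * passed to HWRM_CFA_L2_SET_RX_MASK
 */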
1109 u32 set_rx_mask ( u32 rx_mask )
1110 {
1111  u32 mask = 0;
1112 
1113  if ( !rx_mask )
1114  return mask;
1115 
1116  mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
1117  if ( rx_mask != RX_MASK_ACCEPT_NONE ) {
1118  if ( rx_mask & RX_MASK_ACCEPT_MULTICAST )
1119  mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
1120  if ( rx_mask & RX_MASK_ACCEPT_ALL_MULTICAST )
1121  mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
1122  if ( rx_mask & RX_MASK_PROMISCUOUS_MODE )
1123  mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
1124  }
1125  return mask;
1126 }
1127 
1128 static int bnxt_hwrm_set_rx_mask ( struct bnxt *bp, u32 rx_mask )
1129 {
1130  u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_set_rx_mask_input );
1131  struct hwrm_cfa_l2_set_rx_mask_input *req;
1132  u32 mask = set_rx_mask ( rx_mask );
1133 
1134  req = ( struct hwrm_cfa_l2_set_rx_mask_input * )bp->hwrm_addr_req;
1135  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_SET_RX_MASK,
1136  cmd_len );
1137  req->vnic_id = bp->vnic_id;
1138  req->mask = mask;
1139 
1140  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1141 }
1142 
1143 static int bnxt_hwrm_port_phy_qcfg ( struct bnxt *bp, u16 idx )
1144 {
1145  u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_qcfg_input );
1146  struct hwrm_port_phy_qcfg_input *req;
1147  struct hwrm_port_phy_qcfg_output *resp;
1148  int rc;
1149 
1150  DBGP ( "%s\n", __func__ );
1151  req = ( struct hwrm_port_phy_qcfg_input * )bp->hwrm_addr_req;
1152  resp = ( struct hwrm_port_phy_qcfg_output * )bp->hwrm_addr_resp;
1153  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCFG, cmd_len );
1154  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1155  if ( rc ) {
1156  DBGP ( "- %s ( ): Failed\n", __func__ );
1157  return STATUS_FAILURE;
1158  }
1159 
1160  if ( idx & SUPPORT_SPEEDS )
1161  bp->support_speeds = resp->support_speeds;
1162 
1163  if ( idx & DETECT_MEDIA )
1164  bp->media_detect = resp->module_status;
1165 
1166  if ( idx & PHY_SPEED )
1167  bp->current_link_speed = resp->link_speed;
1168 
1169  if ( idx & PHY_STATUS ) {
1170  if ( resp->link == PORT_PHY_QCFG_RESP_LINK_LINK )
1171  bp->link_status = STATUS_LINK_ACTIVE;
1172  else
1173  bp->link_status = STATUS_LINK_DOWN;
1174  }
1175  return STATUS_SUCCESS;
1176 }
1177 
1178 static int bnxt_hwrm_nvm_get_variable_req ( struct bnxt *bp,
1179  u16 data_len, u16 option_num, u16 dimensions, u16 index_0 )
1180 {
1181  u16 cmd_len = ( u16 )sizeof ( struct hwrm_nvm_get_variable_input );
1182  struct hwrm_nvm_get_variable_input *req;
1183 
1184  DBGP ( "%s\n", __func__ );
1185  req = ( struct hwrm_nvm_get_variable_input * )bp->hwrm_addr_req;
1186  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_NVM_GET_VARIABLE, cmd_len );
1187  req->dest_data_addr = bp->dma_addr_mapping;
1188  req->data_len = data_len;
1189  req->option_num = option_num;
1190  req->dimensions = dimensions;
1191  req->index_0 = index_0;
1192  return wait_resp ( bp,
1193  HWRM_CMD_FLASH_MULTIPLAYER ( bp->hwrm_cmd_timeout ),
1194  cmd_len, __func__ );
1195 }
1196 
1197 static int bnxt_get_link_speed ( struct bnxt *bp )
1198 {
1199  u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma;
1200 
1201  DBGP ( "%s\n", __func__ );
1204  1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1205  return STATUS_FAILURE;
1206  bp->link_set = SET_LINK ( *ptr32, SPEED_DRV_MASK, SPEED_DRV_SHIFT );
1209  1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1210  return STATUS_FAILURE;
1211  bp->link_set |= SET_LINK ( *ptr32, SPEED_FW_MASK, SPEED_FW_SHIFT );
1213  ( u16 )D3_LINK_SPEED_FW_NUM, 1,
1214  ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1215  return STATUS_FAILURE;
1216  bp->link_set |= SET_LINK ( *ptr32, D3_SPEED_FW_MASK,
1220  1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1221  return STATUS_FAILURE;
1222  bp->link_set |= SET_LINK ( *ptr32,
1224 
1225  switch ( bp->link_set & LINK_SPEED_DRV_MASK ) {
1226  case LINK_SPEED_DRV_1G:
1227  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_1000MBPS );
1228  break;
1229  case LINK_SPEED_DRV_2_5G:
1230  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_2500MBPS );
1231  break;
1232  case LINK_SPEED_DRV_10G:
1233  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_10GBPS );
1234  break;
1235  case LINK_SPEED_DRV_25G:
1236  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_25GBPS );
1237  break;
1238  case LINK_SPEED_DRV_40G:
1239  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_40GBPS );
1240  break;
1241  case LINK_SPEED_DRV_50G:
1242  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_50GBPS );
1243  break;
1244  case LINK_SPEED_DRV_100G:
1245  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_100GBPS );
1246  break;
1247  case LINK_SPEED_DRV_200G:
1248  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_200GBPS );
1249  break;
1250  case LINK_SPEED_DRV_AUTONEG:
1251  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_AUTONEG );
1252  break;
1253  default:
1254  bp->medium = SET_MEDIUM_DUPLEX ( bp, MEDIUM_FULL_DUPLEX );
1255  break;
1256  }
1257  prn_set_speed ( bp->link_set );
1258  return STATUS_SUCCESS;
1259 }
1260 
1261 static int bnxt_get_vlan ( struct bnxt *bp )
1262 {
1263  u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma;
1264 
1265  /* If VF is set to TRUE, do not issue this command */
1266  if ( bp->vf )
1267  return STATUS_SUCCESS;
1268 
1271  ( u16 )bp->ordinal_value ) != STATUS_SUCCESS )
1272  return STATUS_FAILURE;
1273 
1274  bp->mba_cfg2 = SET_MBA ( *ptr32, VLAN_MASK, VLAN_SHIFT );
1277  ( u16 )bp->ordinal_value ) != STATUS_SUCCESS )
1278  return STATUS_FAILURE;
1279 
1280  bp->mba_cfg2 |= SET_MBA ( *ptr32, VLAN_VALUE_MASK, VLAN_VALUE_SHIFT );
1281  if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED )
1282  bp->vlan_id = bp->mba_cfg2 & VLAN_VALUE_MASK;
1283  else
1284  bp->vlan_id = 0;
1285 
1286  if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED )
1287  DBGP ( "VLAN MBA Enabled ( %d )\n",
1288  ( bp->mba_cfg2 & VLAN_VALUE_MASK ) );
1289 
1290  return STATUS_SUCCESS;
1291 }
1292 
1293 static int bnxt_hwrm_backing_store_qcfg ( struct bnxt *bp )
1294 {
1295  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_qcfg_input );
1296  struct hwrm_func_backing_store_qcfg_input *req;
1297 
1298  DBGP ( "%s\n", __func__ );
1299  if ( !bp->thor )
1300  return STATUS_SUCCESS;
1301 
1302  req = ( struct hwrm_func_backing_store_qcfg_input * )bp->hwrm_addr_req;
1303  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_QCFG,
1304  cmd_len );
1305  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1306 }
1307 
1308 static int bnxt_hwrm_backing_store_cfg ( struct bnxt *bp )
1309 {
1310  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_cfg_input );
1311  struct hwrm_func_backing_store_cfg_input *req;
1312 
1313  DBGP ( "%s\n", __func__ );
1314  if ( !bp->thor )
1315  return STATUS_SUCCESS;
1316 
1317  req = ( struct hwrm_func_backing_store_cfg_input * )bp->hwrm_addr_req;
1318  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_CFG,
1319  cmd_len );
1321  req->enables = 0;
1322  return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
1323 }
1324 
1325 static int bnxt_hwrm_queue_qportcfg ( struct bnxt *bp )
1326 {
1327  u16 cmd_len = ( u16 )sizeof ( struct hwrm_queue_qportcfg_input );
1328  struct hwrm_queue_qportcfg_input *req;
1329  struct hwrm_queue_qportcfg_output *resp;
1330  int rc;
1331 
1332  DBGP ( "%s\n", __func__ );
1333  if ( !bp->thor )
1334  return STATUS_SUCCESS;
1335 
1336  req = ( struct hwrm_queue_qportcfg_input * )bp->hwrm_addr_req;
1337  resp = ( struct hwrm_queue_qportcfg_output * )bp->hwrm_addr_resp;
1338  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_QUEUE_QPORTCFG, cmd_len );
1339  req->flags = 0;
1340  req->port_id = 0;
1341  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1342  if ( rc ) {
1343  DBGP ( "- %s ( ): Failed\n", __func__ );
1344  return STATUS_FAILURE;
1345  }
1346 
1347  bp->queue_id = resp->queue_id0;
1348  return STATUS_SUCCESS;
1349 }
1350 
1351 static int bnxt_hwrm_port_mac_cfg ( struct bnxt *bp )
1352 {
1353  u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_mac_cfg_input );
1354  struct hwrm_port_mac_cfg_input *req;
1355 
1356  DBGP ( "%s\n", __func__ );
1357  if ( bp->vf )
1358  return STATUS_SUCCESS;
1359 
1360  req = ( struct hwrm_port_mac_cfg_input * )bp->hwrm_addr_req;
1361  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_MAC_CFG, cmd_len );
1363  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1364 }
1365 
1366 static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp )
1367 {
1368  u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_cfg_input );
1369  struct hwrm_port_phy_cfg_input *req;
1370  u32 flags;
1371  u32 enables = 0;
1372  u16 force_link_speed = 0;
1373  u16 auto_link_speed_mask = 0;
1374  u8 auto_mode = 0;
1375  u8 auto_pause = 0;
1376  u8 auto_duplex = 0;
1377 
1378  DBGP ( "%s\n", __func__ );
1379  req = ( struct hwrm_port_phy_cfg_input * )bp->hwrm_addr_req;
1382 
1383  switch ( GET_MEDIUM_SPEED ( bp->medium ) ) {
1384  case MEDIUM_SPEED_1000MBPS:
1386  break;
1387  case MEDIUM_SPEED_10GBPS:
1389  break;
1390  case MEDIUM_SPEED_25GBPS:
1392  break;
1393  case MEDIUM_SPEED_40GBPS:
1395  break;
1396  case MEDIUM_SPEED_50GBPS:
1398  break;
1399  case MEDIUM_SPEED_100GBPS:
1401  break;
1402  case MEDIUM_SPEED_200GBPS:
1404  break;
1405  default:
1415  auto_link_speed_mask = bp->support_speeds;
1416  break;
1417  }
1418 
1419  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_CFG, cmd_len );
1420  req->flags = flags;
1421  req->enables = enables;
1422  req->port_id = bp->port_idx;
1423  req->force_link_speed = force_link_speed;
1424  req->auto_mode = auto_mode;
1425  req->auto_duplex = auto_duplex;
1426  req->auto_pause = auto_pause;
1427  req->auto_link_speed_mask = auto_link_speed_mask;
1428 
1429  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1430 }
1431 
1432 static int bnxt_query_phy_link ( struct bnxt *bp )
1433 {
1435 
1436  DBGP ( "%s\n", __func__ );
1437  /* Query Link Status */
1439  return STATUS_FAILURE;
1440  }
1441 
1442  if ( bp->link_status == STATUS_LINK_ACTIVE )
1443  return STATUS_SUCCESS;
1444 
1445  /* If VF is set to TRUE, do not issue the following commands */
1446  if ( bp->vf )
1447  return STATUS_SUCCESS;
1448 
1449  /* If multi_host or NPAR, Do not issue bnxt_get_link_speed */
1450  if ( FLAG_TEST ( bp->flags, PORT_PHY_FLAGS ) ) {
1451  dbg_flags ( __func__, bp->flags );
1452  return STATUS_SUCCESS;
1453  }
1454 
1455  /* HWRM_NVM_GET_VARIABLE - speed */
1456  if ( bnxt_get_link_speed ( bp ) != STATUS_SUCCESS ) {
1457  return STATUS_FAILURE;
1458  }
1459 
1460  /* Configure link if it is not up */
1461  bnxt_hwrm_port_phy_cfg ( bp );
1462 
1463  /* refresh link speed values after bringing link up */
1464  return bnxt_hwrm_port_phy_qcfg ( bp, flag );
1465 }
1466 
1467 static int bnxt_get_phy_link ( struct bnxt *bp )
1468 {
1469  u16 i;
1471 
1472  DBGP ( "%s\n", __func__ );
1473  dbg_chip_info ( bp );
1474  for ( i = 0; i < ( bp->wait_link_timeout / 100 ); i++ ) {
1475  if ( bnxt_query_phy_link ( bp ) != STATUS_SUCCESS )
1476  break;
1477 
1478  if ( bp->link_status == STATUS_LINK_ACTIVE )
1479  break;
1480 
1481 // if ( bp->media_detect )
1482 // break;
1484  }
1485  dbg_link_state ( bp, ( u32 ) ( ( i + 1 ) * 100 ) );
1486  bnxt_set_link ( bp );
1487  return STATUS_SUCCESS;
1488 }
1489 
1490 static int bnxt_hwrm_stat_ctx_alloc ( struct bnxt *bp )
1491 {
1492  u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_alloc_input );
1493  struct hwrm_stat_ctx_alloc_input *req;
1494  struct hwrm_stat_ctx_alloc_output *resp;
1495  int rc;
1496 
1497  DBGP ( "%s\n", __func__ );
1498  req = ( struct hwrm_stat_ctx_alloc_input * )bp->hwrm_addr_req;
1499  resp = ( struct hwrm_stat_ctx_alloc_output * )bp->hwrm_addr_resp;
1500  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_ALLOC, cmd_len );
1501  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1502  if ( rc ) {
1503  DBGP ( "- %s ( ): Failed\n", __func__ );
1504  return STATUS_FAILURE;
1505  }
1506 
1507  FLAG_SET ( bp->flag_hwrm, VALID_STAT_CTX );
1508  bp->stat_ctx_id = ( u16 )resp->stat_ctx_id;
1509  return STATUS_SUCCESS;
1510 }
1511 
1512 static int bnxt_hwrm_stat_ctx_free ( struct bnxt *bp )
1513 {
1514  u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_free_input );
1515  struct hwrm_stat_ctx_free_input *req;
1516  int rc;
1517 
1518  DBGP ( "%s\n", __func__ );
1519  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_STAT_CTX ) ) )
1520  return STATUS_SUCCESS;
1521 
1522  req = ( struct hwrm_stat_ctx_free_input * )bp->hwrm_addr_req;
1523  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_FREE, cmd_len );
1524  req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
1525  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1526  if ( rc ) {
1527  DBGP ( "- %s ( ): Failed\n", __func__ );
1528  return STATUS_FAILURE;
1529  }
1530 
1531  FLAG_RESET ( bp->flag_hwrm, VALID_STAT_CTX );
1532  return STATUS_SUCCESS;
1533 }
1534 
1535 static int bnxt_hwrm_ring_free_grp ( struct bnxt *bp )
1536 {
1537  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_free_input );
1538  struct hwrm_ring_grp_free_input *req;
1539  int rc;
1540 
1541  DBGP ( "%s\n", __func__ );
1542  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_GRP ) ) )
1543  return STATUS_SUCCESS;
1544 
1545  req = ( struct hwrm_ring_grp_free_input * )bp->hwrm_addr_req;
1546  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_FREE, cmd_len );
1547  req->ring_group_id = ( u32 )bp->ring_grp_id;
1548  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1549  if ( rc ) {
1550  DBGP ( "- %s ( ): Failed\n", __func__ );
1551  return STATUS_FAILURE;
1552  }
1553 
1554  FLAG_RESET ( bp->flag_hwrm, VALID_RING_GRP );
1555  return STATUS_SUCCESS;
1556 }
1557 
1558 static int bnxt_hwrm_ring_alloc_grp ( struct bnxt *bp )
1559 {
1560  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_alloc_input );
1561  struct hwrm_ring_grp_alloc_input *req;
1562  struct hwrm_ring_grp_alloc_output *resp;
1563  int rc;
1564 
1565  DBGP ( "%s\n", __func__ );
1566  if ( bp->thor )
1567  return STATUS_SUCCESS;
1568 
1569  req = ( struct hwrm_ring_grp_alloc_input * )bp->hwrm_addr_req;
1570  resp = ( struct hwrm_ring_grp_alloc_output * )bp->hwrm_addr_resp;
1571  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_ALLOC, cmd_len );
1572  req->cr = bp->cq_ring_id;
1573  req->rr = bp->rx_ring_id;
1574  req->ar = ( u16 )HWRM_NA_SIGNATURE;
1575  if ( bp->vf )
1576  req->sc = bp->stat_ctx_id;
1577 
1578  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1579  if ( rc ) {
1580  DBGP ( "- %s ( ): Failed\n", __func__ );
1581  return STATUS_FAILURE;
1582  }
1583 
1584  FLAG_SET ( bp->flag_hwrm, VALID_RING_GRP );
1585  bp->ring_grp_id = ( u16 )resp->ring_group_id;
1586  return STATUS_SUCCESS;
1587 }
1588 
1589 int bnxt_hwrm_ring_free ( struct bnxt *bp, u16 ring_id, u8 ring_type )
1590 {
1591  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_free_input );
1592  struct hwrm_ring_free_input *req;
1593 
1594  DBGP ( "%s\n", __func__ );
1595  req = ( struct hwrm_ring_free_input * )bp->hwrm_addr_req;
1596  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_FREE, cmd_len );
1597  req->ring_type = ring_type;
1598  req->ring_id = ring_id;
1599  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1600 }
1601 
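/**
 * HWRM_RING_ALLOC: allocate a hardware ring of the given type ( NQ,
 * completion, TX or RX ) and record the ring id returned by firmware
 */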
1602 static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type )
1603 {
1604  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_alloc_input );
1605  struct hwrm_ring_alloc_input *req;
1606  struct hwrm_ring_alloc_output *resp;
1607  int rc;
1608 
1609  DBGP ( "%s\n", __func__ );
1610  req = ( struct hwrm_ring_alloc_input * )bp->hwrm_addr_req;
1611  resp = ( struct hwrm_ring_alloc_output * )bp->hwrm_addr_resp;
1612  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_ALLOC, cmd_len );
1613  req->ring_type = type;
1614  switch ( type ) {
1615  case RING_ALLOC_REQ_RING_TYPE_NQ:
1616  req->page_size = LM_PAGE_BITS ( 12 );
1617  req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf );
1618  req->length = ( u32 )bp->nq.ring_cnt;
1619  req->logical_id = 0xFFFF; // Required value for Thor FW?
1620  req->page_tbl_addr = virt_to_bus ( bp->nq.bd_virt );
1621  break;
1622  case RING_ALLOC_REQ_RING_TYPE_L2_CMPL:
1623  req->page_size = LM_PAGE_BITS ( 8 );
1624  req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf );
1625  req->length = ( u32 )bp->cq.ring_cnt;
1626  req->page_tbl_addr = virt_to_bus ( bp->cq.bd_virt );
1627  if ( !bp->thor )
1628  break;
1630  req->nq_ring_id = bp->nq_ring_id;
1631  req->cq_handle = ( u64 )bp->nq_ring_id;
1632  break;
1633  case RING_ALLOC_REQ_RING_TYPE_TX:
1634  req->page_size = LM_PAGE_BITS ( 8 );
1635  req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL;
1636  req->length = ( u32 )bp->tx.ring_cnt;
1637  req->queue_id = TX_RING_QID;
1638  req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
1639  req->cmpl_ring_id = bp->cq_ring_id;
1640  req->page_tbl_addr = virt_to_bus ( bp->tx.bd_virt );
1641  break;
1642  case RING_ALLOC_REQ_RING_TYPE_RX:
1643  req->page_size = LM_PAGE_BITS ( 8 );
1644  req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL;
1645  req->length = ( u32 )bp->rx.ring_cnt;
1646  req->stat_ctx_id = ( u32 )STAT_CTX_ID;
1647  req->cmpl_ring_id = bp->cq_ring_id;
1648  req->page_tbl_addr = virt_to_bus ( bp->rx.bd_virt );
1649  if ( !bp->thor )
1650  break;
1651  req->queue_id = ( u16 )RX_RING_QID;
1652  req->rx_buf_size = MAX_ETHERNET_PACKET_BUFFER_SIZE;
1654  break;
1655  default:
1656  return STATUS_SUCCESS;
1657  }
1658  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1659  if ( rc ) {
1660  DBGP ( "- %s ( ): Failed, type = %x\n", __func__, type );
1661  return STATUS_FAILURE;
1662  }
1663 
1664  if ( type == RING_ALLOC_REQ_RING_TYPE_L2_CMPL ) {
1665  FLAG_SET ( bp->flag_hwrm, VALID_RING_CQ );
1666  bp->cq_ring_id = resp->ring_id;
1667  } else if ( type == RING_ALLOC_REQ_RING_TYPE_TX ) {
1668  FLAG_SET ( bp->flag_hwrm, VALID_RING_TX );
1669  bp->tx_ring_id = resp->ring_id;
1670  } else if ( type == RING_ALLOC_REQ_RING_TYPE_RX ) {
1671  FLAG_SET ( bp->flag_hwrm, VALID_RING_RX );
1672  bp->rx_ring_id = resp->ring_id;
1673  } else if ( type == RING_ALLOC_REQ_RING_TYPE_NQ ) {
1674  FLAG_SET ( bp->flag_hwrm, VALID_RING_NQ );
1675  bp->nq_ring_id = resp->ring_id;
1676  }
1677  return STATUS_SUCCESS;
1678 }
1679 
1680 static int bnxt_hwrm_ring_alloc_cq ( struct bnxt *bp )
1681 {
1682  DBGP ( "%s\n", __func__ );
1683  return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_L2_CMPL );
1684 }
1685 
1686 static int bnxt_hwrm_ring_alloc_tx ( struct bnxt *bp )
1687 {
1688  DBGP ( "%s\n", __func__ );
1689  return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_TX );
1690 }
1691 
1692 static int bnxt_hwrm_ring_alloc_rx ( struct bnxt *bp )
1693 {
1694  DBGP ( "%s\n", __func__ );
1695  return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_RX );
1696 }
1697 
1698 static int bnxt_hwrm_ring_free_cq ( struct bnxt *bp )
1699 {
1700  int ret = STATUS_SUCCESS;
1701 
1702  DBGP ( "%s\n", __func__ );
1703  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_CQ ) ) )
1704  return ret;
1705 
1706  ret = RING_FREE ( bp, bp->cq_ring_id, RING_FREE_REQ_RING_TYPE_L2_CMPL );
1707  if ( ret == STATUS_SUCCESS )
1708  FLAG_RESET ( bp->flag_hwrm, VALID_RING_CQ );
1709 
1710  return ret;
1711 }
1712 
1713 static int bnxt_hwrm_ring_free_tx ( struct bnxt *bp )
1714 {
1715  int ret = STATUS_SUCCESS;
1716 
1717  DBGP ( "%s\n", __func__ );
1718  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_TX ) ) )
1719  return ret;
1720 
1721  ret = RING_FREE ( bp, bp->tx_ring_id, RING_FREE_REQ_RING_TYPE_TX );
1722  if ( ret == STATUS_SUCCESS )
1723  FLAG_RESET ( bp->flag_hwrm, VALID_RING_TX );
1724 
1725  return ret;
1726 }
1727 
1728 static int bnxt_hwrm_ring_free_rx ( struct bnxt *bp )
1729 {
1730  int ret = STATUS_SUCCESS;
1731 
1732  DBGP ( "%s\n", __func__ );
1733  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_RX ) ) )
1734  return ret;
1735 
1736  ret = RING_FREE ( bp, bp->rx_ring_id, RING_FREE_REQ_RING_TYPE_RX );
1737  if ( ret == STATUS_SUCCESS )
1738  FLAG_RESET ( bp->flag_hwrm, VALID_RING_RX );
1739 
1740  return ret;
1741 }
1742 
1743 static int bnxt_hwrm_ring_alloc_nq ( struct bnxt *bp )
1744 {
1745  if ( !bp->thor )
1746  return STATUS_SUCCESS;
1747  return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_NQ );
1748 }
1749 
1750 static int bnxt_hwrm_ring_free_nq ( struct bnxt *bp )
1751 {
1752  int ret = STATUS_SUCCESS;
1753 
1754  if ( !bp->thor )
1755  return STATUS_SUCCESS;
1756 
1757  DBGP ( "%s\n", __func__ );
1758  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_NQ ) ) )
1759  return ret;
1760 
1761  ret = RING_FREE ( bp, bp->nq_ring_id, RING_FREE_REQ_RING_TYPE_NQ );
1762  if ( ret == STATUS_SUCCESS )
1763  FLAG_RESET ( bp->flag_hwrm, VALID_RING_NQ );
1764 
1765  return ret;
1766 }
1767 
1768 static int bnxt_hwrm_vnic_alloc ( struct bnxt *bp )
1769 {
1770  u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_alloc_input );
1771  struct hwrm_vnic_alloc_input *req;
1772  struct hwrm_vnic_alloc_output *resp;
1773  int rc;
1774 
1775  DBGP ( "%s\n", __func__ );
1776  req = ( struct hwrm_vnic_alloc_input * )bp->hwrm_addr_req;
1777  resp = ( struct hwrm_vnic_alloc_output * )bp->hwrm_addr_resp;
1778  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_ALLOC, cmd_len );
1779  req->flags = VNIC_ALLOC_REQ_FLAGS_DEFAULT;
1780  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1781  if ( rc ) {
1782  DBGP ( "- %s ( ): Failed\n", __func__ );
1783  return STATUS_FAILURE;
1784  }
1785 
1786  FLAG_SET ( bp->flag_hwrm, VALID_VNIC_ID );
1787  bp->vnic_id = resp->vnic_id;
1788  return STATUS_SUCCESS;
1789 }
1790 
1791 static int bnxt_hwrm_vnic_free ( struct bnxt *bp )
1792 {
1793  u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_free_input );
1794  struct hwrm_vnic_free_input *req;
1795  int rc;
1796 
1797  DBGP ( "%s\n", __func__ );
1798  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_VNIC_ID ) ) )
1799  return STATUS_SUCCESS;
1800 
1801  req = ( struct hwrm_vnic_free_input * )bp->hwrm_addr_req;
1802  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_FREE, cmd_len );
1803  req->vnic_id = bp->vnic_id;
1804  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1805  if ( rc ) {
1806  DBGP ( "- %s ( ): Failed\n", __func__ );
1807  return STATUS_FAILURE;
1808  }
1809 
1810  FLAG_RESET ( bp->flag_hwrm, VALID_VNIC_ID );
1811  return STATUS_SUCCESS;
1812 }
1813 
1814 static int bnxt_hwrm_vnic_cfg ( struct bnxt *bp )
1815 {
1816  u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_cfg_input );
1817  struct hwrm_vnic_cfg_input *req;
1818 
1819  DBGP ( "%s\n", __func__ );
1820  req = ( struct hwrm_vnic_cfg_input * )bp->hwrm_addr_req;
1821  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_CFG, cmd_len );
1823  req->mru = bp->mtu;
1824 
1825  if ( bp->thor ) {
1828  req->default_rx_ring_id = bp->rx_ring_id;
1829  req->default_cmpl_ring_id = bp->cq_ring_id;
1830  } else {
1832  req->dflt_ring_grp = bp->ring_grp_id;
1833  }
1834 
1836  req->vnic_id = bp->vnic_id;
1837  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1838 }
1839 
1840 static int bnxt_set_rx_mask ( struct bnxt *bp )
1841 {
1842  return bnxt_hwrm_set_rx_mask ( bp, RX_MASK );
1843 }
1844 
1845 static int bnxt_reset_rx_mask ( struct bnxt *bp )
1846 {
1847  return bnxt_hwrm_set_rx_mask ( bp, 0 );
1848 }
1849 
1850 typedef int ( *hwrm_func_t ) ( struct bnxt *bp );
1851 
1852 static hwrm_func_t bring_down_chip[] = {
1853  bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */
1854  NULL,
1855 };
1856 
1857 static hwrm_func_t bring_down_nic[] = {
1858  bnxt_hwrm_cfa_l2_filter_free, /* HWRM_CFA_L2_FILTER_FREE */
1859  bnxt_reset_rx_mask, /* HWRM_CFA_L2_SET_RX_MASK */
1860  bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */
1861  bnxt_free_rx_iob, /* HWRM_FREE_IOB */
1862  bnxt_hwrm_vnic_free, /* HWRM_VNIC_FREE */
1863  bnxt_hwrm_ring_free_grp, /* HWRM_RING_GRP_FREE */
1864  bnxt_hwrm_ring_free_rx, /* HWRM_RING_FREE - RX Ring */
1865  bnxt_hwrm_ring_free_tx, /* HWRM_RING_FREE - TX Ring */
1866  bnxt_hwrm_stat_ctx_free, /* HWRM_STAT_CTX_FREE */
1867  bnxt_hwrm_ring_free_cq, /* HWRM_RING_FREE - CQ Ring */
1868  bnxt_hwrm_ring_free_nq, /* HWRM_RING_FREE - NQ Ring */
1869  NULL,
1870 };
1871 static hwrm_func_t bring_up_chip[] = {
1872  bnxt_hwrm_ver_get, /* HWRM_VER_GET */
1873  bnxt_hwrm_func_reset_req, /* HWRM_FUNC_RESET */
1874  bnxt_hwrm_func_drv_rgtr, /* HWRM_FUNC_DRV_RGTR */
1875  bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */
1876  bnxt_hwrm_backing_store_cfg, /* HWRM_FUNC_BACKING_STORE_CFG */
1877  bnxt_hwrm_backing_store_qcfg, /* HWRM_FUNC_BACKING_STORE_QCFG */
1878  bnxt_hwrm_func_resource_qcaps, /* HWRM_FUNC_RESOURCE_QCAPS */
1879  bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */
1880  bnxt_get_vlan, /* HWRM_NVM_GET_VARIABLE - vlan */
1881  bnxt_hwrm_port_mac_cfg, /* HWRM_PORT_MAC_CFG */
1882  bnxt_hwrm_func_cfg_req, /* HWRM_FUNC_CFG */
1883  bnxt_query_phy_link, /* HWRM_PORT_PHY_QCFG */
1884  bnxt_get_device_address, /* HW MAC address */
1885  NULL,
1886 };
1887 
1888 hwrm_func_t bring_up_nic[] = {
1889  bnxt_hwrm_stat_ctx_alloc, /* HWRM_STAT_CTX_ALLOC */
1890  bnxt_hwrm_queue_qportcfg, /* HWRM_QUEUE_QPORTCFG */
1891  bnxt_hwrm_ring_alloc_nq, /* HWRM_RING_ALLOC - NQ Ring */
1892  bnxt_hwrm_ring_alloc_cq, /* HWRM_RING_ALLOC - CQ Ring */
1893  bnxt_hwrm_ring_alloc_tx, /* HWRM_RING_ALLOC - TX Ring */
1894  bnxt_hwrm_ring_alloc_rx, /* HWRM_RING_ALLOC - RX Ring */
1895  bnxt_hwrm_ring_alloc_grp, /* HWRM_RING_GRP_ALLOC - Group */
1896  bnxt_hwrm_vnic_alloc, /* HWRM_VNIC_ALLOC */
1897  bnxt_post_rx_buffers, /* Post RX buffers */
1898  bnxt_hwrm_set_async_event, /* ENABLES_ASYNC_EVENT_CR */
1899  bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */
1900  bnxt_hwrm_cfa_l2_filter_alloc, /* HWRM_CFA_L2_FILTER_ALLOC */
1901  bnxt_get_phy_link, /* HWRM_PORT_PHY_QCFG - PhyLink */
1902  bnxt_set_rx_mask, /* HWRM_CFA_L2_SET_RX_MASK */
1903  NULL,
1904 };
1905 
1906 int bnxt_hwrm_run ( hwrm_func_t cmds[], struct bnxt *bp )
1907 {
1908  hwrm_func_t *ptr;
1909  int ret;
1910 
1911  for ( ptr = cmds; *ptr; ++ptr ) {
1912  memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE );
1913  memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE );
1914  ret = ( *ptr ) ( bp );
1915  if ( ret ) {
1916  DBGP ( "- %s ( ): Failed\n", __func__ );
1917  return STATUS_FAILURE;
1918  }
1919  }
1920  return STATUS_SUCCESS;
1921 }
1922 
1923 #define bnxt_down_chip( bp ) bnxt_hwrm_run ( bring_down_chip, bp )
1924 #define bnxt_up_chip( bp ) bnxt_hwrm_run ( bring_up_chip, bp )
1925 #define bnxt_down_nic( bp ) bnxt_hwrm_run ( bring_down_nic, bp )
1926 #define bnxt_up_nic( bp ) bnxt_hwrm_run ( bring_up_nic, bp )
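bnxt_hwrm_run() walks a NULL-terminated table of hwrm_func_t callbacks, clearing the request and response buffers before each step and aborting the whole sequence on the first failure; the four macros above simply name the chip and NIC bring-up/bring-down tables. A small, self-contained sketch of the same table-driven sequencing follows (the step names and context struct are made up for illustration, not taken from the driver).

#include <stdio.h>

struct ctx { int chip_ready; };

typedef int ( * step_fn ) ( struct ctx *ctx );

static int step_reset ( struct ctx *ctx ) { ctx->chip_ready = 1; return 0; }
static int step_query ( struct ctx *ctx ) { return ctx->chip_ready ? 0 : -1; }

/* NULL-terminated table, executed strictly in order */
static step_fn bring_up[] = { step_reset, step_query, NULL };

static int run_steps ( step_fn steps[], struct ctx *ctx ) {
	step_fn *ptr;
	for ( ptr = steps; *ptr; ++ptr ) {
		if ( ( *ptr ) ( ctx ) )
			return -1;  /* first failure aborts the sequence */
	}
	return 0;
}

int main ( void ) {
	struct ctx ctx = { 0 };
	printf ( "bring-up %s\n", run_steps ( bring_up, &ctx ) ? "failed" : "ok" );
	return 0;
}

Keeping the ordering in data rather than in one long function makes it easy to see, and to adjust, which HWRM commands run in which phase.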
1927 
1928 static int bnxt_open ( struct net_device *dev )
1929 {
1930  struct bnxt *bp = dev->priv;
1931 
1932  DBGP ( "%s\n", __func__ );
1933  bnxt_mm_nic ( bp );
1934  return (bnxt_up_nic ( bp ));
1935 }
1936 
1937 static void bnxt_tx_adjust_pkt ( struct bnxt *bp, struct io_buffer *iob )
1938 {
1939  u16 prev_len = iob_len ( iob );
1940 
1941  bp->vlan_tx = bnxt_get_pkt_vlan ( ( char * )iob->data );
1942  if ( !bp->vlan_tx && bp->vlan_id )
1943  bnxt_add_vlan ( iob, bp->vlan_id );
1944 
1945  dbg_tx_vlan ( bp, ( char * )iob->data, prev_len, iob_len ( iob ) );
1946  if ( iob_len ( iob ) != prev_len )
1947  prev_len = iob_len ( iob );
1948 
1949  iob_pad ( iob, ETH_ZLEN );
1950  dbg_tx_pad ( prev_len, iob_len ( iob ) );
1951 }
1952 
1953 static int bnxt_tx ( struct net_device *dev, struct io_buffer *iob )
1954 {
1955  struct bnxt *bp = dev->priv;
1956  u16 len, entry;
1957  dma_addr_t mapping;
1958 
1959  if ( bnxt_tx_avail ( bp ) < 1 ) {
1960  DBGP ( "- %s ( ): Failed no bd's available\n", __func__ );
1961  return -ENOBUFS;
1962  }
1963 
1964  bnxt_tx_adjust_pkt ( bp, iob );
1965  entry = bp->tx.prod_id;
1966  mapping = virt_to_bus ( iob->data );
1967  len = iob_len ( iob );
1968  bp->tx.iob[entry] = iob;
1969  bnxt_set_txq ( bp, entry, mapping, len );
1970  entry = NEXT_IDX ( entry, bp->tx.ring_cnt );
1971  dump_tx_pkt ( ( u8 * )iob->data, len, bp->tx.prod_id );
1972  /* Packets are ready, update Tx producer idx local and on card. */
1973  bnxt_db_tx ( bp, ( u32 )entry );
1974  bp->tx.prod_id = entry;
1975  bp->tx.cnt_req++;
1976  /* memory barrier */
1977  mb ( );
1978  return 0;
1979 }
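The transmit path above is a classic single-producer ring: the descriptor is written at the current producer slot, the index is advanced with wrap (NEXT_IDX), the doorbell is rung with the new value, and only then is the local producer index published. A minimal sketch of that ordering follows, with illustrative names and a fake doorbell in place of the real register write.

#include <stdio.h>
#include <stdint.h>

#define TX_RING_CNT 16  /* illustrative ring size, not the driver's */
#define NEXT_IDX( n, s ) ( ( ( n ) + 1 ) % ( s ) )

struct tx_ring {
	uint16_t prod_id;              /* next free descriptor slot */
	const void *iob[TX_RING_CNT];  /* per-slot buffer bookkeeping */
};

/* Stand-ins for writing the hardware descriptor and ringing the doorbell */
static void set_txq ( struct tx_ring *tx, uint16_t slot, const void *buf ) {
	tx->iob[slot] = buf;
}
static void db_tx ( uint16_t prod_id ) {
	printf ( "doorbell: producer now %u\n", prod_id );
}

static void tx_one ( struct tx_ring *tx, const void *buf ) {
	uint16_t entry = tx->prod_id;
	set_txq ( tx, entry, buf );              /* fill descriptor first */
	entry = NEXT_IDX ( entry, TX_RING_CNT );
	db_tx ( entry );                         /* tell hardware about it */
	tx->prod_id = entry;                     /* then publish locally */
}

int main ( void ) {
	struct tx_ring tx = { 0, { 0 } };
	const char pkt[] = "frame";
	tx_one ( &tx, pkt );
	tx_one ( &tx, pkt );
	return 0;
}

The completion path later reclaims the same slots via the opaque value echoed back in the TX completion (see bnxt_tx_complete in the CQ service loop below).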
1980 
1981 static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt )
1982 {
1983  u16 cons_id;
1984 
1985  cons_id = bp->nq.cons_id + cnt;
1986  if ( cons_id >= bp->nq.ring_cnt ) {
1987  /* Toggle completion bit when the ring wraps. */
1988  bp->nq.completion_bit ^= 1;
1989  cons_id = cons_id - bp->nq.ring_cnt;
1990  }
1991  bp->nq.cons_id = cons_id;
1992 }
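bnxt_adv_nq_index() (and its completion-queue counterpart) advances a consumer index modulo the ring size and flips the expected completion bit each time the ring wraps; the service loops below compare that bit against the valid bit in each descriptor to tell freshly written entries from stale ones left over from the previous pass. A self-contained sketch of the index/phase-bit bookkeeping, with an illustrative ring size:

#include <stdint.h>
#include <stdio.h>

#define RING_CNT 8  /* illustrative ring size, not the driver's */

struct ring_state {
	uint16_t cons_id;  /* consumer index */
	uint8_t expect_v;  /* expected valid/phase bit for this pass */
};

/* Advance the consumer index by cnt, toggling the expected
 * valid bit whenever the index wraps past the end of the ring. */
static void adv_index ( struct ring_state *r, uint16_t cnt ) {
	uint16_t cons_id = r->cons_id + cnt;
	if ( cons_id >= RING_CNT ) {
		r->expect_v ^= 1;
		cons_id -= RING_CNT;
	}
	r->cons_id = cons_id;
}

int main ( void ) {
	struct ring_state r = { 0, 1 };
	for ( int i = 0; i < 10; i++ )
		adv_index ( &r, 1 );
	printf ( "cons_id=%u expect_v=%u\n", r.cons_id, r.expect_v );
	return 0;
}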
1993 
1994 void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt )
1995 {
1996  switch ( evt->event_id ) {
1997  case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1998  if ( evt->event_data1 & 0x01 )
1999  bp->link_status = STATUS_LINK_ACTIVE;
2000  else
2001  bp->link_status = STATUS_LINK_DOWN;
2002  bnxt_set_link ( bp );
2003  dbg_link_status ( bp );
2004  break;
2005  default:
2006  break;
2007  }
2008 }
2009 
2010 static void bnxt_service_cq ( struct net_device *dev )
2011 {
2012  struct bnxt *bp = dev->priv;
2013  struct cmpl_base *cmp;
2014  struct tx_cmpl *tx;
2015  u16 old_cid = bp->cq.cons_id;
2016  int done = SERVICE_NEXT_CQ_BD;
2017  u32 cq_type;
2018 
2019  while ( done == SERVICE_NEXT_CQ_BD ) {
2020  cmp = ( struct cmpl_base * )BD_NOW ( bp->cq.bd_virt,
2021  bp->cq.cons_id,
2022  sizeof ( struct cmpl_base ) );
2023 
2024  if ( ( cmp->info3_v & CMPL_BASE_V ) ^ bp->cq.completion_bit )
2025  break;
2026 
2027  cq_type = cmp->type & CMPL_BASE_TYPE_MASK;
2028  dump_evt ( ( u8 * )cmp, cq_type, bp->cq.cons_id, 0 );
2029  dump_cq ( cmp, bp->cq.cons_id );
2030 
2031  switch ( cq_type ) {
2032  case CMPL_BASE_TYPE_TX_L2:
2033  tx = ( struct tx_cmpl * )cmp;
2034  bnxt_tx_complete ( dev, ( u16 )tx->opaque );
2035  /* Fall through */
2036  case CMPL_BASE_TYPE_STAT_EJECT:
2037  bnxt_adv_cq_index ( bp, 1 );
2038  break;
2039  case CMPL_BASE_TYPE_RX_L2:
2040  done = bnxt_rx_complete ( dev,
2041  ( struct rx_pkt_cmpl * )cmp );
2042  break;
2043  case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2044  bnxt_link_evt ( bp,
2045  ( struct hwrm_async_event_cmpl * )cmp );
2046  bnxt_adv_cq_index ( bp, 1 );
2047  break;
2048  default:
2049  done = NO_MORE_CQ_BD_TO_SERVICE;
2050  break;
2051  }
2052  }
2053 
2054  if ( bp->cq.cons_id != old_cid )
2055  bnxt_db_cq ( bp );
2056 }
2057 
2058 static void bnxt_service_nq ( struct net_device *dev )
2059 {
2060  struct bnxt *bp = dev->priv;
2061  struct nq_base *nqp;
2062  u16 old_cid = bp->nq.cons_id;
2063  int done = SERVICE_NEXT_NQ_BD;
2064  u32 nq_type;
2065 
2066  if ( !bp->thor )
2067  return;
2068 
2069  while ( done == SERVICE_NEXT_NQ_BD ) {
2070  nqp = ( struct nq_base * )BD_NOW ( bp->nq.bd_virt,
2071  bp->nq.cons_id, sizeof ( struct nq_base ) );
2072  if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit )
2073  break;
2074  nq_type = ( nqp->type & NQ_CN_TYPE_MASK );
2075  dump_evt ( ( u8 * )nqp, nq_type, bp->nq.cons_id, 1 );
2076  dump_nq ( nqp, bp->nq.cons_id );
2077 
2078  switch ( nq_type ) {
2079  case NQ_CN_TYPE_HWRM_ASYNC_EVENT:
2080  bnxt_link_evt ( bp,
2081  ( struct hwrm_async_event_cmpl * )nqp );
2082  /* Fall through */
2083  case NQ_CN_TYPE_CQ_NOTIFICATION:
2084  bnxt_adv_nq_index ( bp, 1 );
2085  break;
2086  default:
2087  done = NO_MORE_NQ_BD_TO_SERVICE;
2088  break;
2089  }
2090  }
2091 
2092  if ( bp->nq.cons_id != old_cid )
2093  bnxt_db_nq ( bp );
2094 }
2095 
2096 static void bnxt_poll ( struct net_device *dev )
2097 {
2098  mb ( );
2099  bnxt_service_cq ( dev );
2100  bnxt_service_nq ( dev );
2101 }
2102 
2103 static void bnxt_close ( struct net_device *dev )
2104 {
2105  struct bnxt *bp = dev->priv;
2106 
2107  DBGP ( "%s\n", __func__ );
2108  bnxt_down_nic (bp);
2109 
2110  /* iounmap PCI BAR ( s ) */
2111  bnxt_down_pci(bp);
2112 
2113  /* Get Bar Address */
2114  bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 );
2115  bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 );
2116  bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 );
2117 
2118 }
2119 
2120 static struct net_device_operations bnxt_netdev_ops = {
2121  .open = bnxt_open,
2122  .close = bnxt_close,
2123  .poll = bnxt_poll,
2124  .transmit = bnxt_tx,
2125 };
2126 
2127 static int bnxt_init_one ( struct pci_device *pci )
2128 {
2129  struct net_device *netdev;
2130  struct bnxt *bp;
2131  int err = 0;
2132 
2133  DBGP ( "%s\n", __func__ );
2134  /* Allocate network device */
2135  netdev = alloc_etherdev ( sizeof ( *bp ) );
2136  if ( !netdev ) {
2137  DBGP ( "- %s ( ): alloc_etherdev Failed\n", __func__ );
2138  err = -ENOMEM;
2139  goto disable_pdev;
2140  }
2141 
2142  /* Initialise network device */
2143  netdev_init ( netdev, &bnxt_netdev_ops );
2144
2145  /* Driver private area for this device */
2146  bp = netdev->priv;
2147 
2148  /* Set PCI driver private data */
2149  pci_set_drvdata ( pci, netdev );
2150 
2151  /* Clear Private area data */
2152  memset ( bp, 0, sizeof ( *bp ) );
2153  bp->pdev = pci;
2154  bp->dev = netdev;
2155  netdev->dev = &pci->dev;
2156 
2157  /* Enable PCI device */
2158  adjust_pci_device ( pci );
2159 
2160  /* Get PCI Information */
2161  bnxt_get_pci_info ( bp );
2162 
2163  /* Allocate and Initialise device specific parameters */
2164  if ( bnxt_alloc_mem ( bp ) != 0 ) {
2165  DBGP ( "- %s ( ): bnxt_alloc_mem Failed\n", __func__ );
2166  goto err_down_pci;
2167  }
2168 
2169  /* Get device specific information */
2170  if ( bnxt_up_chip ( bp ) != 0 ) {
2171  DBGP ( "- %s ( ): bnxt_up_chip Failed\n", __func__ );
2172  goto err_down_chip;
2173  }
2174 
2175  /* Register Network device */
2176  if ( register_netdev ( netdev ) != 0 ) {
2177  DBGP ( "- %s ( ): register_netdev Failed\n", __func__ );
2178  goto err_down_chip;
2179  }
2180 
2181  return 0;
2182 
2183 err_down_chip:
2184  bnxt_down_chip (bp);
2185  bnxt_free_mem ( bp );
2186 
2187 err_down_pci:
2188  bnxt_down_pci ( bp );
2189  netdev_nullify ( netdev );
2190  netdev_put ( netdev );
2191 
2192 disable_pdev:
2193  pci_set_drvdata ( pci, NULL );
2194  return err;
2195 }
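bnxt_init_one() uses the common goto-ladder unwind: each failure jumps to a label that releases everything acquired so far, in reverse order, which keeps the success path linear and avoids duplicated cleanup. Here is a minimal, self-contained sketch of that idiom; the resources are placeholders, not the driver's.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative resources standing in for netdev / DMA memory / chip state */
static void *grab_a ( void ) { return malloc ( 16 ); }
static void *grab_b ( void ) { return malloc ( 16 ); }
static int start_c ( void ) { return 0; }  /* 0 on success */

static int probe ( void ) {
	void *a, *b;
	int err = -1;

	if ( ! ( a = grab_a ( ) ) )
		goto err_none;
	if ( ! ( b = grab_b ( ) ) )
		goto err_free_a;
	if ( start_c ( ) != 0 )
		goto err_free_b;
	return 0;  /* fully set up */

	/* Unwind in reverse order of acquisition */
err_free_b:
	free ( b );
err_free_a:
	free ( a );
err_none:
	return err;
}

int main ( void ) {
	printf ( "probe: %d\n", probe ( ) );
	return 0;
}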
2196 
2197 static void bnxt_remove_one ( struct pci_device *pci )
2198 {
2199  struct net_device *netdev = pci_get_drvdata ( pci );
2200  struct bnxt *bp = netdev->priv;
2201 
2202  DBGP ( "%s\n", __func__ );
2203  /* Unregister network device */
2204  unregister_netdev ( netdev );
2205
2206  /* Bring down Chip */
2207  bnxt_down_chip(bp);
2208 
2209  /* Free Allocated resource */
2210  bnxt_free_mem ( bp );
2211 
2212  /* iounmap PCI BAR ( s ) */
2213  bnxt_down_pci ( bp );
2214 
2215  /* Stop network device */
2216  netdev_nullify ( netdev );
2217 
2218  /* Drop reference to network device */
2219  netdev_put ( netdev );
2220 }
2221 
2222 /* Broadcom NXE PCI driver */
2223 struct pci_driver bnxt_pci_driver __pci_driver = {
2224  .ids = bnxt_nics,
2225  .id_count = ARRAY_SIZE ( bnxt_nics ),
2226  .probe = bnxt_init_one,
2227  .remove = bnxt_remove_one,
2228 };