iPXE
bnxt.c
Go to the documentation of this file.
1 
2 FILE_LICENCE ( GPL2_ONLY );
3 
#include <mii.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/iobuf.h>
#include <ipxe/timer.h>
#include <ipxe/malloc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/netdevice.h>
#include "bnxt.h"
#include "bnxt_dbg.h"
18 
19 static void bnxt_service_cq ( struct net_device *dev );
20 static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx );
21 static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt );
22 static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt );
23 static int bnxt_rx_complete ( struct net_device *dev, struct rx_pkt_cmpl *rx );
24 void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt );
25 
26 /**
27  * Check if Virtual Function
28  */
29 u8 bnxt_is_pci_vf ( struct pci_device *pdev )
30 {
31  u16 i;
32 
33  for ( i = 0; i < ARRAY_SIZE ( bnxt_vf_nics ); i++ ) {
34  if ( pdev->device == bnxt_vf_nics[i] )
35  return 1;
36  }
37  return 0;
38 }
39 
/**
 * Unmap all mapped PCI BARs and clear the stored pointers
 *
 * Safe to call repeatedly; each BAR is unmapped only if still mapped.
 *
 * @v bp		Driver private context
 */
static void bnxt_down_pci ( struct bnxt *bp )
{
	DBGP ( "%s\n", __func__ );
	if ( bp->bar2 ) {
		iounmap ( bp->bar2 );
		bp->bar2 = NULL;
	}
	if ( bp->bar1 ) {
		iounmap ( bp->bar1 );
		bp->bar1 = NULL;
	}
	if ( bp->bar0 ) {
		iounmap ( bp->bar0 );
		bp->bar0 = NULL;
	}
}
56 
/**
 * Map a PCI BAR into the driver's address space
 *
 * @v pdev		PCI device
 * @v reg		BAR register offset (e.g. PCI_BASE_ADDRESS_0)
 * @ret base		Mapped virtual address (or NULL on failure)
 */
static void *bnxt_pci_base ( struct pci_device *pdev, unsigned int reg )
{
	unsigned long start = pci_bar_start ( pdev, reg );
	unsigned long size = pci_bar_size ( pdev, reg );

	return ioremap ( start, size );
}
65 
/**
 * Gather PCI configuration information and map the device BARs
 *
 * Disables INTx (the driver polls), reads subsystem IDs and the function
 * number, maps BARs 0/2/4 and records whether the device is a VF.
 *
 * @v bp		Driver private context
 * @ret rc		STATUS_SUCCESS (always)
 */
static int bnxt_get_pci_info ( struct bnxt *bp )
{
	u16 cmd_reg = 0;

	DBGP ( "%s\n", __func__ );
	/* Disable Interrupt (INTx); original command value kept in bp->cmd_reg */
	pci_read_word16 ( bp->pdev, PCI_COMMAND, &bp->cmd_reg );
	cmd_reg = bp->cmd_reg | PCI_COMMAND_INTX_DISABLE;
	pci_write_word ( bp->pdev, PCI_COMMAND, cmd_reg );
	pci_read_word16 ( bp->pdev, PCI_COMMAND, &cmd_reg );

	/* SSVID
	 * NOTE(review): the register-offset argument was lost in extraction;
	 * presumably PCI_SUBSYSTEM_VENDOR_ID — confirm against upstream. */
	pci_read_word16 ( bp->pdev,
		&bp->subsystem_vendor );

	/* SSDID
	 * NOTE(review): offset argument lost; presumably PCI_SUBSYSTEM_ID. */
	pci_read_word16 ( bp->pdev,
		&bp->subsystem_device );

	/* Function Number
	 * NOTE(review): offset argument lost in extraction — confirm. */
	pci_read_byte ( bp->pdev,
		&bp->pf_num );

	/* Get Bar Address */
	bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 );
	bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 );
	bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 );

	/* Virtual function */
	bp->vf = bnxt_is_pci_vf ( bp->pdev );

	dbg_pci ( bp, __func__, cmd_reg );
	return STATUS_SUCCESS;
}
103 
104 static int bnxt_get_device_address ( struct bnxt *bp )
105 {
106  struct net_device *dev = bp->dev;
107 
108  DBGP ( "%s\n", __func__ );
109  memcpy ( &dev->hw_addr[0], ( char * )&bp->mac_addr[0], ETH_ALEN );
110  if ( !is_valid_ether_addr ( &dev->hw_addr[0] ) ) {
111  DBGP ( "- %s ( ): Failed\n", __func__ );
112  return -EINVAL;
113  }
114 
115  return STATUS_SUCCESS;
116 }
117 
118 static void bnxt_set_link ( struct bnxt *bp )
119 {
120  if ( bp->link_status == STATUS_LINK_ACTIVE )
121  netdev_link_up ( bp->dev );
122  else
123  netdev_link_down ( bp->dev );
124 }
125 
126 static void thor_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag )
127 {
128  void *off;
129  u64 val;
130 
131  if ( bp->vf )
132  off = ( void * ) ( bp->bar1 + DB_OFFSET_VF );
133  else
134  off = ( void * ) ( bp->bar1 + DB_OFFSET_PF );
135 
136  val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) |
137  ( u64 )DBC_MSG_IDX ( idx );
138  write64 ( val, off );
139 }
140 
/**
 * Ring the notification-queue doorbell (arms NQ interrupts on Thor;
 * legacy parts use the completion doorbell key instead)
 *
 * @v bp		Driver private context
 */
static void bnxt_db_nq ( struct bnxt *bp )
{
	if ( bp->thor )
		thor_db ( bp, ( u32 )bp->nq.cons_id,
			( u32 )bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM );
	else
		write32 ( CMPL_DOORBELL_KEY_CMPL, ( bp->bar1 + 0 ) );
}
149 
/**
 * Ring the completion-queue doorbell, publishing the consumer index
 * and re-arming the CQ
 *
 * @v bp		Driver private context
 */
static void bnxt_db_cq ( struct bnxt *bp )
{
	if ( bp->thor )
		thor_db ( bp, ( u32 )bp->cq.cons_id,
			( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ_ARMALL );
	else
		write32 ( CQ_DOORBELL_KEY_IDX ( bp->cq.cons_id ),
			( bp->bar1 + 0 ) );
}
159 
/**
 * Ring the RX ring doorbell with a new producer index
 *
 * @v bp		Driver private context
 * @v idx		RX ring index to publish
 */
static void bnxt_db_rx ( struct bnxt *bp, u32 idx )
{
	if ( bp->thor )
		thor_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ );
	else
		write32 ( RX_DOORBELL_KEY_RX | idx, ( bp->bar1 + 0 ) );
}
167 
/**
 * Ring the TX ring doorbell with a new producer index
 *
 * @v bp		Driver private context
 * @v idx		TX ring index to publish
 */
static void bnxt_db_tx ( struct bnxt *bp, u32 idx )
{
	if ( bp->thor )
		thor_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ );
	else
		write32 ( ( u32 ) ( TX_DOORBELL_KEY_TX | idx ),
			( bp->bar1 + 0 ) );
}
176 
177 void bnxt_add_vlan ( struct io_buffer *iob, u16 vlan )
178 {
179  char *src = ( char * )iob->data;
180  u16 len = iob_len ( iob );
181 
182  memmove ( ( char * )&src[MAC_HDR_SIZE + VLAN_HDR_SIZE],
183  ( char * )&src[MAC_HDR_SIZE],
184  ( len - MAC_HDR_SIZE ) );
185 
186  * ( u16 * ) ( &src[MAC_HDR_SIZE] ) = BYTE_SWAP_S ( ETHERTYPE_VLAN );
187  * ( u16 * ) ( &src[MAC_HDR_SIZE + 2] ) = BYTE_SWAP_S ( vlan );
188  iob_put ( iob, VLAN_HDR_SIZE );
189 }
190 
191 static u16 bnxt_get_pkt_vlan ( char *src )
192 {
193  if ( * ( ( u16 * )&src[MAC_HDR_SIZE] ) == BYTE_SWAP_S ( ETHERTYPE_VLAN ) )
194  return BYTE_SWAP_S ( * ( ( u16 * )&src[MAC_HDR_SIZE + 2] ) );
195  return 0;
196 }
197 
198 int bnxt_vlan_drop ( struct bnxt *bp, u16 rx_vlan )
199 {
200  if ( rx_vlan ) {
201  if ( bp->vlan_tx ) {
202  if ( rx_vlan == bp->vlan_tx )
203  return 0;
204  } else {
205  if ( rx_vlan == bp->vlan_id )
206  return 0;
207  if ( rx_vlan && !bp->vlan_id )
208  return 0;
209  }
210  } else {
211  if ( !bp->vlan_tx && !bp->vlan_id )
212  return 0;
213  }
214 
215  return 1;
216 }
217 
/**
 * Number of free TX descriptors
 *
 * @v bp		Driver private context
 * @ret avail		Free descriptor count (capacity minus in-use)
 */
static inline u32 bnxt_tx_avail ( struct bnxt *bp )
{
	u32 avail;
	u32 use;

	/* Compiler barrier: re-read producer/consumer indices */
	barrier ( );
	avail = TX_AVAIL ( bp->tx.ring_cnt );
	use = TX_IN_USE ( bp->tx.prod_id, bp->tx.cons_id, bp->tx.ring_cnt );
	dbg_tx_avail ( bp, avail, use );
	return ( avail-use );
}
229 
230 void bnxt_set_txq ( struct bnxt *bp, int entry, dma_addr_t mapping, int len )
231 {
232  struct tx_bd_short *prod_bd;
233 
234  prod_bd = ( struct tx_bd_short * )BD_NOW ( bp->tx.bd_virt,
235  entry, sizeof ( struct tx_bd_short ) );
236  if ( len < 512 )
238  else if ( len < 1024 )
240  else if ( len < 2048 )
242  else
244  prod_bd->flags_type |= TX_BD_FLAGS;
245  prod_bd->dma.addr = mapping;
246  prod_bd->len = len;
247  prod_bd->opaque = ( u32 )entry;
248 }
249 
/**
 * Complete a transmitted frame and advance the TX consumer index
 *
 * @v dev		Network device
 * @v hw_idx		Ring slot reported complete by hardware
 */
static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx )
{
	struct bnxt *bp = netdev_priv ( dev );
	struct io_buffer *iob;

	iob = bp->tx.iob[hw_idx];
	dbg_tx_done ( iob->data, iob_len ( iob ), hw_idx );
	netdev_tx_complete ( dev, iob );
	bp->tx.cons_id = NEXT_IDX ( hw_idx, bp->tx.ring_cnt );
	bp->tx.cnt++;
	dump_tx_stat ( bp );
}
262 
263 int bnxt_free_rx_iob ( struct bnxt *bp )
264 {
265  unsigned int i;
266 
267  DBGP ( "%s\n", __func__ );
268  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RX_IOB ) ) )
269  return STATUS_SUCCESS;
270 
271  for ( i = 0; i < bp->rx.buf_cnt; i++ ) {
272  if ( bp->rx.iob[i] ) {
273  free_iob ( bp->rx.iob[i] );
274  bp->rx.iob[i] = NULL;
275  }
276  }
277  bp->rx.iob_cnt = 0;
278 
279  FLAG_RESET ( bp->flag_hwrm, VALID_RX_IOB );
280  return STATUS_SUCCESS;
281 }
282 
283 static void bnxt_set_rx_desc ( u8 *buf, struct io_buffer *iob,
284  u16 cid, u32 idx )
285 {
286  struct rx_prod_pkt_bd *desc;
287  u16 off = cid * sizeof ( struct rx_prod_pkt_bd );
288 
289  desc = ( struct rx_prod_pkt_bd * )&buf[off];
292  desc->opaque = idx;
293  desc->dma.addr = virt_to_bus ( iob->data );
294 }
295 
296 static int bnxt_alloc_rx_iob ( struct bnxt *bp, u16 cons_id, u16 iob_idx )
297 {
298  struct io_buffer *iob;
299 
300  iob = alloc_iob ( BNXT_RX_STD_DMA_SZ );
301  if ( !iob ) {
302  DBGP ( "- %s ( ): alloc_iob Failed\n", __func__ );
303  return -ENOMEM;
304  }
305 
306  dbg_alloc_rx_iob ( iob, iob_idx, cons_id );
307  bnxt_set_rx_desc ( ( u8 * )bp->rx.bd_virt, iob, cons_id,
308  ( u32 ) iob_idx );
309  bp->rx.iob[iob_idx] = iob;
310  return 0;
311 }
312 
/**
 * Replenish the RX ring with I/O buffers and ring the doorbell
 *
 * Fills every empty buffer slot up to rx.buf_cnt; rings the RX doorbell
 * only if the producer position actually advanced.
 *
 * @v bp		Driver private context
 * @ret rc		STATUS_SUCCESS (always; allocation failure just stops early)
 */
int bnxt_post_rx_buffers ( struct bnxt *bp )
{
	u16 cons_id = ( bp->rx.cons_id % bp->rx.ring_cnt );
	u16 iob_idx;

	while ( bp->rx.iob_cnt < bp->rx.buf_cnt ) {
		iob_idx = ( cons_id % bp->rx.buf_cnt );
		if ( !bp->rx.iob[iob_idx] ) {
			if ( bnxt_alloc_rx_iob ( bp, cons_id, iob_idx ) < 0 ) {
				dbg_alloc_rx_iob_fail ( iob_idx, cons_id );
				break;
			}
		}
		cons_id = NEXT_IDX ( cons_id, bp->rx.ring_cnt );
		bp->rx.iob_cnt++;
	}

	if ( cons_id != bp->rx.cons_id ) {
		dbg_rx_cid ( bp->rx.cons_id, cons_id );
		bp->rx.cons_id = cons_id;
		bnxt_db_rx ( bp, ( u32 )cons_id );
	}

	FLAG_SET ( bp->flag_hwrm, VALID_RX_IOB );
	return STATUS_SUCCESS;
}
339 
340 u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob,
341  struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len )
342 {
343  u8 *rx_buf = ( u8 * )iob->data;
344  u16 err_flags, rx_vlan;
345  u8 ignore_chksum_err = 0;
346  int i;
347 
348  err_flags = rx_cmp_hi->errors_v2 >> RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT;
349  if ( rx_cmp_hi->errors_v2 == 0x20 || rx_cmp_hi->errors_v2 == 0x21 )
350  ignore_chksum_err = 1;
351 
352  if ( err_flags && !ignore_chksum_err ) {
353  bp->rx.drop_err++;
354  return 1;
355  }
356 
357  for ( i = 0; i < 6; i++ ) {
358  if ( rx_buf[6 + i] != bp->mac_addr[i] )
359  break;
360  }
361 
362  /* Drop the loopback packets */
363  if ( i == 6 ) {
364  bp->rx.drop_lb++;
365  return 2;
366  }
367 
368  /* Get VLAN ID from RX completion ring */
369  if ( rx_cmp_hi->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN )
370  rx_vlan = ( rx_cmp_hi->metadata &
372  else
373  rx_vlan = 0;
374 
375  dbg_rx_vlan ( bp, rx_cmp_hi->metadata, rx_cmp_hi->flags2, rx_vlan );
376  if ( bnxt_vlan_drop ( bp, rx_vlan ) ) {
377  bp->rx.drop_vlan++;
378  return 3;
379  }
380  iob_put ( iob, rx_len );
381 
382  if ( rx_vlan )
383  bnxt_add_vlan ( iob, rx_vlan );
384 
385  bp->rx.good++;
386  return 0;
387 }
388 
389 static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt )
390 {
391  u16 cons_id;
392 
393  cons_id = bp->cq.cons_id + cnt;
394  if ( cons_id >= MAX_CQ_DESC_CNT ) {
395  /* Toggle completion bit when the ring wraps. */
396  bp->cq.completion_bit ^= 1;
397  cons_id = cons_id - MAX_CQ_DESC_CNT;
398  }
399  bp->cq.cons_id = cons_id;
400 }
401 
402 void bnxt_rx_process ( struct net_device *dev, struct bnxt *bp,
403  struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi )
404 {
405  u32 desc_idx = rx_cmp->opaque;
406  struct io_buffer *iob = bp->rx.iob[desc_idx];
407  u8 drop;
408 
409  dump_rx_bd ( rx_cmp, rx_cmp_hi, desc_idx );
410  assert ( !iob );
411  drop = bnxt_rx_drop ( bp, iob, rx_cmp_hi, rx_cmp->len );
412  dbg_rxp ( iob->data, rx_cmp->len, drop );
413  if ( drop )
414  netdev_rx_err ( dev, iob, -EINVAL );
415  else
416  netdev_rx ( dev, iob );
417 
418  bp->rx.cnt++;
419  bp->rx.iob[desc_idx] = NULL;
420  bp->rx.iob_cnt--;
422  bnxt_adv_cq_index ( bp, 2 ); /* Rx completion is 2 entries. */
423  dbg_rx_stat ( bp );
424 }
425 
426 static int bnxt_rx_complete ( struct net_device *dev,
427  struct rx_pkt_cmpl *rx_cmp )
428 {
429  struct bnxt *bp = netdev_priv ( dev );
430  struct rx_pkt_cmpl_hi *rx_cmp_hi;
431  u8 cmpl_bit = bp->cq.completion_bit;
432 
433  if ( bp->cq.cons_id == ( bp->cq.ring_cnt - 1 ) ) {
434  rx_cmp_hi = ( struct rx_pkt_cmpl_hi * )bp->cq.bd_virt;
435  cmpl_bit ^= 0x1; /* Ring has wrapped. */
436  } else
437  rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) ( rx_cmp+1 );
438 
439  if ( ! ( ( rx_cmp_hi->errors_v2 & RX_PKT_CMPL_V2 ) ^ cmpl_bit ) ) {
440  bnxt_rx_process ( dev, bp, rx_cmp, rx_cmp_hi );
441  return SERVICE_NEXT_CQ_BD;
442  } else
444 }
445 
/**
 * Initialize driver memory: zero the HWRM buffers, record their bus
 * addresses and set default ring sizes and link state
 *
 * @v bp		Driver private context
 * @v func		Caller name, for debug output
 */
void bnxt_mm_init ( struct bnxt *bp, const char *func )
{
	DBGP ( "%s\n", __func__ );
	memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE );
	memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE );
	memset ( bp->hwrm_addr_dma, 0, DMA_BUFFER_SIZE );
	bp->req_addr_mapping = virt_to_bus ( bp->hwrm_addr_req );
	bp->resp_addr_mapping = virt_to_bus ( bp->hwrm_addr_resp );
	bp->dma_addr_mapping = virt_to_bus ( bp->hwrm_addr_dma );
	bp->link_status = STATUS_LINK_DOWN;
	bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
	/* NOTE(review): one assignment appears lost here in extraction
	 * (possibly the default MTU) — confirm against upstream. */
	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bp->nq.ring_cnt = MAX_NQ_DESC_CNT;
	bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
	bp->tx.ring_cnt = MAX_TX_DESC_CNT;
	bp->rx.ring_cnt = MAX_RX_DESC_CNT;
	bp->rx.buf_cnt = NUM_RX_BUFFERS;
	dbg_mem ( bp, func );
}
466 
/**
 * Reset all ring state for a (re)open: zero the descriptor rings and
 * restore default indices, completion bits and ring sizes
 *
 * @v bp		Driver private context
 */
void bnxt_mm_nic ( struct bnxt *bp )
{
	DBGP ( "%s\n", __func__ );
	memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE );
	memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE );
	memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE );
	memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE );
	bp->nq.cons_id = 0;
	bp->nq.completion_bit = 0x1;
	bp->cq.cons_id = 0;
	bp->cq.completion_bit = 0x1;
	bp->tx.prod_id = 0;
	bp->tx.cons_id = 0;
	bp->rx.cons_id = 0;
	bp->rx.iob_cnt = 0;

	bp->link_status = STATUS_LINK_DOWN;
	bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
	/* NOTE(review): one assignment appears lost here in extraction
	 * (mirrors the gap in bnxt_mm_init) — confirm against upstream. */
	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bp->nq.ring_cnt = MAX_NQ_DESC_CNT;
	bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
	bp->tx.ring_cnt = MAX_TX_DESC_CNT;
	bp->rx.ring_cnt = MAX_RX_DESC_CNT;
	bp->rx.buf_cnt = NUM_RX_BUFFERS;
}
493 
/**
 * Free all DMA-coherent memory owned by the driver
 *
 * Frees each region only if allocated and clears pointers/mappings so the
 * function is safe to call on a partially-allocated context (used as the
 * error path of bnxt_alloc_mem).
 *
 * @v bp		Driver private context
 */
void bnxt_free_mem ( struct bnxt *bp )
{
	DBGP ( "%s\n", __func__ );
	if ( bp->nq.bd_virt ) {
		free_dma ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
		bp->nq.bd_virt = NULL;
	}

	if ( bp->cq.bd_virt ) {
		free_dma ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
		bp->cq.bd_virt = NULL;
	}

	if ( bp->rx.bd_virt ) {
		free_dma ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
		bp->rx.bd_virt = NULL;
	}

	if ( bp->tx.bd_virt ) {
		free_dma ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
		bp->tx.bd_virt = NULL;
	}

	if ( bp->hwrm_addr_dma ) {
		free_dma ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
		bp->dma_addr_mapping = 0;
		bp->hwrm_addr_dma = NULL;
	}

	if ( bp->hwrm_addr_resp ) {
		free_dma ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
		bp->resp_addr_mapping = 0;
		bp->hwrm_addr_resp = NULL;
	}

	if ( bp->hwrm_addr_req ) {
		free_dma ( bp->hwrm_addr_req, REQ_BUFFER_SIZE );
		bp->req_addr_mapping = 0;
		bp->hwrm_addr_req = NULL;
	}
	DBGP ( "- %s ( ): - Done\n", __func__ );
}
536 
537 int bnxt_alloc_mem ( struct bnxt *bp )
538 {
539  DBGP ( "%s\n", __func__ );
540  bp->hwrm_addr_req = malloc_dma ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
541  bp->hwrm_addr_resp = malloc_dma ( RESP_BUFFER_SIZE,
543  bp->hwrm_addr_dma = malloc_dma ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
544  bp->tx.bd_virt = malloc_dma ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
545  bp->rx.bd_virt = malloc_dma ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
548  test_if ( bp->hwrm_addr_req &&
549  bp->hwrm_addr_resp &&
550  bp->hwrm_addr_dma &&
551  bp->tx.bd_virt &&
552  bp->rx.bd_virt &&
553  bp->nq.bd_virt &&
554  bp->cq.bd_virt ) {
555  bnxt_mm_init ( bp, __func__ );
556  return STATUS_SUCCESS;
557  }
558 
559  DBGP ( "- %s ( ): Failed\n", __func__ );
560  bnxt_free_mem ( bp );
561  return -ENOMEM;
562 }
563 
/**
 * Initialize the common header of an HWRM request
 *
 * Zeroes the request, fills in the command type, response bus address
 * and a monotonically increasing sequence ID.
 *
 * @v bp		Driver private context
 * @v req		Request buffer (cast to the generic input header)
 * @v cmd		HWRM command code
 * @v len		Total request length in bytes
 */
static void hwrm_init ( struct bnxt *bp, struct input *req, u16 cmd, u16 len )
{
	memset ( req, 0, len );
	req->req_type = cmd;
	req->cmpl_ring = ( u16 )HWRM_NA_SIGNATURE;
	req->target_id = ( u16 )HWRM_NA_SIGNATURE;
	req->resp_addr = bp->resp_addr_mapping;
	req->seq_id = bp->seq_id++;
}
573 
574 static void hwrm_write_req ( struct bnxt *bp, void *req, u32 cnt )
575 {
576  u32 i = 0;
577 
578  for ( i = 0; i < cnt; i++ ) {
579  write32 ( ( ( u32 * )req )[i],
580  ( bp->bar0 + GRC_COM_CHAN_BASE + ( i * 4 ) ) );
581  }
582  write32 ( 0x1, ( bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG ) );
583 }
584 
585 static void short_hwrm_cmd_req ( struct bnxt *bp, u16 len )
586 {
587  struct hwrm_short_input sreq;
588 
589  memset ( &sreq, 0, sizeof ( struct hwrm_short_input ) );
590  sreq.req_type = ( u16 ) ( ( struct input * )bp->hwrm_addr_req )->req_type;
592  sreq.size = len;
593  sreq.req_addr = bp->req_addr_mapping;
594  mdelay ( 100 );
595  dbg_short_cmd ( ( u8 * )&sreq, __func__,
596  sizeof ( struct hwrm_short_input ) );
597  hwrm_write_req ( bp, &sreq, sizeof ( struct hwrm_short_input ) / 4 );
598 }
599 
600 static int wait_resp ( struct bnxt *bp, u32 tmo, u16 len, const char *func )
601 {
602  struct input *req = ( struct input * )bp->hwrm_addr_req;
603  struct output *resp = ( struct output * )bp->hwrm_addr_resp;
604  u8 *ptr = ( u8 * )resp;
605  u32 idx;
606  u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER ( ( u32 )tmo );
607  u16 resp_len = 0;
608  u16 ret = STATUS_TIMEOUT;
609 
610  if ( len > bp->hwrm_max_req_len )
612  else
613  hwrm_write_req ( bp, req, ( u32 ) ( len / 4 ) );
614 
615  for ( idx = 0; idx < wait_cnt; idx++ ) {
616  resp_len = resp->resp_len;
617  test_if ( resp->seq_id == req->seq_id &&
618  resp->req_type == req->req_type &&
619  ptr[resp_len - 1] == 1 ) {
620  bp->last_resp_code = resp->error_code;
621  ret = resp->error_code;
622  break;
623  }
625  }
626  dbg_hw_cmd ( bp, func, len, resp_len, tmo, ret );
627  return ( int )ret;
628 }
629 
/**
 * Query firmware/interface version and capabilities (HWRM_VER_GET)
 *
 * Records the interface spec code, command timeout, chip identity, and
 * whether the chip is a Thor (57500) part.
 *
 * @v bp		Driver private context
 * @ret rc		STATUS_SUCCESS or STATUS_FAILURE
 */
static int bnxt_hwrm_ver_get ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_ver_get_input );
	struct hwrm_ver_get_input *req;
	struct hwrm_ver_get_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_ver_get_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_ver_get_output * )bp->hwrm_addr_resp;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VER_GET, cmd_len );
	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req->hwrm_intf_min = HWRM_VERSION_MINOR;
	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
	rc = wait_resp ( bp, HWRM_CMD_DEFAULT_TIMEOUT, cmd_len, __func__ );
	if ( rc )
		return STATUS_FAILURE;

	bp->hwrm_spec_code =
		resp->hwrm_intf_maj_8b << 16 |
		resp->hwrm_intf_min_8b << 8 |
		resp->hwrm_intf_upd_8b;
	bp->hwrm_cmd_timeout = ( u32 )resp->def_req_timeout;
	if ( !bp->hwrm_cmd_timeout )
		bp->hwrm_cmd_timeout = ( u32 )HWRM_CMD_DEFAULT_TIMEOUT;
	if ( resp->hwrm_intf_maj_8b >= 1 )
		bp->hwrm_max_req_len = resp->max_req_win_len;
	bp->chip_id =
		resp->chip_rev << 24 |
		resp->chip_metal << 16 |
		resp->chip_bond_id << 8 |
		resp->chip_platform_type;
	bp->chip_num = resp->chip_num;
	/* NOTE(review): the body of this test_if was lost in extraction
	 * (presumably setting a short-command-supported flag), which makes
	 * the following assignment the conditional body — confirm against
	 * upstream before relying on this behavior. */
	test_if ( ( resp->dev_caps_cfg & SHORT_CMD_SUPPORTED ) &&
		( resp->dev_caps_cfg & SHORT_CMD_REQUIRED ) )
	bp->hwrm_max_ext_req_len = resp->max_ext_req_len;
	if ( bp->chip_num == CHIP_NUM_57500 )
		bp->thor = 1;
	dbg_fw_ver ( resp, bp->hwrm_cmd_timeout );
	return STATUS_SUCCESS;
}
672 
/**
 * Query min/max resource capabilities (HWRM_FUNC_RESOURCE_QCAPS)
 *
 * On firmware that does not support this command the failure is treated
 * as success and the driver falls back to defaults.
 *
 * @v bp		Driver private context
 * @ret rc		STATUS_SUCCESS (always)
 */
static int bnxt_hwrm_func_resource_qcaps ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_resource_qcaps_input );
	struct hwrm_func_resource_qcaps_input *req;
	struct hwrm_func_resource_qcaps_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_func_resource_qcaps_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_func_resource_qcaps_output * )bp->hwrm_addr_resp;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESOURCE_QCAPS,
		cmd_len );
	req->fid = ( u16 )HWRM_NA_SIGNATURE;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	/* Deliberate: unsupported command is not an error */
	if ( rc != STATUS_SUCCESS )
		return STATUS_SUCCESS;

	/* NOTE(review): a line was lost here in extraction — presumably
	 * setting a resource-qcaps-supported flag; confirm upstream. */

	// VFs
	if ( !bp->vf ) {
		bp->max_vfs = resp->max_vfs;
		bp->vf_res_strategy = resp->vf_reservation_strategy;
	}

	// vNICs
	bp->min_vnics = resp->min_vnics;
	bp->max_vnics = resp->max_vnics;

	// MSI-X
	bp->max_msix = resp->max_msix;

	// Ring Groups
	bp->min_hw_ring_grps = resp->min_hw_ring_grps;
	bp->max_hw_ring_grps = resp->max_hw_ring_grps;

	// TX Rings
	bp->min_tx_rings = resp->min_tx_rings;
	bp->max_tx_rings = resp->max_tx_rings;

	// RX Rings
	bp->min_rx_rings = resp->min_rx_rings;
	bp->max_rx_rings = resp->max_rx_rings;

	// Completion Rings
	bp->min_cp_rings = resp->min_cmpl_rings;
	bp->max_cp_rings = resp->max_cmpl_rings;

	// RSS Contexts
	bp->min_rsscos_ctxs = resp->min_rsscos_ctx;
	bp->max_rsscos_ctxs = resp->max_rsscos_ctx;

	// L2 Contexts
	bp->min_l2_ctxs = resp->min_l2_ctxs;
	bp->max_l2_ctxs = resp->max_l2_ctxs;

	// Statistic Contexts
	bp->min_stat_ctxs = resp->min_stat_ctx;
	bp->max_stat_ctxs = resp->max_stat_ctx;
	return STATUS_SUCCESS;
}
735 
736 static u32 bnxt_set_ring_info ( struct bnxt *bp )
737 {
738  u32 enables = 0;
739 
740  DBGP ( "%s\n", __func__ );
741  bp->num_cmpl_rings = DEFAULT_NUMBER_OF_CMPL_RINGS;
742  bp->num_tx_rings = DEFAULT_NUMBER_OF_TX_RINGS;
743  bp->num_rx_rings = DEFAULT_NUMBER_OF_RX_RINGS;
744  bp->num_hw_ring_grps = DEFAULT_NUMBER_OF_RING_GRPS;
745  bp->num_stat_ctxs = DEFAULT_NUMBER_OF_STAT_CTXS;
746 
747  if ( bp->min_cp_rings <= DEFAULT_NUMBER_OF_CMPL_RINGS )
748  bp->num_cmpl_rings = bp->min_cp_rings;
749 
750  if ( bp->min_tx_rings <= DEFAULT_NUMBER_OF_TX_RINGS )
751  bp->num_tx_rings = bp->min_tx_rings;
752 
753  if ( bp->min_rx_rings <= DEFAULT_NUMBER_OF_RX_RINGS )
754  bp->num_rx_rings = bp->min_rx_rings;
755 
756  if ( bp->min_hw_ring_grps <= DEFAULT_NUMBER_OF_RING_GRPS )
757  bp->num_hw_ring_grps = bp->min_hw_ring_grps;
758 
759  if ( bp->min_stat_ctxs <= DEFAULT_NUMBER_OF_STAT_CTXS )
760  bp->num_stat_ctxs = bp->min_stat_ctxs;
761 
762  dbg_num_rings ( bp );
768  return enables;
769 }
770 
/**
 * Populate a FUNC_CFG request with the chosen ring counts
 *
 * NOTE(review): lines were lost here in extraction — presumably the call
 * computing `enables` (e.g. via bnxt_set_ring_info) and/or the request
 * initialization; as shown, enables stays 0 and the request header is
 * never initialized. Confirm against upstream before relying on this.
 *
 * @v bp		Driver private context
 */
static void bnxt_hwrm_assign_resources ( struct bnxt *bp )
{
	struct hwrm_func_cfg_input *req;
	u32 enables = 0;

	DBGP ( "%s\n", __func__ );

	req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
	req->num_cmpl_rings = bp->num_cmpl_rings;
	req->num_tx_rings = bp->num_tx_rings;
	req->num_rx_rings = bp->num_rx_rings;
	req->num_stat_ctxs = bp->num_stat_ctxs;
	req->num_hw_ring_grps = bp->num_hw_ring_grps;
	req->enables = enables;
}
788 
/**
 * Query PF capabilities (HWRM_FUNC_QCAPS); skipped entirely on a VF
 *
 * Records the function ID, port index and permanent MAC address.
 *
 * @v bp		Driver private context
 * @ret rc		STATUS_SUCCESS or STATUS_FAILURE
 */
static int bnxt_hwrm_func_qcaps_req ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcaps_input );
	struct hwrm_func_qcaps_input *req;
	struct hwrm_func_qcaps_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	if ( bp->vf )
		return STATUS_SUCCESS;

	req = ( struct hwrm_func_qcaps_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_func_qcaps_output * )bp->hwrm_addr_resp;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCAPS, cmd_len );
	req->fid = ( u16 )HWRM_NA_SIGNATURE;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc ) {
		DBGP ( "- %s ( ): Failed\n", __func__ );
		return STATUS_FAILURE;
	}

	bp->fid = resp->fid;
	bp->port_idx = ( u8 )resp->port_id;

	/* Get MAC address for this PF */
	memcpy ( &bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN );
	dbg_func_qcaps ( bp );
	return STATUS_SUCCESS;
}
818 
819 static int bnxt_hwrm_func_qcfg_req ( struct bnxt *bp )
820 {
821  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcfg_input );
822  struct hwrm_func_qcfg_input *req;
823  struct hwrm_func_qcfg_output *resp;
824  int rc;
825 
826  DBGP ( "%s\n", __func__ );
827  req = ( struct hwrm_func_qcfg_input * )bp->hwrm_addr_req;
828  resp = ( struct hwrm_func_qcfg_output * )bp->hwrm_addr_resp;
829  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCFG, cmd_len );
830  req->fid = ( u16 )HWRM_NA_SIGNATURE;
831  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
832  if ( rc ) {
833  DBGP ( "- %s ( ): Failed\n", __func__ );
834  return STATUS_FAILURE;
835  }
836 
838  FLAG_SET ( bp->flags, BNXT_FLAG_MULTI_HOST );
839 
840  if ( resp->port_partition_type &
842  FLAG_SET ( bp->flags, BNXT_FLAG_NPAR_MODE );
843 
844  bp->ordinal_value = ( u8 )resp->pci_id & 0x0F;
845  bp->stat_ctx_id = resp->stat_ctx_id;
846 
847  /* If VF is set to TRUE, then use some data from func_qcfg ( ). */
848  if ( bp->vf ) {
849  bp->fid = resp->fid;
850  bp->port_idx = ( u8 )resp->port_id;
851  bp->vlan_id = resp->vlan;
852 
853  /* Get MAC address for this VF */
854  memcpy ( bp->mac_addr, resp->mac_address, ETH_ALEN );
855  }
856  dbg_func_qcfg ( bp );
857  return STATUS_SUCCESS;
858 }
859 
860 static int bnxt_hwrm_func_reset_req ( struct bnxt *bp )
861 {
862  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_reset_input );
863  struct hwrm_func_reset_input *req;
864 
865  DBGP ( "%s\n", __func__ );
866  req = ( struct hwrm_func_reset_input * )bp->hwrm_addr_req;
867  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESET, cmd_len );
868  if ( !bp->vf )
870 
871  return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
872 }
873 
/**
 * Configure the function (HWRM_FUNC_CFG); PF only
 *
 * NOTE(review): several lines were lost in extraction — presumably the
 * resource-assignment call and additional Thor-specific enables/ring
 * assignments around num_msix/num_vnics. Confirm against upstream.
 *
 * @v bp		Driver private context
 * @ret rc		STATUS_SUCCESS (VF) or firmware status from wait_resp
 */
static int bnxt_hwrm_func_cfg_req ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input );
	struct hwrm_func_cfg_input *req;

	DBGP ( "%s\n", __func__ );
	if ( bp->vf )
		return STATUS_SUCCESS;

	req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
	req->fid = ( u16 )HWRM_NA_SIGNATURE;
	if ( bp->thor ) {
		req->num_msix = 1;
		req->num_vnics = 1;
	}
	return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
}
897 
/**
 * Register the driver with firmware (HWRM_FUNC_DRV_RGTR) and subscribe
 * to async events
 *
 * NOTE(review): lines were lost in extraction — presumably the enables
 * bitmap, OS type and version fields preceding the async-event mask.
 * Confirm against upstream.
 *
 * @v bp		Driver private context
 * @ret rc		STATUS_SUCCESS or STATUS_FAILURE
 */
static int bnxt_hwrm_func_drv_rgtr ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_rgtr_input );
	struct hwrm_func_drv_rgtr_input *req;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_func_drv_rgtr_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_RGTR, cmd_len );

	/* Register with HWRM; bit 0 subscribes to link-status async events */
	req->async_event_fwd[0] |= 0x01;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc ) {
		DBGP ( "- %s ( ): Failed\n", __func__ );
		return STATUS_FAILURE;
	}

	FLAG_SET ( bp->flag_hwrm, VALID_DRIVER_REG );
	return STATUS_SUCCESS;
}
926 
/**
 * Unregister the driver from firmware (HWRM_FUNC_DRV_UNRGTR)
 *
 * No-op unless a prior registration succeeded (VALID_DRIVER_REG flag).
 * NOTE(review): one line appears lost after hwrm_init in extraction —
 * possibly a shutdown-preparation flag; confirm against upstream.
 *
 * @v bp		Driver private context
 * @ret rc		STATUS_SUCCESS or STATUS_FAILURE
 */
static int bnxt_hwrm_func_drv_unrgtr ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_unrgtr_input );
	struct hwrm_func_drv_unrgtr_input *req;
	int rc;

	DBGP ( "%s\n", __func__ );
	if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_DRIVER_REG ) ) )
		return STATUS_SUCCESS;

	req = ( struct hwrm_func_drv_unrgtr_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_UNRGTR, cmd_len );
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc )
		return STATUS_FAILURE;

	FLAG_RESET ( bp->flag_hwrm, VALID_DRIVER_REG );
	return STATUS_SUCCESS;
}
947 
/**
 * Point firmware's async-event completion ring at our NQ (Thor) or CQ
 *
 * VFs use HWRM_FUNC_VF_CFG; PFs use HWRM_FUNC_CFG.
 * NOTE(review): the `enables` assignments for both request variants were
 * lost in extraction — confirm against upstream.
 *
 * @v bp		Driver private context
 * @ret rc		Firmware status from wait_resp
 */
static int bnxt_hwrm_set_async_event ( struct bnxt *bp )
{
	int rc;
	u16 idx;

	DBGP ( "%s\n", __func__ );
	if ( bp->thor )
		idx = bp->nq_ring_id;
	else
		idx = bp->cq_ring_id;
	if ( bp->vf ) {
		u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_vf_cfg_input );
		struct hwrm_func_vf_cfg_input *req;

		req = ( struct hwrm_func_vf_cfg_input * )bp->hwrm_addr_req;
		hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_VF_CFG,
			cmd_len );
		req->async_event_cr = idx;
		req->mtu = bp->mtu;
		req->guest_vlan = bp->vlan_id;
		memcpy ( ( char * )&req->dflt_mac_addr[0], bp->mac_addr,
			ETH_ALEN );
		rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	} else {
		u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input );
		struct hwrm_func_cfg_input *req;

		req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
		hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
		req->fid = ( u16 )HWRM_NA_SIGNATURE;
		req->async_event_cr = idx;
		rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	}
	return rc;
}
985 
/**
 * Allocate an L2 receive filter for our MAC (HWRM_CFA_L2_FILTER_ALLOC)
 *
 * NOTE(review): several lines were lost in extraction — the declaration
 * and initialization of `flags`, the initialization of `enables`, and
 * the body of `if ( bp->vf )` (which as shown dangles onto hwrm_init).
 * This block does not compile as-is; confirm against upstream.
 *
 * @v bp		Driver private context
 * @ret rc		STATUS_SUCCESS or STATUS_FAILURE
 */
static int bnxt_hwrm_cfa_l2_filter_alloc ( struct bnxt *bp )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_alloc_input );
	struct hwrm_cfa_l2_filter_alloc_input *req;
	struct hwrm_cfa_l2_filter_alloc_output *resp;
	int rc;
	u32 enables;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_cfa_l2_filter_alloc_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_cfa_l2_filter_alloc_output * )bp->hwrm_addr_resp;
	if ( bp->vf )

	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_ALLOC,
		cmd_len );
	req->flags = flags;
	req->enables = enables;
	memcpy ( ( char * )&req->l2_addr[0], ( char * )&bp->mac_addr[0],
		ETH_ALEN );
	memset ( ( char * )&req->l2_addr_mask[0], 0xff, ETH_ALEN );
	if ( !bp->vf ) {
		memcpy ( ( char * )&req->t_l2_addr[0], bp->mac_addr, ETH_ALEN );
		memset ( ( char * )&req->t_l2_addr_mask[0], 0xff, ETH_ALEN );
	}
	req->src_id = ( u32 )bp->port_idx;
	req->dst_id = bp->vnic_id;
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc )
		return STATUS_FAILURE;

	FLAG_SET ( bp->flag_hwrm, VALID_L2_FILTER );
	bp->l2_filter_id = resp->l2_filter_id;
	return STATUS_SUCCESS;
}
1026 
1027 static int bnxt_hwrm_cfa_l2_filter_free ( struct bnxt *bp )
1028 {
1029  u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_free_input );
1030  struct hwrm_cfa_l2_filter_free_input *req;
1031  int rc;
1032 
1033  DBGP ( "%s\n", __func__ );
1034  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_L2_FILTER ) ) )
1035  return STATUS_SUCCESS;
1036 
1037  req = ( struct hwrm_cfa_l2_filter_free_input * )bp->hwrm_addr_req;
1038  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_FREE,
1039  cmd_len );
1040  req->l2_filter_id = bp->l2_filter_id;
1041  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1042  if ( rc ) {
1043  DBGP ( "- %s ( ): Failed\n", __func__ );
1044  return STATUS_FAILURE;
1045  }
1046 
1047  FLAG_RESET ( bp->flag_hwrm, VALID_L2_FILTER );
1048  return STATUS_SUCCESS;
1049 }
1050 
1051 u32 set_rx_mask ( u32 rx_mask )
1052 {
1053  u32 mask = 0;
1054 
1055  if ( !rx_mask )
1056  return mask;
1057 
1059  if ( rx_mask != RX_MASK_ACCEPT_NONE ) {
1060  if ( rx_mask & RX_MASK_ACCEPT_MULTICAST )
1062  if ( rx_mask & RX_MASK_ACCEPT_ALL_MULTICAST )
1064  if ( rx_mask & RX_MASK_PROMISCUOUS_MODE )
1066  }
1067  return mask;
1068 }
1069 
/**
 * Program the vNIC's RX mask (HWRM_CFA_L2_SET_RX_MASK)
 *
 * @v bp		Driver private context
 * @v rx_mask		Driver RX_MASK_* flags, translated via set_rx_mask()
 * @ret rc		Firmware status from wait_resp
 */
static int bnxt_hwrm_set_rx_mask ( struct bnxt *bp, u32 rx_mask )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_set_rx_mask_input );
	struct hwrm_cfa_l2_set_rx_mask_input *req;
	u32 mask = set_rx_mask ( rx_mask );

	req = ( struct hwrm_cfa_l2_set_rx_mask_input * )bp->hwrm_addr_req;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_SET_RX_MASK,
		cmd_len );
	req->vnic_id = bp->vnic_id;
	req->mask = mask;

	return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
}
1084 
/**
 * Query PHY state (HWRM_PORT_PHY_QCFG)
 *
 * @v bp		Driver private context
 * @v idx		Bitmask selecting which fields to record
 *			(SUPPORT_SPEEDS, DETECT_MEDIA, PHY_SPEED, PHY_STATUS)
 * @ret rc		STATUS_SUCCESS or STATUS_FAILURE
 */
static int bnxt_hwrm_port_phy_qcfg ( struct bnxt *bp, u16 idx )
{
	u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_qcfg_input );
	struct hwrm_port_phy_qcfg_input *req;
	struct hwrm_port_phy_qcfg_output *resp;
	int rc;

	DBGP ( "%s\n", __func__ );
	req = ( struct hwrm_port_phy_qcfg_input * )bp->hwrm_addr_req;
	resp = ( struct hwrm_port_phy_qcfg_output * )bp->hwrm_addr_resp;
	hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCFG, cmd_len );
	rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
	if ( rc ) {
		DBGP ( "- %s ( ): Failed\n", __func__ );
		return STATUS_FAILURE;
	}

	if ( idx & SUPPORT_SPEEDS )
		bp->support_speeds = resp->support_speeds;

	if ( idx & DETECT_MEDIA )
		bp->media_detect = resp->module_status;

	if ( idx & PHY_SPEED )
		bp->current_link_speed = resp->link_speed;

	if ( idx & PHY_STATUS ) {
		if ( resp->link == PORT_PHY_QCFG_RESP_LINK_LINK )
			bp->link_status = STATUS_LINK_ACTIVE;
		else
			bp->link_status = STATUS_LINK_DOWN;
	}
	return STATUS_SUCCESS;
}
1119 
1121  u16 data_len, u16 option_num, u16 dimensions, u16 index_0 )
1122 {
1123  u16 cmd_len = ( u16 )sizeof ( struct hwrm_nvm_get_variable_input );
1124  struct hwrm_nvm_get_variable_input *req;
1125 
1126  DBGP ( "%s\n", __func__ );
1127  req = ( struct hwrm_nvm_get_variable_input * )bp->hwrm_addr_req;
1128  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_NVM_GET_VARIABLE, cmd_len );
1129  req->dest_data_addr = bp->dma_addr_mapping;
1130  req->data_len = data_len;
1131  req->option_num = option_num;
1132  req->dimensions = dimensions;
1133  req->index_0 = index_0;
1134  return wait_resp ( bp,
1135  HWRM_CMD_FLASH_MULTIPLAYER ( bp->hwrm_cmd_timeout ),
1136  cmd_len, __func__ );
1137 }
1138 
/*
 * Derive the configured link speed from NVM option variables and cache
 * the combined setting in bp->link_set, then map the driver speed field
 * onto bp->medium via SET_MEDIUM_SPEED / SET_MEDIUM_DUPLEX.
 *
 * NOTE(review): the HTML extraction dropped several lines below - the
 * bnxt_hwrm_nvm_get_variable_req() call prefixes (option numbers and
 * data lengths), the D3/auto-detect mask arguments, the 1G/2.5G
 * SET_MEDIUM_SPEED assignments and the LINK_SPEED_DRV_AUTONEG case
 * label. Restore them from the upstream file before building.
 */
1139 static int bnxt_get_link_speed ( struct bnxt *bp )
1140 {
1141  u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma;
1142 
1143  DBGP ( "%s\n", __func__ );
1146  1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1147  return STATUS_FAILURE;
1148  bp->link_set = SET_LINK ( *ptr32, SPEED_DRV_MASK, SPEED_DRV_SHIFT );
1151  1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1152  return STATUS_FAILURE;
1153  bp->link_set |= SET_LINK ( *ptr32, SPEED_FW_MASK, SPEED_FW_SHIFT );
1155  ( u16 )D3_LINK_SPEED_FW_NUM, 1,
1156  ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1157  return STATUS_FAILURE;
1158  bp->link_set |= SET_LINK ( *ptr32, D3_SPEED_FW_MASK,
1162  1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
1163  return STATUS_FAILURE;
1164  bp->link_set |= SET_LINK ( *ptr32,
1166 
/* Map the driver-configured speed onto the medium word. */
1167  switch ( bp->link_set & LINK_SPEED_DRV_MASK ) {
1168  case LINK_SPEED_DRV_1G:
1170  break;
1171  case LINK_SPEED_DRV_2_5G:
1173  break;
1174  case LINK_SPEED_DRV_10G:
1175  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_10GBPS );
1176  break;
1177  case LINK_SPEED_DRV_25G:
1178  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_25GBPS );
1179  break;
1180  case LINK_SPEED_DRV_40G:
1181  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_40GBPS );
1182  break;
1183  case LINK_SPEED_DRV_50G:
1184  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_50GBPS );
1185  break;
1186  case LINK_SPEED_DRV_100G:
1187  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_100GBPS );
1188  break;
1189  case LINK_SPEED_DRV_200G:
1190  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_200GBPS );
1191  break;
1193  bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_AUTONEG );
1194  break;
1195  default:
1196  bp->medium = SET_MEDIUM_DUPLEX ( bp, MEDIUM_FULL_DUPLEX );
1197  break;
1198  }
1199  prn_set_speed ( bp->link_set );
1200  return STATUS_SUCCESS;
1201 }
1202 
/*
 * Read the MBA VLAN enable flag and VLAN id from NVM and cache them in
 * bp->mba_cfg2 / bp->vlan_id. Skipped entirely on virtual functions.
 *
 * NOTE(review): the extraction dropped the two
 * bnxt_hwrm_nvm_get_variable_req() call prefixes ( option numbers )
 * preceding the STATUS checks below - restore from upstream.
 */
1203 static int bnxt_get_vlan ( struct bnxt *bp )
1204 {
1205  u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma;
1206 
1207  /* If VF is set to TRUE, Do not issue this command */
1208  if ( bp->vf )
1209  return STATUS_SUCCESS;
1210 
1213  ( u16 )bp->ordinal_value ) != STATUS_SUCCESS )
1214  return STATUS_FAILURE;
1215 
1216  bp->mba_cfg2 = SET_MBA ( *ptr32, VLAN_MASK, VLAN_SHIFT );
1219  ( u16 )bp->ordinal_value ) != STATUS_SUCCESS )
1220  return STATUS_FAILURE;
1221 
1222  bp->mba_cfg2 |= SET_MBA ( *ptr32, VLAN_VALUE_MASK, VLAN_VALUE_SHIFT );
/* Use the configured VLAN only when the MBA VLAN-enable bit is set. */
1223  if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED )
1224  bp->vlan_id = bp->mba_cfg2 & VLAN_VALUE_MASK;
1225  else
1226  bp->vlan_id = 0;
1227 
1228  if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED )
1229  DBGP ( "VLAN MBA Enabled ( %d )\n",
1230  ( bp->mba_cfg2 & VLAN_VALUE_MASK ) );
1231 
1232  return STATUS_SUCCESS;
1233 }
1234 
1235 static int bnxt_hwrm_backing_store_qcfg ( struct bnxt *bp )
1236 {
1237  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_qcfg_input );
1239 
1240  DBGP ( "%s\n", __func__ );
1241  if ( !bp->thor )
1242  return STATUS_SUCCESS;
1243 
1244  req = ( struct hwrm_func_backing_store_qcfg_input * )bp->hwrm_addr_req;
1245  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_QCFG,
1246  cmd_len );
1247  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1248 }
1249 
1250 static int bnxt_hwrm_backing_store_cfg ( struct bnxt *bp )
1251 {
1252  u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_cfg_input );
1254 
1255  DBGP ( "%s\n", __func__ );
1256  if ( !bp->thor )
1257  return STATUS_SUCCESS;
1258 
1259  req = ( struct hwrm_func_backing_store_cfg_input * )bp->hwrm_addr_req;
1260  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_CFG,
1261  cmd_len );
1263  req->enables = 0;
1264  return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ );
1265 }
1266 
1267 static int bnxt_hwrm_queue_qportcfg ( struct bnxt *bp )
1268 {
1269  u16 cmd_len = ( u16 )sizeof ( struct hwrm_queue_qportcfg_input );
1270  struct hwrm_queue_qportcfg_input *req;
1271  struct hwrm_queue_qportcfg_output *resp;
1272  int rc;
1273 
1274  DBGP ( "%s\n", __func__ );
1275  if ( !bp->thor )
1276  return STATUS_SUCCESS;
1277 
1278  req = ( struct hwrm_queue_qportcfg_input * )bp->hwrm_addr_req;
1279  resp = ( struct hwrm_queue_qportcfg_output * )bp->hwrm_addr_resp;
1280  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_QUEUE_QPORTCFG, cmd_len );
1281  req->flags = 0;
1282  req->port_id = 0;
1283  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1284  if ( rc ) {
1285  DBGP ( "- %s ( ): Failed\n", __func__ );
1286  return STATUS_FAILURE;
1287  }
1288 
1289  bp->queue_id = resp->queue_id0;
1290  return STATUS_SUCCESS;
1291 }
1292 
1293 static int bnxt_hwrm_port_mac_cfg ( struct bnxt *bp )
1294 {
1295  u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_mac_cfg_input );
1296  struct hwrm_port_mac_cfg_input *req;
1297 
1298  DBGP ( "%s\n", __func__ );
1299  if ( bp->vf )
1300  return STATUS_SUCCESS;
1301 
1302  req = ( struct hwrm_port_mac_cfg_input * )bp->hwrm_addr_req;
1303  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_MAC_CFG, cmd_len );
1305  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1306 }
1307 
/*
 * Build and issue HWRM_PORT_PHY_CFG: either force a specific link speed
 * ( per GET_MEDIUM_SPEED ( bp->medium ) ) or fall back to autonegotiation
 * restricted to bp->support_speeds.
 *
 * NOTE(review): the HTML extraction dropped many lines here - the
 * auto_link_speed_mask declaration, the initial flags/enables setup,
 * the per-speed force_link_speed assignments inside each case, and the
 * default ( autoneg ) branch body. Restore from upstream before building.
 */
1308 static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp )
1309 {
1310  u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_cfg_input );
1311  struct hwrm_port_phy_cfg_input *req;
1312  u32 flags;
1313  u32 enables = 0;
1314  u16 force_link_speed = 0;
1316  u8 auto_mode = 0;
1317  u8 auto_pause = 0;
1318  u8 auto_duplex = 0;
1319 
1320  DBGP ( "%s\n", __func__ );
1321  req = ( struct hwrm_port_phy_cfg_input * )bp->hwrm_addr_req;
1324 
1325  switch ( GET_MEDIUM_SPEED ( bp->medium ) ) {
1326  case MEDIUM_SPEED_1000MBPS:
1328  break;
1329  case MEDIUM_SPEED_10GBPS:
1331  break;
1332  case MEDIUM_SPEED_25GBPS:
1334  break;
1335  case MEDIUM_SPEED_40GBPS:
1337  break;
1338  case MEDIUM_SPEED_50GBPS:
1340  break;
1341  case MEDIUM_SPEED_100GBPS:
1343  break;
1344  case MEDIUM_SPEED_200GBPS:
1346  break;
1347  default:
1357  auto_link_speed_mask = bp->support_speeds;
1358  break;
1359  }
1360 
1361  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_CFG, cmd_len );
1362  req->flags = flags;
1363  req->enables = enables;
1364  req->port_id = bp->port_idx;
1365  req->force_link_speed = force_link_speed;
1366  req->auto_mode = auto_mode;
1367  req->auto_duplex = auto_duplex;
1368  req->auto_pause = auto_pause;
1369  req->auto_link_speed_mask = auto_link_speed_mask;
1370 
1371  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1372 }
1373 
/*
 * Query the PHY; if the link is down on a PF ( and not multi-host/NPAR ),
 * read the NVM-configured speed, (re)configure the link, and re-query.
 *
 * NOTE(review): the extraction dropped the `flag` local declaration,
 * the bnxt_hwrm_port_phy_qcfg() call opening the first if-block, and
 * the bnxt_hwrm_port_phy_cfg() call under "Configure link" - restore
 * from upstream.
 */
1374 static int bnxt_query_phy_link ( struct bnxt *bp )
1375 {
1377 
1378  DBGP ( "%s\n", __func__ );
1379  /* Query Link Status */
1381  return STATUS_FAILURE;
1382  }
1383 
1384  if ( bp->link_status == STATUS_LINK_ACTIVE )
1385  return STATUS_SUCCESS;
1386 
1387  /* If VF is set to TRUE, Do not issue the following commands */
1388  if ( bp->vf )
1389  return STATUS_SUCCESS;
1390 
1391  /* If multi_host or NPAR, Do not issue bnxt_get_link_speed */
1392  if ( FLAG_TEST ( bp->flags, PORT_PHY_FLAGS ) ) {
1393  dbg_flags ( __func__, bp->flags );
1394  return STATUS_SUCCESS;
1395  }
1396 
1397  /* HWRM_NVM_GET_VARIABLE - speed */
1398  if ( bnxt_get_link_speed ( bp ) != STATUS_SUCCESS ) {
1399  return STATUS_FAILURE;
1400  }
1401 
1402  /* Configure link if it is not up */
1404 
1405  /* refresh link speed values after bringing link up */
1406  return bnxt_hwrm_port_phy_qcfg ( bp, flag );
1407 }
1408 
/*
 * Poll for link-up in 100ms-granularity steps bounded by
 * bp->wait_link_timeout, then publish the final state via
 * bnxt_set_link(). Always returns STATUS_SUCCESS; a down link is
 * reported through bp->link_status only.
 *
 * NOTE(review): the extraction dropped the qcfg `flag` declaration,
 * the bnxt_hwrm_port_phy_qcfg() call at the top of the loop body, and
 * the per-iteration delay call before the loop's closing brace -
 * restore from upstream.
 */
1409 static int bnxt_get_phy_link ( struct bnxt *bp )
1410 {
1411  u16 i;
1413 
1414  DBGP ( "%s\n", __func__ );
1415  dbg_chip_info ( bp );
1416  for ( i = 0; i < ( bp->wait_link_timeout / 100 ); i++ ) {
1418  break;
1419 
1420  if ( bp->link_status == STATUS_LINK_ACTIVE )
1421  break;
1422 
1423 // if ( bp->media_detect )
1424 // break;
1426  }
1427  dbg_link_state ( bp, ( u32 ) ( ( i + 1 ) * 100 ) );
1428  bnxt_set_link ( bp );
1429  return STATUS_SUCCESS;
1430 }
1431 
1432 static int bnxt_hwrm_stat_ctx_alloc ( struct bnxt *bp )
1433 {
1434  u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_alloc_input );
1435  struct hwrm_stat_ctx_alloc_input *req;
1436  struct hwrm_stat_ctx_alloc_output *resp;
1437  int rc;
1438 
1439  DBGP ( "%s\n", __func__ );
1440  req = ( struct hwrm_stat_ctx_alloc_input * )bp->hwrm_addr_req;
1441  resp = ( struct hwrm_stat_ctx_alloc_output * )bp->hwrm_addr_resp;
1442  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_ALLOC, cmd_len );
1443  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1444  if ( rc ) {
1445  DBGP ( "- %s ( ): Failed\n", __func__ );
1446  return STATUS_FAILURE;
1447  }
1448 
1449  FLAG_SET ( bp->flag_hwrm, VALID_STAT_CTX );
1450  bp->stat_ctx_id = ( u16 )resp->stat_ctx_id;
1451  return STATUS_SUCCESS;
1452 }
1453 
1454 static int bnxt_hwrm_stat_ctx_free ( struct bnxt *bp )
1455 {
1456  u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_free_input );
1457  struct hwrm_stat_ctx_free_input *req;
1458  int rc;
1459 
1460  DBGP ( "%s\n", __func__ );
1461  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_STAT_CTX ) ) )
1462  return STATUS_SUCCESS;
1463 
1464  req = ( struct hwrm_stat_ctx_free_input * )bp->hwrm_addr_req;
1465  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_FREE, cmd_len );
1466  req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
1467  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1468  if ( rc ) {
1469  DBGP ( "- %s ( ): Failed\n", __func__ );
1470  return STATUS_FAILURE;
1471  }
1472 
1473  FLAG_RESET ( bp->flag_hwrm, VALID_STAT_CTX );
1474  return STATUS_SUCCESS;
1475 }
1476 
1477 static int bnxt_hwrm_ring_free_grp ( struct bnxt *bp )
1478 {
1479  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_free_input );
1480  struct hwrm_ring_grp_free_input *req;
1481  int rc;
1482 
1483  DBGP ( "%s\n", __func__ );
1484  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_GRP ) ) )
1485  return STATUS_SUCCESS;
1486 
1487  req = ( struct hwrm_ring_grp_free_input * )bp->hwrm_addr_req;
1488  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_FREE, cmd_len );
1489  req->ring_group_id = ( u32 )bp->ring_grp_id;
1490  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1491  if ( rc ) {
1492  DBGP ( "- %s ( ): Failed\n", __func__ );
1493  return STATUS_FAILURE;
1494  }
1495 
1496  FLAG_RESET ( bp->flag_hwrm, VALID_RING_GRP );
1497  return STATUS_SUCCESS;
1498 }
1499 
1500 static int bnxt_hwrm_ring_alloc_grp ( struct bnxt *bp )
1501 {
1502  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_alloc_input );
1503  struct hwrm_ring_grp_alloc_input *req;
1504  struct hwrm_ring_grp_alloc_output *resp;
1505  int rc;
1506 
1507  DBGP ( "%s\n", __func__ );
1508  if ( bp->thor )
1509  return STATUS_SUCCESS;
1510 
1511  req = ( struct hwrm_ring_grp_alloc_input * )bp->hwrm_addr_req;
1512  resp = ( struct hwrm_ring_grp_alloc_output * )bp->hwrm_addr_resp;
1513  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_ALLOC, cmd_len );
1514  req->cr = bp->cq_ring_id;
1515  req->rr = bp->rx_ring_id;
1516  req->ar = ( u16 )HWRM_NA_SIGNATURE;
1517  if ( bp->vf )
1518  req->sc = bp->stat_ctx_id;
1519 
1520  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1521  if ( rc ) {
1522  DBGP ( "- %s ( ): Failed\n", __func__ );
1523  return STATUS_FAILURE;
1524  }
1525 
1526  FLAG_SET ( bp->flag_hwrm, VALID_RING_GRP );
1527  bp->ring_grp_id = ( u16 )resp->ring_group_id;
1528  return STATUS_SUCCESS;
1529 }
1530 
1531 int bnxt_hwrm_ring_free ( struct bnxt *bp, u16 ring_id, u8 ring_type )
1532 {
1533  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_free_input );
1534  struct hwrm_ring_free_input *req;
1535 
1536  DBGP ( "%s\n", __func__ );
1537  req = ( struct hwrm_ring_free_input * )bp->hwrm_addr_req;
1538  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_FREE, cmd_len );
1539  req->ring_type = ring_type;
1540  req->ring_id = ring_id;
1541  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1542 }
1543 
1544 static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type )
1545 {
1546  u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_alloc_input );
1547  struct hwrm_ring_alloc_input *req;
1548  struct hwrm_ring_alloc_output *resp;
1549  int rc;
1550 
1551  DBGP ( "%s\n", __func__ );
1552  req = ( struct hwrm_ring_alloc_input * )bp->hwrm_addr_req;
1553  resp = ( struct hwrm_ring_alloc_output * )bp->hwrm_addr_resp;
1554  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_ALLOC, cmd_len );
1555  req->ring_type = type;
1556  switch ( type ) {
1558  req->page_size = LM_PAGE_BITS ( 12 );
1559  req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf );
1560  req->length = ( u32 )bp->nq.ring_cnt;
1561  req->logical_id = 0xFFFF; // Required value for Thor FW?
1562  req->page_tbl_addr = virt_to_bus ( bp->nq.bd_virt );
1563  break;
1565  req->page_size = LM_PAGE_BITS ( 8 );
1566  req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf );
1567  req->length = ( u32 )bp->cq.ring_cnt;
1568  req->page_tbl_addr = virt_to_bus ( bp->cq.bd_virt );
1569  if ( !bp->thor )
1570  break;
1572  req->nq_ring_id = bp->nq_ring_id;
1573  req->cq_handle = ( u64 )bp->nq_ring_id;
1574  break;
1576  req->page_size = LM_PAGE_BITS ( 8 );
1577  req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL;
1578  req->length = ( u32 )bp->tx.ring_cnt;
1579  req->queue_id = TX_RING_QID;
1580  req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
1581  req->cmpl_ring_id = bp->cq_ring_id;
1582  req->page_tbl_addr = virt_to_bus ( bp->tx.bd_virt );
1583  break;
1585  req->page_size = LM_PAGE_BITS ( 8 );
1586  req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL;
1587  req->length = ( u32 )bp->rx.ring_cnt;
1588  req->stat_ctx_id = ( u32 )STAT_CTX_ID;
1589  req->cmpl_ring_id = bp->cq_ring_id;
1590  req->page_tbl_addr = virt_to_bus ( bp->rx.bd_virt );
1591  if ( !bp->thor )
1592  break;
1593  req->queue_id = ( u16 )RX_RING_QID;
1594  req->rx_buf_size = MAX_ETHERNET_PACKET_BUFFER_SIZE;
1596  break;
1597  default:
1598  return STATUS_SUCCESS;
1599  }
1600  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1601  if ( rc ) {
1602  DBGP ( "- %s ( ): Failed, type = %x\n", __func__, type );
1603  return STATUS_FAILURE;
1604  }
1605 
1607  FLAG_SET ( bp->flag_hwrm, VALID_RING_CQ );
1608  bp->cq_ring_id = resp->ring_id;
1609  } else if ( type == RING_ALLOC_REQ_RING_TYPE_TX ) {
1610  FLAG_SET ( bp->flag_hwrm, VALID_RING_TX );
1611  bp->tx_ring_id = resp->ring_id;
1612  } else if ( type == RING_ALLOC_REQ_RING_TYPE_RX ) {
1613  FLAG_SET ( bp->flag_hwrm, VALID_RING_RX );
1614  bp->rx_ring_id = resp->ring_id;
1615  } else if ( type == RING_ALLOC_REQ_RING_TYPE_NQ ) {
1616  FLAG_SET ( bp->flag_hwrm, VALID_RING_NQ );
1617  bp->nq_ring_id = resp->ring_id;
1618  }
1619  return STATUS_SUCCESS;
1620 }
1621 
1622 static int bnxt_hwrm_ring_alloc_cq ( struct bnxt *bp )
1623 {
1624  DBGP ( "%s\n", __func__ );
1626 }
1627 
1628 static int bnxt_hwrm_ring_alloc_tx ( struct bnxt *bp )
1629 {
1630  DBGP ( "%s\n", __func__ );
1632 }
1633 
1634 static int bnxt_hwrm_ring_alloc_rx ( struct bnxt *bp )
1635 {
1636  DBGP ( "%s\n", __func__ );
1638 }
1639 
1640 static int bnxt_hwrm_ring_free_cq ( struct bnxt *bp )
1641 {
1642  int ret = STATUS_SUCCESS;
1643 
1644  DBGP ( "%s\n", __func__ );
1645  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_CQ ) ) )
1646  return ret;
1647 
1648  ret = RING_FREE ( bp, bp->cq_ring_id, RING_FREE_REQ_RING_TYPE_L2_CMPL );
1649  if ( ret == STATUS_SUCCESS )
1650  FLAG_RESET ( bp->flag_hwrm, VALID_RING_CQ );
1651 
1652  return ret;
1653 }
1654 
1655 static int bnxt_hwrm_ring_free_tx ( struct bnxt *bp )
1656 {
1657  int ret = STATUS_SUCCESS;
1658 
1659  DBGP ( "%s\n", __func__ );
1660  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_TX ) ) )
1661  return ret;
1662 
1663  ret = RING_FREE ( bp, bp->tx_ring_id, RING_FREE_REQ_RING_TYPE_TX );
1664  if ( ret == STATUS_SUCCESS )
1665  FLAG_RESET ( bp->flag_hwrm, VALID_RING_TX );
1666 
1667  return ret;
1668 }
1669 
1670 static int bnxt_hwrm_ring_free_rx ( struct bnxt *bp )
1671 {
1672  int ret = STATUS_SUCCESS;
1673 
1674  DBGP ( "%s\n", __func__ );
1675  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_RX ) ) )
1676  return ret;
1677 
1678  ret = RING_FREE ( bp, bp->rx_ring_id, RING_FREE_REQ_RING_TYPE_RX );
1679  if ( ret == STATUS_SUCCESS )
1680  FLAG_RESET ( bp->flag_hwrm, VALID_RING_RX );
1681 
1682  return ret;
1683 }
1684 
1685 static int bnxt_hwrm_ring_alloc_nq ( struct bnxt *bp )
1686 {
1687  if ( !bp->thor )
1688  return STATUS_SUCCESS;
1690 }
1691 
1692 static int bnxt_hwrm_ring_free_nq ( struct bnxt *bp )
1693 {
1694  int ret = STATUS_SUCCESS;
1695 
1696  if ( !bp->thor )
1697  return STATUS_SUCCESS;
1698 
1699  DBGP ( "%s\n", __func__ );
1700  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_NQ ) ) )
1701  return ret;
1702 
1703  ret = RING_FREE ( bp, bp->nq_ring_id, RING_FREE_REQ_RING_TYPE_NQ );
1704  if ( ret == STATUS_SUCCESS )
1705  FLAG_RESET ( bp->flag_hwrm, VALID_RING_NQ );
1706 
1707  return ret;
1708 }
1709 
1710 static int bnxt_hwrm_vnic_alloc ( struct bnxt *bp )
1711 {
1712  u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_alloc_input );
1713  struct hwrm_vnic_alloc_input *req;
1714  struct hwrm_vnic_alloc_output *resp;
1715  int rc;
1716 
1717  DBGP ( "%s\n", __func__ );
1718  req = ( struct hwrm_vnic_alloc_input * )bp->hwrm_addr_req;
1719  resp = ( struct hwrm_vnic_alloc_output * )bp->hwrm_addr_resp;
1720  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_ALLOC, cmd_len );
1721  req->flags = VNIC_ALLOC_REQ_FLAGS_DEFAULT;
1722  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1723  if ( rc ) {
1724  DBGP ( "- %s ( ): Failed\n", __func__ );
1725  return STATUS_FAILURE;
1726  }
1727 
1728  FLAG_SET ( bp->flag_hwrm, VALID_VNIC_ID );
1729  bp->vnic_id = resp->vnic_id;
1730  return STATUS_SUCCESS;
1731 }
1732 
1733 static int bnxt_hwrm_vnic_free ( struct bnxt *bp )
1734 {
1735  u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_free_input );
1736  struct hwrm_vnic_free_input *req;
1737  int rc;
1738 
1739  DBGP ( "%s\n", __func__ );
1740  if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_VNIC_ID ) ) )
1741  return STATUS_SUCCESS;
1742 
1743  req = ( struct hwrm_vnic_free_input * )bp->hwrm_addr_req;
1744  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_FREE, cmd_len );
1745  req->vnic_id = bp->vnic_id;
1746  rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1747  if ( rc ) {
1748  DBGP ( "- %s ( ): Failed\n", __func__ );
1749  return STATUS_FAILURE;
1750  }
1751 
1752  FLAG_RESET ( bp->flag_hwrm, VALID_VNIC_ID );
1753  return STATUS_SUCCESS;
1754 }
1755 
1756 static int bnxt_hwrm_vnic_cfg ( struct bnxt *bp )
1757 {
1758  u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_cfg_input );
1759  struct hwrm_vnic_cfg_input *req;
1760 
1761  DBGP ( "%s\n", __func__ );
1762  req = ( struct hwrm_vnic_cfg_input * )bp->hwrm_addr_req;
1763  hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_CFG, cmd_len );
1765  req->mru = bp->mtu;
1766 
1767  if ( bp->thor ) {
1770  req->default_rx_ring_id = bp->rx_ring_id;
1771  req->default_cmpl_ring_id = bp->cq_ring_id;
1772  } else {
1774  req->dflt_ring_grp = bp->ring_grp_id;
1775  }
1776 
1778  req->vnic_id = bp->vnic_id;
1779  return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
1780 }
1781 
/* Enable reception using the driver's default RX_MASK flags. */
1782 static int bnxt_set_rx_mask ( struct bnxt *bp )
1783 {
1784  return bnxt_hwrm_set_rx_mask ( bp, RX_MASK );
1785 }
1786 
/* Disable all reception by programming an empty receive mask. */
1787 static int bnxt_reset_rx_mask ( struct bnxt *bp )
1788 {
1789  return bnxt_hwrm_set_rx_mask ( bp, 0 );
1790 }
1791 
/* Signature of one step in the table-driven HWRM bring-up/down sequences. */
1792 typedef int ( *hwrm_func_t ) ( struct bnxt *bp );
1793 
1795  bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */
1796  NULL,
1797 };
1798 
1800  bnxt_hwrm_cfa_l2_filter_free, /* HWRM_CFA_L2_FILTER_FREE */
1802  bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */
1803  bnxt_free_rx_iob, /* HWRM_FREE_IOB */
1804  bnxt_hwrm_vnic_free, /* HWRM_VNIC_FREE */
1805  bnxt_hwrm_ring_free_grp, /* HWRM_RING_GRP_FREE */
1806  bnxt_hwrm_ring_free_rx, /* HWRM_RING_FREE - RX Ring */
1807  bnxt_hwrm_ring_free_tx, /* HWRM_RING_FREE - TX Ring */
1808  bnxt_hwrm_stat_ctx_free, /* HWRM_STAT_CTX_FREE */
1809  bnxt_hwrm_ring_free_cq, /* HWRM_RING_FREE - CQ Ring */
1810  bnxt_hwrm_ring_free_nq, /* HWRM_RING_FREE - NQ Ring */
1811  NULL,
1812 };
1814  bnxt_hwrm_ver_get, /* HWRM_VER_GET */
1815  bnxt_hwrm_func_reset_req, /* HWRM_FUNC_RESET */
1816  bnxt_hwrm_func_drv_rgtr, /* HWRM_FUNC_DRV_RGTR */
1817  bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */
1818  bnxt_hwrm_backing_store_cfg, /* HWRM_FUNC_BACKING_STORE_CFG */
1819  bnxt_hwrm_backing_store_qcfg, /* HWRM_FUNC_BACKING_STORE_QCFG */
1820  bnxt_hwrm_func_resource_qcaps, /* HWRM_FUNC_RESOURCE_QCAPS */
1821  bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */
1822  bnxt_get_vlan, /* HWRM_NVM_GET_VARIABLE - vlan */
1823  bnxt_hwrm_port_mac_cfg, /* HWRM_PORT_MAC_CFG */
1824  bnxt_hwrm_func_cfg_req, /* HWRM_FUNC_CFG */
1825  bnxt_query_phy_link, /* HWRM_PORT_PHY_QCFG */
1826  bnxt_get_device_address, /* HW MAC address */
1827  NULL,
1828 };
1829 
1831  bnxt_hwrm_stat_ctx_alloc, /* HWRM_STAT_CTX_ALLOC */
1832  bnxt_hwrm_queue_qportcfg, /* HWRM_QUEUE_QPORTCFG */
1833  bnxt_hwrm_ring_alloc_nq, /* HWRM_RING_ALLOC - NQ Ring */
1834  bnxt_hwrm_ring_alloc_cq, /* HWRM_RING_ALLOC - CQ Ring */
1835  bnxt_hwrm_ring_alloc_tx, /* HWRM_RING_ALLOC - TX Ring */
1836  bnxt_hwrm_ring_alloc_rx, /* HWRM_RING_ALLOC - RX Ring */
1837  bnxt_hwrm_ring_alloc_grp, /* HWRM_RING_GRP_ALLOC - Group */
1838  bnxt_hwrm_vnic_alloc, /* HWRM_VNIC_ALLOC */
1839  bnxt_post_rx_buffers, /* Post RX buffers */
1840  bnxt_hwrm_set_async_event, /* ENABLES_ASYNC_EVENT_CR */
1841  bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */
1842  bnxt_hwrm_cfa_l2_filter_alloc, /* HWRM_CFA_L2_FILTER_ALLOC */
1843  bnxt_get_phy_link, /* HWRM_PORT_PHY_QCFG - PhyLink */
1844  bnxt_set_rx_mask, /* HWRM_CFA_L2_SET_RX_MASK */
1845  NULL,
1846 };
1847 
1848 int bnxt_hwrm_run ( hwrm_func_t cmds[], struct bnxt *bp )
1849 {
1850  hwrm_func_t *ptr;
1851  int ret;
1852 
1853  for ( ptr = cmds; *ptr; ++ptr ) {
1854  memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE );
1855  memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE );
1856  ret = ( *ptr ) ( bp );
1857  if ( ret ) {
1858  DBGP ( "- %s ( ): Failed\n", __func__ );
1859  return STATUS_FAILURE;
1860  }
1861  }
1862  return STATUS_SUCCESS;
1863 }
1864 
/* Convenience wrappers running each HWRM step table via bnxt_hwrm_run(). */
1865 #define bnxt_down_chip( bp ) bnxt_hwrm_run ( bring_down_chip, bp )
1866 #define bnxt_up_chip( bp ) bnxt_hwrm_run ( bring_up_chip, bp )
1867 #define bnxt_down_nic( bp ) bnxt_hwrm_run ( bring_down_nic, bp )
1868 #define bnxt_up_nic( bp ) bnxt_hwrm_run ( bring_up_nic, bp )
1869 
/**
 * iPXE .open handler: initialise the NIC memory rings and run the
 * bring-up HWRM sequence.
 */
static int bnxt_open ( struct net_device *dev )
{
	struct bnxt *bp = netdev_priv ( dev );

	DBGP ( "%s\n", __func__ );
	bnxt_mm_nic ( bp );
	return bnxt_up_nic ( bp );
}
1878 
1879 static void bnxt_tx_adjust_pkt ( struct bnxt *bp, struct io_buffer *iob )
1880 {
1881  u16 prev_len = iob_len ( iob );
1882 
1883  bp->vlan_tx = bnxt_get_pkt_vlan ( ( char * )iob->data );
1884  if ( !bp->vlan_tx && bp->vlan_id )
1885  bnxt_add_vlan ( iob, bp->vlan_id );
1886 
1887  dbg_tx_vlan ( bp, ( char * )iob->data, prev_len, iob_len ( iob ) );
1888  if ( iob_len ( iob ) != prev_len )
1889  prev_len = iob_len ( iob );
1890 
1891  iob_pad ( iob, ETH_ZLEN );
1892  dbg_tx_pad ( prev_len, iob_len ( iob ) );
1893 }
1894 
1895 static int bnxt_tx ( struct net_device *dev, struct io_buffer *iob )
1896 {
1897  struct bnxt *bp = netdev_priv ( dev );
1898  u16 len, entry;
1899  dma_addr_t mapping;
1900 
1901  if ( bnxt_tx_avail ( bp ) < 1 ) {
1902  DBGP ( "- %s ( ): Failed no bd's available\n", __func__ );
1903  return -ENOBUFS;
1904  }
1905 
1906  bnxt_tx_adjust_pkt ( bp, iob );
1907  entry = bp->tx.prod_id;
1908  mapping = virt_to_bus ( iob->data );
1909  len = iob_len ( iob );
1910  bp->tx.iob[entry] = iob;
1911  bnxt_set_txq ( bp, entry, mapping, len );
1912  entry = NEXT_IDX ( entry, bp->tx.ring_cnt );
1913  dump_tx_pkt ( ( u8 * )iob->data, len, bp->tx.prod_id );
1914  /* Packets are ready, update Tx producer idx local and on card. */
1915  bnxt_db_tx ( bp, ( u32 )entry );
1916  bp->tx.prod_id = entry;
1917  bp->tx.cnt_req++;
1918  /* memory barrier */
1919  mb ( );
1920  return 0;
1921 }
1922 
1923 static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt )
1924 {
1925  u16 cons_id;
1926 
1927  cons_id = bp->nq.cons_id + cnt;
1928  if ( cons_id >= bp->nq.ring_cnt ) {
1929  /* Toggle completion bit when the ring wraps. */
1930  bp->nq.completion_bit ^= 1;
1931  cons_id = cons_id - bp->nq.ring_cnt;
1932  }
1933  bp->nq.cons_id = cons_id;
1934 }
1935 
1936 void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt )
1937 {
1938  switch ( evt->event_id ) {
1940  if ( evt->event_data1 & 0x01 )
1941  bp->link_status = STATUS_LINK_ACTIVE;
1942  else
1943  bp->link_status = STATUS_LINK_DOWN;
1944  bnxt_set_link ( bp );
1945  dbg_link_status ( bp );
1946  break;
1947  default:
1948  break;
1949  }
1950 }
1951 
1952 static void bnxt_service_cq ( struct net_device *dev )
1953 {
1954  struct bnxt *bp = netdev_priv ( dev );
1955  struct cmpl_base *cmp;
1956  struct tx_cmpl *tx;
1957  u16 old_cid = bp->cq.cons_id;
1958  int done = SERVICE_NEXT_CQ_BD;
1959  u32 cq_type;
1960 
1961  while ( done == SERVICE_NEXT_CQ_BD ) {
1962  cmp = ( struct cmpl_base * )BD_NOW ( bp->cq.bd_virt,
1963  bp->cq.cons_id,
1964  sizeof ( struct cmpl_base ) );
1965 
1966  if ( ( cmp->info3_v & CMPL_BASE_V ) ^ bp->cq.completion_bit )
1967  break;
1968 
1969  cq_type = cmp->type & CMPL_BASE_TYPE_MASK;
1970  dump_evt ( ( u8 * )cmp, cq_type, bp->cq.cons_id, 0 );
1971  dump_cq ( cmp, bp->cq.cons_id );
1972 
1973  switch ( cq_type ) {
1974  case CMPL_BASE_TYPE_TX_L2:
1975  tx = ( struct tx_cmpl * )cmp;
1976  bnxt_tx_complete ( dev, ( u16 )tx->opaque );
1977  /* Fall through */
1979  bnxt_adv_cq_index ( bp, 1 );
1980  break;
1981  case CMPL_BASE_TYPE_RX_L2:
1982  done = bnxt_rx_complete ( dev,
1983  ( struct rx_pkt_cmpl * )cmp );
1984  break;
1986  bnxt_link_evt ( bp,
1987  ( struct hwrm_async_event_cmpl * )cmp );
1988  bnxt_adv_cq_index ( bp, 1 );
1989  break;
1990  default:
1992  break;
1993  }
1994  }
1995 
1996  if ( bp->cq.cons_id != old_cid )
1997  bnxt_db_cq ( bp );
1998 }
1999 
2000 static void bnxt_service_nq ( struct net_device *dev )
2001 {
2002  struct bnxt *bp = netdev_priv ( dev );
2003  struct nq_base *nqp;
2004  u16 old_cid = bp->nq.cons_id;
2005  int done = SERVICE_NEXT_NQ_BD;
2006  u32 nq_type;
2007 
2008  if ( !bp->thor )
2009  return;
2010 
2011  while ( done == SERVICE_NEXT_NQ_BD ) {
2012  nqp = ( struct nq_base * )BD_NOW ( bp->nq.bd_virt,
2013  bp->nq.cons_id, sizeof ( struct nq_base ) );
2014  if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit )
2015  break;
2016  nq_type = ( nqp->type & NQ_CN_TYPE_MASK );
2017  dump_evt ( ( u8 * )nqp, nq_type, bp->nq.cons_id, 1 );
2018  dump_nq ( nqp, bp->nq.cons_id );
2019 
2020  switch ( nq_type ) {
2022  bnxt_link_evt ( bp,
2023  ( struct hwrm_async_event_cmpl * )nqp );
2024  /* Fall through */
2026  bnxt_adv_nq_index ( bp, 1 );
2027  break;
2028  default:
2030  break;
2031  }
2032  }
2033 
2034  if ( bp->nq.cons_id != old_cid )
2035  bnxt_db_nq ( bp );
2036 }
2037 
/* iPXE .poll handler: barrier, then service the CQ and ( Thor ) the NQ. */
2038 static void bnxt_poll ( struct net_device *dev )
2039 {
2040  mb ( );
2041  bnxt_service_cq ( dev );
2042  bnxt_service_nq ( dev );
2043 }
2044 
/*
 * iPXE .close handler: run the NIC teardown HWRM sequence and unmap the
 * PCI BARs.
 *
 * NOTE(review): the BARs are immediately re-mapped after being unmapped
 * - presumably so that a subsequent open() still has valid mappings;
 * confirm this intent before changing.
 */
2045 static void bnxt_close ( struct net_device *dev )
2046 {
2047  struct bnxt *bp = netdev_priv ( dev );
2048 
2049  DBGP ( "%s\n", __func__ );
2050  bnxt_down_nic (bp);
2051 
2052  /* iounmap PCI BAR ( s ) */
2053  bnxt_down_pci(bp);
2054 
2055  /* Get Bar Address */
2056  bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 );
2057  bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 );
2058  bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 );
2059 
2060 }
2061 
2063  .open = bnxt_open,
2064  .close = bnxt_close,
2065  .poll = bnxt_poll,
2066  .transmit = bnxt_tx,
2067 };
2068 
2069 static int bnxt_init_one ( struct pci_device *pci )
2070 {
2071  struct net_device *netdev;
2072  struct bnxt *bp;
2073  int err = 0;
2074 
2075  DBGP ( "%s\n", __func__ );
2076  /* Allocate network device */
2077  netdev = alloc_etherdev ( sizeof ( *bp ) );
2078  if ( !netdev ) {
2079  DBGP ( "- %s ( ): alloc_etherdev Failed\n", __func__ );
2080  err = -ENOMEM;
2081  goto disable_pdev;
2082  }
2083 
2084  /* Initialise network device */
2086 
2087  /* Driver private area for this device */
2088  bp = netdev_priv ( netdev );
2089 
2090  /* Set PCI driver private data */
2091  pci_set_drvdata ( pci, netdev );
2092 
2093  /* Clear Private area data */
2094  memset ( bp, 0, sizeof ( *bp ) );
2095  bp->pdev = pci;
2096  bp->dev = netdev;
2097  netdev->dev = &pci->dev;
2098 
2099  /* Enable PCI device */
2100  adjust_pci_device ( pci );
2101 
2102  /* Get PCI Information */
2103  bnxt_get_pci_info ( bp );
2104 
2105  /* Allocate and Initialise device specific parameters */
2106  if ( bnxt_alloc_mem ( bp ) != 0 ) {
2107  DBGP ( "- %s ( ): bnxt_alloc_mem Failed\n", __func__ );
2108  goto err_down_pci;
2109  }
2110 
2111  /* Get device specific information */
2112  if ( bnxt_up_chip ( bp ) != 0 ) {
2113  DBGP ( "- %s ( ): bnxt_up_chip Failed\n", __func__ );
2114  goto err_down_chip;
2115  }
2116 
2117  /* Register Network device */
2118  if ( register_netdev ( netdev ) != 0 ) {
2119  DBGP ( "- %s ( ): register_netdev Failed\n", __func__ );
2120  goto err_down_chip;
2121  }
2122 
2123  return 0;
2124 
2125 err_down_chip:
2126  bnxt_down_chip (bp);
2127  bnxt_free_mem ( bp );
2128 
2129 err_down_pci:
2130  bnxt_down_pci ( bp );
2131  netdev_nullify ( netdev );
2132  netdev_put ( netdev );
2133 
2134 disable_pdev:
2135  pci_set_drvdata ( pci, NULL );
2136  return err;
2137 }
2138 
/**
 * PCI remove handler: unregister the device, bring the chip down, free
 * driver resources, unmap the BARs and drop the netdev reference.
 *
 * Fix: the unregister_netdev() call was dropped by the HTML extraction
 * ( the comment above it survived ) and has been restored.
 */
static void bnxt_remove_one ( struct pci_device *pci )
{
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct bnxt *bp = netdev_priv ( netdev );

	DBGP ( "%s\n", __func__ );
	/* Unregister network device */
	unregister_netdev ( netdev );

	/* Bring down Chip */
	bnxt_down_chip ( bp );

	/* Free Allocated resource */
	bnxt_free_mem ( bp );

	/* iounmap PCI BAR ( s ) */
	bnxt_down_pci ( bp );

	/* Stop network device */
	netdev_nullify ( netdev );

	/* Drop reference to network device */
	netdev_put ( netdev );
}
2163 
2164 /* Broadcom NXE PCI driver */
/* Registers the probe/remove handlers for all IDs in bnxt_nics. */
2165 struct pci_driver bnxt_pci_driver __pci_driver = {
2166  .ids = bnxt_nics,
2167  .id_count = ARRAY_SIZE ( bnxt_nics ),
2168  .probe = bnxt_init_one,
2169  .remove = bnxt_remove_one,
2170 };
#define VNIC_CFG_REQ_ENABLES_MRU
Definition: bnxt_hsi.h:5510
#define dump_evt(cq, ty, id, ring)
Definition: bnxt_dbg.h:674
#define RING_ALLOC_REQ_RING_TYPE_NQ
Definition: bnxt_hsi.h:5904
#define u16
Definition: vga.h:20
#define dbg_tx_avail(bp, a, u)
Definition: bnxt_dbg.h:577
uint16_t u16
Definition: stdint.h:21
#define IPXE_VERSION_MAJOR
Definition: bnxt.h:38
#define bnxt_up_chip(bp)
Definition: bnxt.c:1866
#define EINVAL
Invalid argument.
Definition: errno.h:428
#define FLAG_SET(f, b)
Definition: bnxt.h:45
static u32 bnxt_set_ring_info(struct bnxt *bp)
Definition: bnxt.c:736
#define DB_OFFSET_VF
Definition: bnxt.h:186
#define LINK_SPEED_DRV_2_5G
Definition: bnxt.h:223
#define MAX_NQ_DESC_CNT
Definition: bnxt.h:168
#define HWRM_STAT_CTX_FREE
Definition: bnxt_hsi.h:191
#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
Definition: bnxt_hsi.h:6369
#define STATUS_SUCCESS
Definition: bnxt.h:57
static void * netdev_priv(struct net_device *netdev)
Get driver private area for this network device.
Definition: netdevice.h:566
struct arbelprm_rc_send_wqe rc
Definition: arbel.h:14
#define DBC_MSG_IDX(idx)
Definition: bnxt.h:187
#define pci_read_byte
Definition: bnxt.h:827
static void netdev_tx_complete(struct net_device *netdev, struct io_buffer *iobuf)
Complete network transmission.
Definition: netdevice.h:746
static void bnxt_service_cq(struct net_device *dev)
Definition: bnxt.c:1952
#define DETECT_MEDIA
Definition: bnxt.h:194
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
Definition: bnxt_hsi.h:2992
#define iob_put(iobuf, len)
Definition: iobuf.h:116
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB
Definition: bnxt_hsi.h:2978
#define MAC_HDR_SIZE
Definition: bnxt.h:179
static int bnxt_hwrm_ver_get(struct bnxt *bp)
Definition: bnxt.c:630
#define dbg_flags(func, flags)
Definition: bnxt_dbg.h:326
static void bnxt_db_tx(struct bnxt *bp, u32 idx)
Definition: bnxt.c:168
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition: netdevice.c:501
#define MEDIA_AUTO_DETECT_MASK
Definition: bnxt.h:858
u32 opaque
Definition: bnxt.h:596
#define HWRM_PORT_MAC_CFG
Definition: bnxt_hsi.h:119
#define HWRM_CFA_L2_FILTER_FREE
Definition: bnxt_hsi.h:172
#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST
Definition: bnxt_hsi.h:6368
#define HWRM_FUNC_CFG
Definition: bnxt_hsi.h:109
#define BD_NOW(bd, entry, len)
Definition: bnxt.h:158
#define IPXE_VERSION_UPDATE
Definition: bnxt.h:40
#define CMPL_BASE_V
Definition: bnxt.h:508
A PCI driver.
Definition: pci.h:224
#define SHORT_CMD_SUPPORTED
Definition: bnxt.h:830
#define dbg_hw_cmd(bp, func, cmd_len, resp_len, cmd_tmo, err)
Definition: bnxt_dbg.h:378
#define TX_BD_FLAGS
Definition: bnxt.h:841
#define VLAN_VALUE_MASK
Definition: bnxt.h:862
#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN
Definition: bnxt_hsi.h:1787
#define MEDIUM_SPEED_2500MBPS
Definition: bnxt.h:100
#define dbg_alloc_rx_iob(iob, id, cid)
Definition: bnxt_dbg.h:470
__le16 seq_id
Definition: bnxt_hsi.h:71
#define HWRM_CMD_DEFAULT_MULTIPLAYER(a)
Definition: bnxt.h:132
#define DEFAULT_NUMBER_OF_CMPL_RINGS
Definition: bnxt.h:137
static int bnxt_hwrm_ring_alloc_tx(struct bnxt *bp)
Definition: bnxt.c:1628
#define dbg_func_resource_qcaps(bp)
Definition: bnxt_dbg.h:320
#define RESP_BUFFER_SIZE
Definition: bnxt.h:153
int(* open)(struct net_device *netdev)
Open network device.
Definition: netdevice.h:222
#define DEFAULT_NUMBER_OF_RING_GRPS
Definition: bnxt.h:140
#define RX_PKT_CMPL_METADATA_VID_MASK
Definition: bnxt.h:623
__le16 def_req_timeout
Definition: bnxt_hsi.h:440
#define SERVICE_NEXT_NQ_BD
Definition: bnxt.h:176
#define HWRM_VER_GET
Definition: bnxt_hsi.h:98
#define dump_cq(cq, id)
Definition: bnxt_dbg.h:516
#define CQ_DOORBELL_KEY_IDX(a)
Definition: bnxt.h:837
Error codes.
int(* hwrm_func_t)(struct bnxt *bp)
Definition: bnxt.c:1792
#define HWRM_FUNC_RESET
Definition: bnxt_hsi.h:103
#define FUNC_DRV_RGTR_REQ_ENABLES_VER
Definition: bnxt_hsi.h:1736
#define dbg_short_cmd(sreq, func, len)
Definition: bnxt_dbg.h:398
__le16 signature
Definition: bnxt_hsi.h:87
static int bnxt_get_device_address(struct bnxt *bp)
Definition: bnxt.c:104
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB
Definition: bnxt_hsi.h:2973
#define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
Definition: bnxt.h:618
#define test_if
Definition: bnxt.h:824
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID
Definition: bnxt_hsi.h:6229
#define dbg_rxp(iob, rx_len, drop)
Definition: bnxt_dbg.h:473
#define HWRM_CMD_FLASH_MULTIPLAYER(a)
Definition: bnxt.h:133
#define RX_MASK_PROMISCUOUS_MODE
Definition: bnxt.h:91
#define LINK_SPEED_DRV_AUTONEG
Definition: bnxt.h:207
I/O buffers.
void free_iob(struct io_buffer *iobuf)
Free I/O buffer.
Definition: iobuf.c:145
struct pci_device_id * ids
PCI ID table.
Definition: pci.h:226
static void hwrm_init(struct bnxt *bp, struct input *req, u16 cmd, u16 len)
Definition: bnxt.c:564
#define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
Definition: bnxt_hsi.h:2987
#define NQ_RING_BUFFER_SIZE
Definition: bnxt.h:169
uint8_t type
Type.
Definition: ena.h:16
int bnxt_hwrm_run(hwrm_func_t cmds[], struct bnxt *bp)
Definition: bnxt.c:1848
static int bnxt_hwrm_func_cfg_req(struct bnxt *bp)
Definition: bnxt.c:874
#define FLAG_TEST(f, b)
Definition: bnxt.h:46
#define VALID_DRIVER_REG
Definition: bnxt.h:709
#define PCI_COMMAND_INTX_DISABLE
Interrupt disable.
Definition: pci.h:31
static void bnxt_db_nq(struct bnxt *bp)
Definition: bnxt.c:141
#define dbg_tx_vlan(bp, src, plen, len)
Definition: bnxt_dbg.h:578
#define HWRM_VERSION_MINOR
Definition: bnxt_hsi.h:369
__le16 req_type
Definition: bnxt_hsi.h:86
#define RING_FREE(bp, rid, flag)
Definition: bnxt.h:849
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK
Definition: bnxt_hsi.h:2960
#define write32
Definition: bnxt.h:825
#define LINK_POLL_WAIT_TIME
Definition: bnxt.h:163
static int bnxt_hwrm_ring_free_rx(struct bnxt *bp)
Definition: bnxt.c:1670
Definition: bnxt_hsi.h:68
static void * bnxt_pci_base(struct pci_device *pdev, unsigned int reg)
Definition: bnxt.c:57
#define RING_FREE_REQ_RING_TYPE_TX
Definition: bnxt_hsi.h:5982
switch(len)
Definition: string.h:61
static int bnxt_hwrm_ring_alloc_grp(struct bnxt *bp)
Definition: bnxt.c:1500
uint16_t bp
Definition: registers.h:23
static u32 bnxt_tx_avail(struct bnxt *bp)
Definition: bnxt.c:218
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
Definition: bnxt_hsi.h:6202
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE
Definition: bnxt_hsi.h:553
#define PCI_BASE_ADDRESS_0
Definition: pci.h:61
unsigned long dma_addr_t
Definition: bnx2.h:20
#define SET_MBA(p, m, s)
Definition: bnxt.h:851
#define HWRM_FUNC_BACKING_STORE_CFG
Definition: bnxt_hsi.h:283
#define D3_LINK_SPEED_FW_NUM
Definition: bnxt.h:294
#define D3_SPEED_FW_SHIFT
Definition: bnxt.h:857
static int bnxt_hwrm_ring_alloc(struct bnxt *bp, u8 type)
Definition: bnxt.c:1544
#define write64
Definition: bnxt.h:826
void bnxt_link_evt(struct bnxt *bp, struct hwrm_async_event_cmpl *evt)
Definition: bnxt.c:1936
__le16 default_rx_ring_id
Definition: bnxt_hsi.h:5519
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition: netdevice.c:186
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
Definition: bnxt.c:898
__le16 max_ext_req_len
Definition: bnxt_hsi.h:466
#define BNXT_RX_STD_DMA_SZ
Definition: bnxt.h:156
iPXE timers
#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS
Definition: bnxt_hsi.h:1533
#define PCI_COMMAND
PCI command.
Definition: pci.h:24
void bnxt_mm_nic(struct bnxt *bp)
Definition: bnxt.c:467
static int bnxt_hwrm_cfa_l2_filter_alloc(struct bnxt *bp)
Definition: bnxt.c:986
#define LINK_SPEED_DRV_NUM
Definition: bnxt.h:204
#define STATUS_LINK_DOWN
Definition: bnxt.h:62
#define BNXT_FLAG_MULTI_HOST
Definition: bnxt.h:51
#define MEDIUM_SPEED_10GBPS
Definition: bnxt.h:101
#define LM_PAGE_BITS(a)
Definition: bnxt.h:155
static u16 bnxt_vf_nics[]
Definition: bnxt.h:999
#define HWRM_FUNC_QCAPS
Definition: bnxt_hsi.h:107
static int bnxt_hwrm_func_resource_qcaps(struct bnxt *bp)
Definition: bnxt.c:673
#define GET_MEDIUM_SPEED(m)
Definition: bnxt.h:112
static int bnxt_hwrm_func_reset_req(struct bnxt *bp)
Definition: bnxt.c:860
#define FUNC_CFG_REQ_EVB_MODE_NO_EVB
Definition: bnxt_hsi.h:1607
#define PORT_PHY_QCFG_RESP_LINK_LINK
Definition: bnxt_hsi.h:3099
static int bnxt_init_one(struct pci_device *pci)
Definition: bnxt.c:2069
#define VLAN_HDR_SIZE
Definition: bnxt.h:180
static void bnxt_adv_cq_index(struct bnxt *bp, u16 cnt)
Definition: bnxt.c:389
static int bnxt_hwrm_nvm_get_variable_req(struct bnxt *bp, u16 data_len, u16 option_num, u16 dimensions, u16 index_0)
Definition: bnxt.c:1120
#define D3_SPEED_FW_MASK
Definition: bnxt.h:856
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition: pci.c:149
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:128
#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY
Definition: bnxt_hsi.h:2940
#define MEDIUM_SPEED_50GBPS
Definition: bnxt.h:105
#define CHIP_NUM_57500
Definition: bnxt.h:872
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE
Definition: bnxt_hsi.h:2956
#define PCI_SUBSYSTEM_ID
PCI subsystem ID.
Definition: pci.h:77
struct device dev
Generic device.
Definition: pci.h:189
#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER
Definition: bnxt_hsi.h:1742
#define BNXT_FLAG_NPAR_MODE
Definition: bnxt.h:52
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK
Definition: bnxt_hsi.h:6215
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS
Definition: bnxt_hsi.h:1549
u32 v
Definition: bnxt.h:544
u16 len
Definition: bnxt.h:440
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID
Definition: bnxt_hsi.h:5897
void bnxt_add_vlan(struct io_buffer *iob, u16 vlan)
Definition: bnxt.c:177
static void bnxt_close(struct net_device *dev)
Definition: bnxt.c:2045
dma_addr_t addr
Definition: bnxt.h:31
#define MEDIUM_SPEED_200GBPS
Definition: bnxt.h:107
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST
Definition: bnxt_hsi.h:1410
Dynamic memory allocation.
#define HWRM_FUNC_QCFG
Definition: bnxt_hsi.h:108
static int bnxt_hwrm_func_qcaps_req(struct bnxt *bp)
Definition: bnxt.c:789
#define PHY_STATUS
Definition: bnxt.h:192
#define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT
Definition: bnxt.h:681
void bnxt_rx_process(struct net_device *dev, struct bnxt *bp, struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi)
Definition: bnxt.c:402
#define RX_MASK_ACCEPT_NONE
Definition: bnxt.h:85
#define MEDIUM_SPEED_25GBPS
Definition: bnxt.h:103
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
Definition: netdevice.h:498
#define NQ_CN_TYPE_MASK
Definition: bnxt.h:533
int bnxt_free_rx_iob(struct bnxt *bp)
Definition: bnxt.c:263
#define dbg_pci(bp, func, creg)
Definition: bnxt_dbg.h:140
#define dump_nq(nq, id)
Definition: bnxt_dbg.h:517
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:338
#define DEFAULT_NUMBER_OF_STAT_CTXS
Definition: bnxt.h:141
#define STATUS_LINK_ACTIVE
Definition: bnxt.h:61
u16 type
Definition: bnxt.h:525
#define HWRM_RING_GRP_ALLOC
Definition: bnxt_hsi.h:165
#define ENOMEM
Not enough space.
Definition: errno.h:534
#define pci_write_word
Definition: bnxt.h:829
#define LINK_SPEED_FW_NUM
Definition: bnxt.h:270
#define LINK_SPEED_DRV_40G
Definition: bnxt.h:215
u32 opaque
Definition: bnxt.h:441
void * memcpy(void *dest, const void *src, size_t len) __nonnull
#define VALID_RING_CQ
Definition: bnxt.h:711
int bnxt_alloc_mem(struct bnxt *bp)
Definition: bnxt.c:537
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX
Definition: bnxt_hsi.h:2996
#define NO_MORE_NQ_BD_TO_SERVICE
Definition: bnxt.h:175
#define QCFG_PHY_ALL
Definition: bnxt.h:196
#define VALID_VNIC_ID
Definition: bnxt.h:715
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID
Definition: bnxt_hsi.h:5512
#define RX_MASK
Definition: bnxt.h:164
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
uint16_t device
Device ID.
Definition: pci.h:204
#define MAX_RX_DESC_CNT
Definition: bnx2.h:3885
#define VLAN_VALUE_SHIFT
Definition: bnxt.h:863
static int bnxt_hwrm_set_rx_mask(struct bnxt *bp, u32 rx_mask)
Definition: bnxt.c:1070
static int wait_resp(struct bnxt *bp, u32 tmo, u16 len, const char *func)
Definition: bnxt.c:600
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB
Definition: bnxt_hsi.h:2976
#define DBGP(...)
Definition: compiler.h:532
int bnxt_post_rx_buffers(struct bnxt *bp)
Definition: bnxt.c:313
#define REQ_BUFFER_SIZE
Definition: bnxt.h:152
#define HWRM_QUEUE_QPORTCFG
Definition: bnxt_hsi.h:134
#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR
Definition: bnxt_hsi.h:1544
#define SHORT_CMD_REQUIRED
Definition: bnxt.h:831
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
#define RX_RING_BUFFER_SIZE
Definition: bnxt.h:147
#define HWRM_CFA_L2_SET_RX_MASK
Definition: bnxt_hsi.h:174
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition: netdevice.h:555
#define DEFAULT_NUMBER_OF_RX_RINGS
Definition: bnxt.h:139
#define dbg_mem(bp, func)
Definition: bnxt_dbg.h:175
Ethernet protocol.
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
Definition: bnxt.c:927
#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT
Definition: bnxt.h:498
static void bnxt_tx_adjust_pkt(struct bnxt *bp, struct io_buffer *iob)
Definition: bnxt.c:1879
#define HWRM_FUNC_VF_CFG
Definition: bnxt_hsi.h:101
#define BYTE_SWAP_S(w)
Definition: bnxt.h:182
#define dbg_func_qcaps(bp)
Definition: bnxt_dbg.h:321
static int bnxt_reset_rx_mask(struct bnxt *bp)
Definition: bnxt.c:1787
#define SPEED_FW_SHIFT
Definition: bnxt.h:855
static int bnxt_hwrm_ring_free_nq(struct bnxt *bp)
Definition: bnxt.c:1692
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID
Definition: bnxt_hsi.h:5896
static int bnxt_hwrm_ring_alloc_rx(struct bnxt *bp)
Definition: bnxt.c:1634
#define DB_OFFSET_PF
Definition: bnxt.h:185
#define dbg_rx_vlan(bp, metadata, flags2, rx_vid)
Definition: bnxt_dbg.h:469
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition: netdevice.h:768
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP
Definition: bnxt_hsi.h:5506
#define PCI_BASE_ADDRESS_2
Definition: pci.h:63
static int bnxt_hwrm_cfa_l2_filter_free(struct bnxt *bp)
Definition: bnxt.c:1027
#define pci_read_word16
Definition: bnxt.h:828
#define HWRM_CFA_L2_FILTER_ALLOC
Definition: bnxt_hsi.h:171
#define u32
Definition: vga.h:21
#define bnxt_down_nic(bp)
Definition: bnxt.c:1867
void udelay(unsigned long usecs)
Delay for a fixed number of microseconds.
Definition: timer.c:60
u32 info3_v
Definition: bnxt.h:507
static int bnxt_hwrm_set_async_event(struct bnxt *bp)
Definition: bnxt.c:948
#define BNXT_DMA_ALIGNMENT
Definition: bnxt.h:150
static struct net_device * netdev
Definition: gdbudp.c:52
uint64_t u64
Definition: stdint.h:25
#define PORT_PHY_FLAGS
Definition: bnxt.h:846
#define u8
Definition: igbvf_osdep.h:38
static int bnxt_hwrm_ring_alloc_cq(struct bnxt *bp)
Definition: bnxt.c:1622
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition: pci.c:96
#define STATUS_TIMEOUT
Definition: bnxt.h:81
#define NQ_CN_TYPE_CQ_NOTIFICATION
Definition: bnxt.h:536
static void bnxt_tx_complete(struct net_device *dev, u16 hw_idx)
Definition: bnxt.c:250
#define CMPL_BASE_TYPE_RX_L2
Definition: bnxt.h:490
#define TX_AVAIL(r)
Definition: bnxt.h:173
static void bnxt_down_pci(struct bnxt *bp)
Definition: bnxt.c:40
#define HWRM_NA_SIGNATURE
Definition: bnxt_hsi.h:362
__le16 cmpl_ring
Definition: bnxt_hsi.h:70
#define bnxt_down_chip(bp)
Definition: bnxt.c:1865
#define NUM_RX_BUFFERS
Definition: bnxt.h:142
#define HWRM_FUNC_DRV_RGTR
Definition: bnxt_hsi.h:115
void unregister_netdev(struct net_device *netdev)
Unregister network device.
Definition: netdevice.c:844
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, u16 idx)
Definition: bnxt.c:1085
static int bnxt_hwrm_ring_free_tx(struct bnxt *bp)
Definition: bnxt.c:1655
static int bnxt_get_pci_info(struct bnxt *bp)
Definition: bnxt.c:66
hwrm_func_t bring_up_chip[]
Definition: bnxt.c:1813
u32 flags2
Definition: bnxt.h:610
uint32_t rx
Maximum number of receive queues.
Definition: intelvf.h:16
__le64 req_addr
Definition: bnxt_hsi.h:92
static void bnxt_remove_one(struct pci_device *pci)
Definition: bnxt.c:2139
#define SUPPORT_SPEEDS
Definition: bnxt.h:195
#define TX_RING_BUFFER_SIZE
Definition: bnxt.h:146
#define TX_BD_SHORT_FLAGS_LHINT_LT512
Definition: bnxt.h:434
#define HWRM_FUNC_RESOURCE_QCAPS
Definition: bnxt_hsi.h:280
#define MAX_CQ_DESC_CNT
Definition: bnxt.h:145
static __always_inline void off_t userptr_t src
Definition: efi_uaccess.h:66
#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID
Definition: bnxt_hsi.h:5511
#define bnxt_up_nic(bp)
Definition: bnxt.c:1868
void bnxt_mm_init(struct bnxt *bp, const char *func)
Definition: bnxt.c:446
#define BNXT_CQ_INTR_MODE(vf)
Definition: bnxt.h:159
#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM
Definition: bnxt.h:332
#define dbg_rx_cid(idx, cid)
Definition: bnxt_dbg.h:471
void bnxt_set_txq(struct bnxt *bp, int entry, dma_addr_t mapping, int len)
Definition: bnxt.c:230
__le16 default_cmpl_ring_id
Definition: bnxt_hsi.h:5520
#define dbg_rx_stat(bp)
Definition: bnxt_dbg.h:474
#define TX_BD_SHORT_FLAGS_LHINT_LT1K
Definition: bnxt.h:435
#define VALID_RING_NQ
Definition: bnxt.h:718
#define STATUS_FAILURE
Definition: bnxt.h:58
static int bnxt_rx_complete(struct net_device *dev, struct rx_pkt_cmpl *rx)
Definition: bnxt.c:426
#define dbg_alloc_rx_iob_fail(iob_idx, cons_id)
Definition: bnxt_dbg.h:472
#define HWRM_STAT_CTX_ALLOC
Definition: bnxt_hsi.h:190
static int bnxt_tx(struct net_device *dev, struct io_buffer *iob)
Definition: bnxt.c:1895
union aes_table_entry entry[256]
Table entries, indexed by S(N)
Definition: aes.c:26
#define DBC_DBC_TYPE_SRQ
Definition: bnxt.h:403
uint32_t tx
Maximum number of transmit queues.
Definition: intelvf.h:14
unsigned long pci_bar_size(struct pci_device *pci, unsigned int reg)
Find the size of a PCI BAR.
Definition: pciextra.c:90
#define CMPL_BASE_TYPE_TX_L2
Definition: bnxt.h:489
static unsigned int unsigned int reg
Definition: intel.h:245
static struct pci_device_id bnxt_nics[]
Definition: bnxt.h:935
#define VLAN_SHIFT
Definition: bnxt.h:861
#define DBC_DBC_TYPE_CQ_ARMALL
Definition: bnxt.h:407
#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS
Definition: bnxt_hsi.h:1535
PCI bus.
static int bnxt_get_link_speed(struct bnxt *bp)
Definition: bnxt.c:1139
A PCI device.
Definition: pci.h:187
int register_netdev(struct net_device *netdev)
Register network device.
Definition: netdevice.c:667
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:151
#define LINK_SPEED_DRV_MASK
Definition: bnxt.h:205
__le16 req_type
Definition: bnxt_hsi.h:69
#define MEDIUM_SPEED_1000MBPS
Definition: bnxt.h:99
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT
Definition: bnxt.h:635
#define VALID_L2_FILTER
Definition: bnxt.h:717
#define HWRM_CMD_WAIT(b)
Definition: bnxt.h:135
A network device.
Definition: netdevice.h:348
static int bnxt_hwrm_vnic_free(struct bnxt *bp)
Definition: bnxt.c:1733
#define PORT_MAC_CFG_REQ_LPBK_NONE
Definition: bnxt_hsi.h:3378
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition: netdevice.h:511
#define ARRAY_SIZE(x)
Definition: efx_common.h:43
u16 type
Definition: bnxt.h:486
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB
Definition: bnxt_hsi.h:2970
#define dump_tx_pkt(pkt, len, idx)
Definition: bnxt_dbg.h:581
#define CQ_RING_BUFFER_SIZE
Definition: bnxt.h:149
hwrm_func_t bring_down_nic[]
Definition: bnxt.c:1799
static void hwrm_write_req(struct bnxt *bp, void *req, u32 cnt)
Definition: bnxt.c:574
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE
Definition: bnxt_hsi.h:2958
#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST
Definition: bnxt_hsi.h:6366
#define PHY_SPEED
Definition: bnxt.h:193
static void thor_db(struct bnxt *bp, u32 idx, u32 xid, u32 flag)
Definition: bnxt.c:126
static int bnxt_hwrm_ring_free_cq(struct bnxt *bp)
Definition: bnxt.c:1640
#define PORT_PHY_CFG_REQ_FLAGS_FORCE
Definition: bnxt_hsi.h:2942
#define FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_NUM
Definition: bnxt.h:340
#define dump_tx_stat(bp)
Definition: bnxt_dbg.h:580
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE
Definition: bnxt_hsi.h:1735
#define RX_MASK_ACCEPT_ALL_MULTICAST
Definition: bnxt.h:88
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp)
Definition: bnxt.c:1308
#define ETH_ALEN
Definition: if_ether.h:8
#define ETH_ZLEN
Definition: if_ether.h:10
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
Definition: bnxt.c:1267
#define STAT_CTX_ID
Definition: bnxt.h:172
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB
Definition: bnxt_hsi.h:2979
static void bnxt_hwrm_assign_resources(struct bnxt *bp)
Definition: bnxt.c:771
#define SHORT_REQ_SIGNATURE_SHORT_CMD
Definition: bnxt_hsi.h:88
FILE_LICENCE(GPL2_ONLY)
#define TX_BD_SHORT_FLAGS_LHINT_GTE2K
Definition: bnxt.h:437
static int bnxt_hwrm_port_mac_cfg(struct bnxt *bp)
Definition: bnxt.c:1293
#define VF_CFG_ENABLE_FLAGS
Definition: bnxt.h:864
#define HWRM_VNIC_CFG
Definition: bnxt_hsi.h:150
#define TX_BD_SHORT_FLAGS_LHINT_LT2K
Definition: bnxt.h:436
static int bnxt_hwrm_ring_alloc_nq(struct bnxt *bp)
Definition: bnxt.c:1685
static int is_valid_ether_addr(const void *addr)
Check if Ethernet address is valid.
Definition: ethernet.h:77
void * memmove(void *dest, const void *src, size_t len) __nonnull
static void bnxt_set_link(struct bnxt *bp)
Definition: bnxt.c:118
static int bnxt_query_phy_link(struct bnxt *bp)
Definition: bnxt.c:1374
#define HWRM_FUNC_DRV_UNRGTR
Definition: bnxt_hsi.h:112
#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT
Definition: bnxt_hsi.h:6247
#define ETHERTYPE_VLAN
Definition: bnxt.h:181
#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS
Definition: bnxt_hsi.h:1534
#define dbg_chip_info(bp)
Definition: bnxt_dbg.h:324
u16 flags_type
Definition: bnxt.h:421
void __asmcall int val
Definition: setjmp.h:28
u8 bnxt_rx_drop(struct bnxt *bp, struct io_buffer *iob, struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len)
Definition: bnxt.c:340
#define FLAG_RESET(f, b)
Definition: bnxt.h:47
Network device operations.
Definition: netdevice.h:213
#define HWRM_RING_FREE
Definition: bnxt_hsi.h:160
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition: netdevice.c:470
static void bnxt_db_rx(struct bnxt *bp, u32 idx)
Definition: bnxt.c:160
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX
Definition: bnxt_hsi.h:2995
struct device * dev
Underlying hardware device.
Definition: netdevice.h:360
#define HWRM_VNIC_ALLOC
Definition: bnxt_hsi.h:148
#define HWRM_PORT_PHY_QCFG
Definition: bnxt_hsi.h:125
#define LINK_SPEED_DRV_10G
Definition: bnxt.h:211
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB
Definition: bnxt_hsi.h:2977
#define TX_IN_USE(a, b, c)
Definition: bnxt.h:174
static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
Definition: bnxt.c:1454
static int bnxt_hwrm_vnic_cfg(struct bnxt *bp)
Definition: bnxt.c:1756
#define MEDIUM_SPEED_AUTONEG
Definition: bnxt.h:95
Network device management.
#define CMPL_BASE_TYPE_STAT_EJECT
Definition: bnxt.h:494
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition: pci.h:348
u8 bnxt_is_pci_vf(struct pci_device *pdev)
Check if Virtual Function.
Definition: bnxt.c:29
#define MAX_TX_DESC_CNT
Definition: bnx2.h:3881
#define dbg_tx_done(pkt, len, idx)
Definition: bnxt_dbg.h:583
void mdelay(unsigned long msecs)
Delay for a fixed number of milliseconds.
Definition: timer.c:78
#define PCICFG_ME_REGISTER
Definition: bnxt.h:121
#define HWRM_NVM_GET_VARIABLE
Definition: bnxt_hsi.h:308
#define HWRM_VERSION_UPDATE
Definition: bnxt_hsi.h:370
struct net_device * dev
Definition: bnxt.h:738
#define LINK_DEFAULT_TIMEOUT
Definition: bnxt.h:162
#define CMPL_DOORBELL_KEY_CMPL
Definition: bnxt.h:380
Definition: bnxt.h:445
#define NO_MORE_CQ_BD_TO_SERVICE
Definition: bnxt.h:177
#define prn_set_speed(speed)
Definition: bnxt_dbg.h:323
#define RING_ALLOC_REQ_RING_TYPE_TX
Definition: bnxt_hsi.h:5900
static void short_hwrm_cmd_req(struct bnxt *bp, u16 len)
Definition: bnxt.c:585
static int bnxt_hwrm_ring_free_grp(struct bnxt *bp)
Definition: bnxt.c:1477
#define RX_MASK_ACCEPT_MULTICAST
Definition: bnxt.h:87
u32 opaque
Definition: bnxt.h:690
#define SPEED_DRV_SHIFT
Definition: bnxt.h:853
uint32_t len
Length.
Definition: ena.h:14
#define dbg_fw_ver(resp, tmo)
Definition: bnxt_dbg.h:319
#define ENOBUFS
No buffer space available.
Definition: errno.h:498
#define MEDIUM_FULL_DUPLEX
Definition: bnxt.h:115
static int bnxt_get_vlan(struct bnxt *bp)
Definition: bnxt.c:1203
#define VALID_STAT_CTX
Definition: bnxt.h:710
Media Independent Interface constants.
void bnxt_free_mem(struct bnxt *bp)
Definition: bnxt.c:494
#define VALID_RING_RX
Definition: bnxt.h:713
__le16 num_hw_ring_grps
Definition: bnxt_hsi.h:1562
#define FUNC_CFG_REQ_ENABLES_EVB_MODE
Definition: bnxt_hsi.h:1547
#define RING_ALLOC_REQ_RING_TYPE_RX
Definition: bnxt_hsi.h:5901
static void bnxt_adv_nq_index(struct bnxt *bp, u16 cnt)
Definition: bnxt.c:1923
#define dump_rx_bd(rx_cmp, rx_cmp_hi, desc_idx)
Definition: bnxt_dbg.h:468
__le16 target_id
Definition: bnxt_hsi.h:72
#define RING_FREE_REQ_RING_TYPE_L2_CMPL
Definition: bnxt_hsi.h:5981
static int bnxt_alloc_rx_iob(struct bnxt *bp, u16 cons_id, u16 iob_idx)
Definition: bnxt.c:296
#define RING_FREE_REQ_RING_TYPE_RX
Definition: bnxt_hsi.h:5983
void * data
Start of data.
Definition: iobuf.h:44
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp)
Definition: bnxt.c:1710
#define LINK_SPEED_DRV_50G
Definition: bnxt.h:217
#define barrier()
Optimisation barrier.
Definition: compiler.h:655
static int bnxt_hwrm_backing_store_qcfg(struct bnxt *bp)
Definition: bnxt.c:1235
#define NQ_CN_V
Definition: bnxt.h:550
#define FUNC_CFG_REQ_ENABLES_NUM_MSIX
Definition: bnxt_hsi.h:1551
#define HWRM_PORT_PHY_CFG
Definition: bnxt_hsi.h:118
#define TX_RING_QID
Definition: bnxt.h:170
#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD
Definition: bnxt_hsi.h:1739
__le64 resp_addr
Definition: bnxt_hsi.h:73
struct net_device * alloc_etherdev(size_t priv_size)
Allocate Ethernet device.
Definition: ethernet.c:264
#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS
Definition: bnxt_hsi.h:1538
static void *__malloc malloc_dma(size_t size, size_t phys_align)
Allocate memory for DMA.
Definition: malloc.h:66
u16 errors_v2
Definition: bnxt.h:630
#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
Definition: bnxt_hsi.h:6367
static int bnxt_open(struct net_device *dev)
Definition: bnxt.c:1870
#define FUNC_CFG_REQ_ENABLES_NUM_VNICS
Definition: bnxt_hsi.h:1537
void iounmap(volatile const void *io_addr)
Unmap I/O address.
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
Definition: bnxt.c:1432
static void bnxt_poll(struct net_device *dev)
Definition: bnxt.c:2038
#define MAX_ETHERNET_PACKET_BUFFER_SIZE
Definition: bnxt.h:136
#define DEFAULT_NUMBER_OF_TX_RINGS
Definition: bnxt.h:138
#define RING_FREE_REQ_RING_TYPE_NQ
Definition: bnxt_hsi.h:5986
static int bnxt_set_rx_mask(struct bnxt *bp)
Definition: bnxt.c:1782
#define LINK_SPEED_DRV_200G
Definition: bnxt.h:221
#define HWRM_RING_ALLOC
Definition: bnxt_hsi.h:159
#define DMA_BUFFER_SIZE
Definition: bnxt.h:154
static int bnxt_hwrm_func_qcfg_req(struct bnxt *bp)
Definition: bnxt.c:819
#define SPEED_FW_MASK
Definition: bnxt.h:854
#define SERVICE_NEXT_CQ_BD
Definition: bnxt.h:178
#define SET_MEDIUM_DUPLEX(bp, d)
Definition: bnxt.h:118
int bnxt_vlan_drop(struct bnxt *bp, u16 rx_vlan)
Definition: bnxt.c:198
#define SPEED_DRV_MASK
Definition: bnxt.h:852
#define LINK_SPEED_DRV_100G
Definition: bnxt.h:219
#define DBC_DBC_TYPE_SQ
Definition: bnxt.h:401
static u16 bnxt_get_pkt_vlan(char *src)
Definition: bnxt.c:191
void mb(void)
Memory barrier.
#define dbg_link_status(bp)
Definition: bnxt_dbg.h:675
#define RX_DOORBELL_KEY_RX
Definition: bnxt.h:368
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL
Definition: bnxt_hsi.h:5899
#define RX_PKT_CMPL_V2
Definition: bnxt.h:631
#define PCI_SUBSYSTEM_VENDOR_ID
PCI subsystem vendor ID.
Definition: pci.h:74
#define NEXT_IDX(N, S)
Definition: bnxt.h:157
#define BNXT_FLAG_HWRM_SHORT_CMD_SUPP
Definition: bnxt.h:48
#define VALID_RING_TX
Definition: bnxt.h:712
#define dbg_num_rings(bp)
Definition: bnxt_dbg.h:325
#define MEDIUM_SPEED_40GBPS
Definition: bnxt.h:104
#define LINK_SPEED_DRV_1G
Definition: bnxt.h:209
#define TX_DOORBELL_KEY_TX
Definition: bnxt.h:358
#define dbg_link_state(bp, tmo)
Definition: bnxt_dbg.h:676
#define RX_RING_QID
Definition: bnxt.h:171
#define FUNC_CFG_PRE_BOOT_MBA_VLAN_NUM
Definition: bnxt.h:344
#define RING_ALLOC_REQ_INT_MODE_POLL
Definition: bnxt_hsi.h:5955
#define dbg_func_qcfg(bp)
Definition: bnxt_dbg.h:322
u32 set_rx_mask(u32 rx_mask)
Definition: bnxt.c:1051
#define PCI_BASE_ADDRESS_4
Definition: pci.h:65
u32 metadata
Definition: bnxt.h:622
#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE
Definition: bnxt_hsi.h:5499
void * ioremap(unsigned long bus_addr, size_t len)
Map bus address as an I/O address.
static void bnxt_service_nq(struct net_device *dev)
Definition: bnxt.c:2000
Definition: bnxt.h:524
#define HWRM_VERSION_MAJOR
Definition: bnxt_hsi.h:368
hwrm_func_t bring_down_chip[]
Definition: bnxt.c:1794
#define MEDIUM_SPEED_100GBPS
Definition: bnxt.h:106
u16 len
Definition: bnxt.h:595
hwrm_func_t bring_up_nic[]
Definition: bnxt.c:1830
static void free_dma(void *ptr, size_t size)
Free memory allocated with malloc_dma()
Definition: malloc.h:81
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE
Definition: bnxt_hsi.h:2060
#define dbg_tx_pad(plen, len)
Definition: bnxt_dbg.h:579
static void bnxt_set_rx_desc(u8 *buf, struct io_buffer *iob, u16 cid, u32 idx)
Definition: bnxt.c:283
#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME
Definition: bnxt_hsi.h:1175
static struct net_device_operations bnxt_netdev_ops
Definition: bnxt.c:2062
#define HWRM_FUNC_BACKING_STORE_QCFG
Definition: bnxt_hsi.h:284
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR
Definition: bnxt_hsi.h:6214
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB
Definition: bnxt_hsi.h:2975
#define HWRM_RING_GRP_FREE
Definition: bnxt_hsi.h:166
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0
Definition: bnxt_hsi.h:1426
#define BNXT_FLAG_RESOURCE_QCAPS_SUPPORT
Definition: bnxt.h:50
#define FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED
Definition: bnxt.h:349
#define NULL
NULL pointer (VOID *)
Definition: Base.h:362
struct golan_eqe_cmd cmd
Definition: CIB_PRM.h:29
#define SET_LINK(p, m, s)
Definition: bnxt.h:850
__le16 max_req_win_len
Definition: bnxt_hsi.h:438
#define DBC_DBC_TYPE_NQ_ARM
Definition: bnxt.h:412
#define CMPL_BASE_TYPE_MASK
Definition: bnxt.h:487
#define SET_MEDIUM_SPEED(bp, s)
Definition: bnxt.h:113
void iob_pad(struct io_buffer *iobuf, size_t min_len)
Pad I/O buffer.
Definition: iobpad.c:49
static int bnxt_hwrm_backing_store_cfg(struct bnxt *bp)
Definition: bnxt.c:1250
#define VLAN_MASK
Definition: bnxt.h:860
#define GRC_COM_CHAN_TRIG
Definition: bnxt.h:123
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST
Definition: bnxt_hsi.h:6206
struct bofm_section_header done
Definition: bofm_test.c:46
#define HWRM_MAX_REQ_LEN
Definition: bnxt_hsi.h:363
#define HWRM_CMD_POLL_WAIT_TIME
Definition: bnxt.h:131
uint8_t u8
Definition: stdint.h:19
#define HWRM_VNIC_FREE
Definition: bnxt_hsi.h:149
uint32_t u32
Definition: stdint.h:23
#define MEDIA_AUTO_DETECT_SHIFT
Definition: bnxt.h:859
#define VALID_RING_GRP
Definition: bnxt.h:714
#define DMA_ALIGN_4K
Definition: bnxt.h:151
union dma_addr64_t dma
Definition: bnxt.h:691
Definition: bnxt.h:720
u16 flags_type
Definition: bnxt.h:678
#define IPXE_VERSION_MINOR
Definition: bnxt.h:39
#define GRC_COM_CHAN_BASE
Definition: bnxt.h:122
uint16_t flag
Flag number.
Definition: hyperv.h:14
#define VNIC_ALLOC_REQ_FLAGS_DEFAULT
Definition: bnxt_hsi.h:5454
#define HWRM_CMD_DEFAULT_TIMEOUT
Definition: bnxt.h:130
static int bnxt_get_phy_link(struct bnxt *bp)
Definition: bnxt.c:1409
if(natsemi->flags &NATSEMI_64BIT) return 1
union dma_addr64_t dma
Definition: bnxt.h:442
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX
Definition: bnxt_hsi.h:2957
void * memset(void *dest, int character, size_t len) __nonnull
A persistent I/O buffer.
Definition: iobuf.h:32
uint8_t flags
Flags.
Definition: ena.h:18
#define DBC_MSG_XID(xid, flg)
Definition: bnxt.h:189
struct pci_driver bnxt_pci_driver __pci_driver
Definition: bnxt.c:2165
#define VALID_RX_IOB
Definition: bnxt.h:716
int bnxt_hwrm_ring_free(struct bnxt *bp, u16 ring_id, u8 ring_type)
Definition: bnxt.c:1531
#define LINK_SPEED_DRV_25G
Definition: bnxt.h:213
static void bnxt_db_cq(struct bnxt *bp)
Definition: bnxt.c:150