iPXE
vxge_traffic.c
Go to the documentation of this file.
1 /*
2  * vxge-traffic.c: iPXE driver for Neterion Inc's X3100 Series 10GbE
3  * PCIe I/O Virtualized Server Adapter.
4  *
5  * Copyright(c) 2002-2010 Neterion Inc.
6  *
7  * This software may be used and distributed according to the terms of
8  * the GNU General Public License (GPL), incorporated herein by
9  * reference. Drivers based on or derived from this code fall under
10  * the GPL and must retain the authorship, copyright and license
11  * notice.
12  *
13  */
14 
15 FILE_LICENCE(GPL2_ONLY);
16 
17 #include <ipxe/netdevice.h>
18 #include <string.h>
19 #include <errno.h>
20 
21 #include "vxge_traffic.h"
22 #include "vxge_config.h"
23 #include "vxge_main.h"
24 
25 /*
26  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
27  * @vpath: Virtual Path handle.
28  *
29  * Enable vpath interrupts. The function is to be executed the last in
30  * vpath initialization sequence.
31  *
32  * See also: vxge_hw_vpath_intr_disable()
33  */
/* vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath)
 *
 * Enable all alarm/interrupt sources for one virtual path.
 *
 * NOTE(review): this is Doxygen-extracted text; the signature line (35)
 * and nearly every writeq() *value* operand are missing — only the
 * destination-register operands survive.  The full prototype is taken
 * from the page's cross-reference index; verify everything below
 * against the original vxge_traffic.c.
 */
34 enum vxge_hw_status
36 {
37  struct vxge_hw_vpath_reg *vp_reg;
39 
 /* Not-open vpath: skip straight to exit.  'status' is presumably set
  * on the missing line(s) 38/41 -- TODO confirm. */
40  if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
42  goto exit;
43  }
44 
45  vp_reg = vpath->vp_reg;
46 
 /* The following writes target the per-vpath alarm/status registers.
  * Presumably each acknowledges (clears) pending state by writing an
  * all-ones mask (value operands lost in extraction) -- verify. */
48 
50  &vp_reg->general_errors_reg);
51 
53  &vp_reg->pci_config_errors_reg);
54 
56  &vp_reg->mrpcim_to_vpath_alarm_reg);
57 
59  &vp_reg->srpcim_to_vpath_alarm_reg);
60 
62  &vp_reg->vpath_ppif_int_status);
63 
65  &vp_reg->srpcim_msg_to_vpath_reg);
66 
68  &vp_reg->vpath_pcipif_int_status);
69 
71  &vp_reg->prc_alarm_reg);
72 
74  &vp_reg->wrdma_alarm_status);
75 
77  &vp_reg->asic_ntwk_vp_err_reg);
78 
80  &vp_reg->xgmac_vp_int_status);
81 
83 
84  /* Mask unwanted interrupts */
86  &vp_reg->vpath_pcipif_int_mask);
87 
89  &vp_reg->srpcim_msg_to_vpath_mask);
90 
93 
96 
98  &vp_reg->pci_config_errors_mask);
99 
100  /* Unmask the individual interrupts */
105  &vp_reg->general_errors_mask);
106 
114  &vp_reg->kdfcctl_errors_mask);
115 
117 
120  &vp_reg->prc_alarm_mask);
121 
124 
 /* Only the first vpath of the device appears to unmask the ASIC
  * network error interrupt (else branch); other vpaths leave it
  * masked -- the exact mask values are on lost lines 126/129-131. */
125  if (vpath->hldev->first_vp_id != vpath->vp_id)
127  &vp_reg->asic_ntwk_vp_err_mask);
128  else
132  0, 32), &vp_reg->asic_ntwk_vp_err_mask);
133 
135 exit:
136  return status;
137 
138 }
139 
140 /*
141  * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
142  * @vpath: Virtual Path handle.
143  *
144  * Disable vpath interrupts. The function is to be executed the last in
145  * vpath initialization sequence.
146  *
147  * See also: vxge_hw_vpath_intr_enable()
148  */
/* vxge_hw_vpath_intr_disable(struct __vxge_hw_virtualpath *vpath)
 *
 * Mask (disable) every interrupt source for one virtual path by
 * writing to each *_mask register of the vpath register block.
 *
 * NOTE(review): extraction lost the signature line (150) and the
 * writeq() value operands (presumably VXGE_HW_INTR_MASK_ALL, per the
 * cross-reference index) -- confirm against the original file.
 */
149 enum vxge_hw_status
151 {
153  struct vxge_hw_vpath_reg __iomem *vp_reg;
154 
 /* Nothing to do for a vpath that was never opened. */
155  if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
157  goto exit;
158  }
159  vp_reg = vpath->vp_reg;
160 
162  &vp_reg->vpath_general_int_mask);
163 
165 
167  &vp_reg->general_errors_mask);
168 
170  &vp_reg->pci_config_errors_mask);
171 
173  &vp_reg->mrpcim_to_vpath_alarm_mask);
174 
176  &vp_reg->srpcim_to_vpath_alarm_mask);
177 
179  &vp_reg->vpath_ppif_int_mask);
180 
182  &vp_reg->srpcim_msg_to_vpath_mask);
183 
185  &vp_reg->vpath_pcipif_int_mask);
186 
188  &vp_reg->wrdma_alarm_mask);
189 
191  &vp_reg->prc_alarm_mask);
192 
194  &vp_reg->xgmac_vp_int_mask);
195 
197  &vp_reg->asic_ntwk_vp_err_mask);
198 
199 exit:
200  return status;
201 }
202 
203 /**
204  * vxge_hw_device_mask_all - Mask all device interrupts.
205  * @hldev: HW device handle.
206  *
207  * Mask all device interrupts.
208  *
209  * See also: vxge_hw_device_unmask_all()
210  */
/* vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
 * (signature line 211 lost in extraction; prototype from the page's
 *  cross-reference index)
 *
 * Mask all device-level interrupts via titan_mask_all_int.  The value
 * written (presumably the ALARM|TRAFFIC mask bits, built on the lost
 * lines 215-218) does not survive extraction -- verify.
 */
212 {
213  u64 val64;
214 
217 
219  &hldev->common_reg->titan_mask_all_int);
220 
221  return;
222 }
223 
224 /**
225  * vxge_hw_device_unmask_all - Unmask all device interrupts.
226  * @hldev: HW device handle.
227  *
228  * Unmask all device interrupts.
229  *
230  * See also: vxge_hw_device_mask_all()
231  */
/* vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
 * (signature line 232 lost in extraction; prototype from the page's
 *  cross-reference index)
 *
 * Unmask all device-level interrupts by writing to titan_mask_all_int;
 * the value operand is on a lost line (236) -- verify against source.
 */
233 {
235 
237  &hldev->common_reg->titan_mask_all_int);
238 
239  return;
240 }
241 
242 /**
243  * vxge_hw_device_intr_enable - Enable interrupts.
244  * @hldev: HW device handle.
245  *
246  * Enable Titan interrupts. The function is to be executed the last in
247  * Titan initialization sequence.
248  *
249  * See also: vxge_hw_device_intr_disable()
250  */
/* vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
 * (signature line 251 lost in extraction)
 *
 * Enable Titan TIM (traffic interrupt moderation) interrupts for TX/RX.
 * The top-level interrupt is deliberately NOT enabled here (see the
 * trailing comment); that is left to the vxge_irq() entry path.
 */
252 {
253  u64 val64;
254  u32 val32;
255 
257 
259 
 /* Combine TX and RX int-mask words; the RX operand of this OR is on
  * the lost line 261 (presumably tim_int_mask0[VXGE_HW_VPATH_INTR_RX]
  * -- confirm). */
260  val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
262 
263  if (val64 != 0) {
 /* Ack any pending TIM status, then unmask those sources. */
264  writeq(val64, &hldev->common_reg->tim_int_status0);
265 
266  writeq(~val64, &hldev->common_reg->tim_int_mask0);
267  }
268 
269  val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
271 
272  if (val32 != 0) {
274  &hldev->common_reg->tim_int_status1);
275 
277  &hldev->common_reg->tim_int_mask1);
278  }
279 
 /* Read back to flush the posted MMIO writes through the bridge. */
280  val64 = readq(&hldev->common_reg->titan_general_int_status);
281 
282  /* We have not enabled the top level interrupt yet.
283  * This will be controlled from vxge_irq() entry api.
284  */
285  return;
286 }
287 
288 /**
289  * vxge_hw_device_intr_disable - Disable Titan interrupts.
290  * @hldev: HW device handle.
291  *
292  * Disable Titan interrupts.
293  *
294  * See also: vxge_hw_device_intr_enable()
295  */
/* vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
 * (signature line 296 lost in extraction)
 *
 * Disable Titan interrupts: mask all device interrupts (the call is on
 * lost lines 298-302, presumably vxge_hw_device_mask_all() plus writes
 * to tim_int_mask0/1 -- confirm) and mask the TIM interrupts.
 */
297 {
299 
300  /* mask all the tim interrupts */
303  &hldev->common_reg->tim_int_mask1);
304 
306 
307  return;
308 }
309 
310 /**
311  * vxge_hw_ring_rxd_post - Post descriptor on the ring.
312  * @ring: Handle to the ring object used for receive
313  * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
314  *
315  * Post descriptor on the ring.
316  * Prior to posting the descriptor should be filled in accordance with
317  * Host/Titan interface specification for a given service (LL, etc.).
318  */
/* vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring __unused,
 *                       struct vxge_hw_ring_rxd_1 *rxdp)
 * (first signature line 319 lost in extraction; full prototype from
 *  the cross-reference index)
 *
 * Hand the RxD back to the adapter.  The single body statement is on
 * the lost line 322 (presumably sets the OWN_ADAPTER bit in
 * rxdp->control_0 -- TODO confirm against the original file).
 */
320  struct vxge_hw_ring_rxd_1 *rxdp)
321 {
323 }
324 
325 /**
326  * __vxge_hw_non_offload_db_post - Post non offload doorbell
327  *
328  * @fifo: fifohandle
329  * @txdl_ptr: The starting location of the TxDL in host memory
330  * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
331  *
332  * This function posts a non-offload doorbell to doorbell FIFO
333  *
334  */
/* static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
 *                                           u64 txdl_ptr, u32 num_txds)
 * (first signature line 335 lost in extraction)
 *
 * Ring the non-offload doorbell: write control word first (value on
 * lost lines 338-339, presumably NODBW type + last-TxD count), barrier,
 * then the TxDL pointer, then a final barrier so the device observes
 * control before the pointer.
 */
336  u64 txdl_ptr, u32 num_txds)
337 {
340  &fifo->nofl_db->control_0);
341 
342  wmb();
343 
344  writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
345 
346  wmb();
347 }
348 
349 /**
350  * vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
351  *
352  * @fifo: tx channel handle
353  */
/* vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo)
 * (signature line 355 lost in extraction; prototype from the
 *  cross-reference index)
 *
 * Return the next software-owned TxD in the fifo, or NULL if the TxD
 * at sw_offset is still owned by the adapter (ring full).
 */
354 struct vxge_hw_fifo_txd *
356 {
357  struct vxge_hw_fifo_txd *txdp;
358 
359  txdp = fifo->txdl + fifo->sw_offset;
360  if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
361  vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
362  __func__, __LINE__, fifo->sw_offset);
363  return NULL;
364  }
365 
366  return txdp;
367 }
368 /**
369  * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
370  * descriptor.
371  * @fifo: Handle to the fifo object used for non offload send
372  * @txdlh: Descriptor handle.
373  * @iob: data buffer.
374  */
/* vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
 *                              struct vxge_hw_fifo_txd *txdp,
 *                              struct io_buffer *iob)
 * (first signature line 375 lost in extraction)
 *
 * Fill one TxD from an iPXE I/O buffer: gather code + length in
 * control_0 (gather-code operand on lost line 380), interrupt number
 * in control_1 (an OR of INT_TYPE_PER_LIST on lost line 384 --
 * confirm), and stash the iob pointer in host_control so completion
 * can recover it.
 */
376  struct vxge_hw_fifo_txd *txdp,
377  struct io_buffer *iob)
378 {
379  txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
381  txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));
382 
383  txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
385 
 /* host_control round-trips the io_buffer pointer through a u64;
  * buffer_pointer is the DMA (bus) address of the payload. */
386  txdp->host_control = (intptr_t)iob;
387  txdp->buffer_pointer = virt_to_bus(iob->data);
388 }
389 
390 /**
391  * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
392  * @fifo: Handle to the fifo object used for non offload send
393  * @txdp: Tx Descriptor
394  *
395  * Post descriptor on the 'fifo' type channel for transmission.
396  * Prior to posting the descriptor should be filled in accordance with
397  * Host/Titan interface specification for a given service (LL, etc.).
398  *
399  */
/* vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo,
 *                        struct vxge_hw_fifo_txd *txdp)
 * (first signature line 400 lost in extraction)
 *
 * Post a filled TxD for transmission.  The body statements are on lost
 * lines 403-407 (presumably: set OWN_ADAPTER in control_0, call
 * __vxge_hw_non_offload_db_post(), advance fifo->sw_offset via
 * vxge_hw_fifo_txd_offset_up() -- TODO confirm against full source).
 */
401  struct vxge_hw_fifo_txd *txdp)
402 {
404 
406 
408 }
409 
410 /*
411  * __vxge_hw_vpath_alarm_process - Process Alarms.
412  * @vpath: Virtual Path.
413  * @skip_alarms: Do not clear the alarms
414  *
415  * Process vpath alarms.
416  *
417  */
/* static enum vxge_hw_status
 * __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath)
 * (first signature line 418 lost in extraction)
 *
 * Decode a vpath alarm: detect PCI slot-freeze (all-foxes read-back),
 * reject unknown alarm bits, and for XMAC alarms translate the ASIC
 * network error register into netdev link up/down events.
 *
 * NOTE(review): the mask-macro operands of several multi-line
 * conditions were lost in extraction; the link-down/link-up tests
 * below are incomplete as shown -- verify against the original file.
 */
419  struct __vxge_hw_virtualpath *vpath)
420 {
421  u64 val64;
422  u64 alarm_status;
424  struct __vxge_hw_device *hldev = NULL;
425  struct vxge_hw_vpath_reg *vp_reg;
426 
427  hldev = vpath->hldev;
428  vp_reg = vpath->vp_reg;
429  alarm_status = readq(&vp_reg->vpath_general_int_status);
430 
 /* All-ones read-back means the device fell off the bus
  * (slot freeze). */
431  if (alarm_status == VXGE_HW_ALL_FOXES) {
432 
433  vxge_debug(VXGE_ERR, "%s: %s:%d, slot freeze error\n",
434  hldev->ndev->name, __func__, __LINE__);
436  goto out;
437  }
438 
 /* Alarm bits outside the known PIC/PCI/WRDMA/XMAC set (mask macros
  * on lost lines 440-443) are reported and ignored. */
439  if (alarm_status & ~(
444 
445  vxge_debug(VXGE_ERR, "%s: %s:%d, Unknown vpath alarm\n",
446  hldev->ndev->name, __func__, __LINE__);
448  goto out;
449  }
450 
451  if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
452 
453  val64 = readq(&vp_reg->xgmac_vp_int_status);
454 
455  if (val64 &
457 
458  val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
459 
 /* Fault-occurred / not-ok condition: report link down.  The
  * exact bit tests span lost lines 461-467. */
460  if (((val64 &
462  (!(val64 &
464  ((val64 &
466  && (!(val64 &
468  ))) {
470  &vp_reg->asic_ntwk_vp_err_mask);
471 
472  netdev_link_down(hldev->ndev);
473  vxge_debug(VXGE_INTR, "%s: %s:%d link down\n",
474  hldev->ndev->name, __func__, __LINE__);
475  }
476 
 /* Symmetric ok/reaffirmed-ok condition: report link up. */
477  if (((val64 &
479  (!(val64 &
481  ((val64 &
483  && (!(val64 &
485  ))) {
487  &vp_reg->asic_ntwk_vp_err_mask);
488 
489  netdev_link_up(hldev->ndev);
490  vxge_debug(VXGE_INTR, "%s: %s:%d link up\n",
491  hldev->ndev->name, __func__, __LINE__);
492  }
493 
 /* Acknowledge the ASIC network error bits (value operand on
  * lost line 494). */
495  &vp_reg->asic_ntwk_vp_err_reg);
496  }
497  } else {
498  vxge_debug(VXGE_INFO, "%s: %s:%d unhandled alarm %llx\n",
499  hldev->ndev->name, __func__, __LINE__,
500  alarm_status);
501  }
502 out:
503  return status;
504 }
505 
506 /**
507  * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
508  * condition that has caused the Tx and RX interrupt.
509  * @hldev: HW device.
510  *
511  * Acknowledge (that is, clear) the condition that has caused
512  * the Tx and Rx interrupt.
513  * See also: vxge_hw_device_begin_irq(),
514  * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
515  */
/* vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
 * (signature line 516 lost in extraction)
 *
 * Acknowledge pending TX/RX TIM interrupts by writing the active mask
 * bits back to tim_int_status0/1.  The value operands of both writes
 * (presumably the OR of the TX and RX mask words) are on lost lines
 * 521-522 and 528-530 -- verify against the original file.
 */
517 {
518 
519  if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
520  (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
523  &hldev->common_reg->tim_int_status0);
524  }
525 
526  if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
527  (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
531  &hldev->common_reg->tim_int_status1);
532  }
533 
534  return;
535 }
536 
537 
538 /**
539  * vxge_hw_device_begin_irq - Begin IRQ processing.
540  * @hldev: HW device handle.
541  *
542  * The function performs two actions, It first checks whether (shared IRQ) the
543  * interrupt was raised by the device. Next, it masks the device interrupts.
544  *
545  * Note:
546  * vxge_hw_device_begin_irq() does not flush MMIO writes through the
547  * bridge. Therefore, two back-to-back interrupts are potentially possible.
548  *
549  * Returns: 0, if the interrupt is not "ours" (note that in this case the
550  * device remain enabled).
551  * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
552  * status.
553  */
/* vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev)
 * (signature line 555 lost in extraction; prototype from the
 *  cross-reference index)
 *
 * Shared-IRQ entry check: returns VXGE_HW_ERR_WRONG_IRQ when the
 * interrupt was not raised by this device, detects slot-freeze
 * (all-foxes read-back), and dispatches vpath alarms.
 */
554 enum vxge_hw_status
556 {
557  u64 val64;
559  u64 vpath_mask;
560  enum vxge_hw_status ret = VXGE_HW_OK;
561 
562  val64 = readq(&hldev->common_reg->titan_general_int_status);
563 
 /* Zero status: interrupt is not ours (shared line). */
564  if (!val64) {
565  ret = VXGE_HW_ERR_WRONG_IRQ;
566  goto exit;
567  }
568 
 /* All-ones: adapter unreachable.  The nested test on lost lines
  * 571-573 presumably re-reads an adapter status register to confirm
  * the slot freeze -- TODO verify; 'ret' for this path is set on a
  * lost line (578). */
569  if (val64 == VXGE_HW_ALL_FOXES) {
570 
572 
574 
575  vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
576  "occurred\n", hldev->ndev->name,
577  __func__, __LINE__);
579  goto exit;
580  }
581  }
582 
 /* If a deployed vpath raised a traffic/alarm interrupt, process its
  * alarms (the shift amount, the condition body on lost lines
  * 584-590, presumably calls __vxge_hw_vpath_alarm_process()). */
583  vpath_mask = hldev->vpaths_deployed >>
586  vpath_mask))
588 
591 
592 exit:
593  return ret;
594 }
595 
596 /**
597  * vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive
598  * descriptors posted.
599  * @ring: Handle to the ring object used for receive
600  *
601  * The function writes the number of qwords of rxds posted during replishment.
602  * Since the function is called frequently, a flush is not required to post the
603  * write transaction. At the very least, the previous write will be flushed
604  * once the subsequent write is made.
605  *
606  * Returns: None.
607  */
/* vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ring)
 * (signature line 608 lost in extraction)
 *
 * Report newly posted RxD qwords to the PRC doorbell.  Doorbell and
 * block accounting updates on lost lines 611-619 (presumably
 * incrementing doorbell_cnt/total_db_cnt by the per-RxD qword count,
 * +4 qwords per completed block) -- confirm against full source.
 * Only writes the doorbell once rxd_qword_limit is accumulated, with
 * a wmb() so descriptor writes land before the doorbell.
 */
609 {
610  u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
612 
614 
616 
617  if (ring->total_db_cnt >= rxds_qw_per_block) {
618  /* For each block add 4 more qwords */
620 
621  /* Reset total count */
622  ring->total_db_cnt -= rxds_qw_per_block;
623  }
624 
625  if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
626  wmb();
628  ring->doorbell_cnt),
629  &ring->vp_reg->prc_rxd_doorbell);
630  ring->doorbell_cnt = 0;
631  }
632 }
633 
634 /**
635  * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
636  * descriptors and process the same.
637  * @ring: Handle to the ring object used for receive
638  *
639  * The function polls the Rx for the completed descriptors.
640  */
/* Ethernet frame check sequence (CRC) length, stripped from each
 * received frame before handing it to the network stack. */
641 #define ETH_FCS_LEN 4
/* vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
 * (signature line 642 lost in extraction)
 *
 * Poll the ring for completed RxDs, copy each good frame into a fresh
 * io_buffer for netdev_rx(), then re-initialize and repost the RxD.
 * Stops when an adapter-owned descriptor is hit or rx_poll_weight
 * descriptors have been examined.
 */
643 {
644  struct __vxge_hw_device *hldev;
646  struct vxge_hw_ring_rxd_1 *rxd;
647  unsigned int len;
648  enum vxge_hw_ring_tcode tcode;
649  struct io_buffer *rx_iob, *iobuf = NULL;
650  u16 poll_count = 0;
651 
652  hldev = ring->vpathh->hldev;
653 
654  do {
655  rxd = &ring->rxdl->rxd[ring->rxd_offset];
656  tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);
657 
658  /* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
659  * possible the ownership bit still set to adapter
660  */
661  if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
662  && (tcode == VXGE_HW_RING_T_CODE_OK)) {
663 
665  goto err0;
666  }
667 
668  vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
669  hldev->ndev->name, ring->rxd_offset);
670 
 /* Recover the io_buffer pointer stashed in host_control when the
  * RxD was originally prepared. */
671  iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;
672 
673  if (tcode != VXGE_HW_RING_T_CODE_OK) {
674  netdev_rx_err(hldev->ndev, NULL, -EINVAL);
675  vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
676  __func__, __LINE__, tcode);
678  goto err1;
679  }
680 
 /* len is set from the RxD buffer-size field on the lost line 681
  * (presumably VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxd->control_1)
  * -- confirm); then the trailing CRC is dropped. */
682  len -= ETH_FCS_LEN;
683 
684  rx_iob = alloc_iob(len);
685  if (!rx_iob) {
686  netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
687  vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
688  __func__, __LINE__);
690  goto err1;
691  }
692 
693  memcpy(iob_put(rx_iob, len), iobuf->data, len);
694  /* Add this packet to the receive queue. */
695  netdev_rx(hldev->ndev, rx_iob);
696 
697 err1:
698  /* repost the rxd */
699  rxd->control_0 = rxd->control_1 = 0;
701  VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
702  vxge_hw_ring_rxd_post(ring, rxd);
703 
704  /* repost the qword count for doorbell */
706 
707  /* increment the descriptor offset */
709 
710  } while (++poll_count < ring->rx_poll_weight);
711 err0:
712  return status;
713 }
714 
715 /**
716  * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
717  * the same.
718  * @fifo: Handle to the fifo object used for non offload send
719  *
720  * The function polls the Tx for the completed descriptors and calls
721  * the driver via supplied completion callback.
722  */
/* vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo)
 * (signature line 723 lost in extraction)
 *
 * Check the TxD at hw_offset: if the adapter has released it (OWN bit
 * clear) and it carries a posted buffer (host_control non-zero), hand
 * it to vxge_xmit_compl() with its transfer code.  The hw_offset
 * advance is on the lost line 735 (presumably
 * vxge_hw_fifo_txd_offset_up(&fifo->hw_offset) -- confirm).
 */
724 {
726  struct vxge_hw_fifo_txd *txdp;
727 
728  txdp = fifo->txdl + fifo->hw_offset;
729  if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
730  && (txdp->host_control)) {
731 
732  vxge_xmit_compl(fifo, txdp,
733  VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0));
734 
736  }
737 
738  return status;
739 }
u64 tim_int_mask0[4]
Definition: vxge_config.h:511
uint16_t u16
Definition: stdint.h:21
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath)
Definition: vxge_traffic.c:35
#define EINVAL
Invalid argument.
Definition: errno.h:428
#define VXGE_HW_NODBW_TYPE(val)
Definition: vxge_config.h:294
wmb()
#define iob_put(iobuf, len)
Definition: iobuf.h:124
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition: netdevice.c:586
struct vxge_hw_fifo_txd - Transmit Descriptor
Definition: vxge_config.h:212
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)
Definition: vxge_config.h:194
Error codes.
u64 mrpcim_to_vpath_alarm_mask
Definition: vxge_reg.h:4545
uint64_t readq(volatile uint64_t *io_addr)
Read 64-bit qword from memory-mapped device.
struct __vxge_hw_virtualpath * vpathh
Definition: vxge_config.h:379
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT
Definition: vxge_reg.h:470
struct __vxge_hw_virtualpath virtual_path
Definition: vxge_config.h:507
#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST
Definition: vxge_config.h:239
#define ETH_FCS_LEN
vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed descriptors and process the same.
Definition: vxge_traffic.c:641
u64 adapter_status
Definition: vxge_reg.h:137
u64 asic_ntwk_vp_err_mask
Definition: vxge_reg.h:4252
struct vxge_hw_common_reg * common_reg
Definition: vxge_config.h:497
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring __unused, struct vxge_hw_ring_rxd_1 *rxdp)
vxge_hw_ring_rxd_post - Post descriptor on the ring.
Definition: vxge_traffic.c:319
static void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
Definition: vxge_config.h:703
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR
Definition: vxge_reg.h:499
struct __vxge_hw_non_offload_db_wrapper * nofl_db
Definition: vxge_config.h:324
u64 srpcim_to_vpath_alarm_reg
Definition: vxge_reg.h:4547
static void vxge_hw_ring_rxd_offset_up(u16 *offset)
Definition: vxge_config.h:570
static void vxge_hw_fifo_txd_offset_up(u16 *offset)
Definition: vxge_config.h:576
#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)
Definition: vxge_config.h:167
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition: netdevice.c:230
u64 vpath_pcipif_int_mask
Definition: vxge_reg.h:4470
unsigned long intptr_t
Definition: stdint.h:21
#define VXGE_HW_INTR_MASK_ALL
Definition: vxge_traffic.h:29
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:130
__be32 out[4]
Definition: CIB_PRM.h:36
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT
Definition: vxge_reg.h:468
#define VXGE_INFO
Definition: vxge_config.h:50
#define VXGE_HW_VPATH_INTR_RX
Definition: vxge_traffic.h:131
#define vxge_debug(mask, fmt...)
Definition: vxge_config.h:762
#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER
Definition: vxge_config.h:159
#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT
Definition: vxge_reg.h:115
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
vxge_hw_device_intr_disable - Disable Titan interrupts.
Definition: vxge_traffic.c:296
#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW
Definition: vxge_reg.h:507
struct vxgedev * vdev
Definition: vxge_config.h:484
u64 srpcim_to_vpath_alarm_mask
Definition: vxge_reg.h:4550
struct vxge_hw_fifo_txd * vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo)
vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
Definition: vxge_traffic.c:355
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo)
vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.
Definition: vxge_traffic.c:723
#define ENOMEM
Not enough space.
Definition: errno.h:534
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR
Definition: vxge_reg.h:492
struct vxge_hw_vpath_reg * vp_reg
Definition: vxge_config.h:361
void * memcpy(void *dest, const void *src, size_t len) __nonnull
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
vxge_hw_device_unmask_all - Unmask all device interrupts.
Definition: vxge_traffic.c:232
#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR
Definition: vxge_reg.h:508
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER
Definition: vxge_config.h:214
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
Definition: vxge_traffic.c:642
#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK
Definition: vxge_reg.h:229
#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val)
Definition: vxge_config.h:226
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo, struct vxge_hw_fifo_txd *txdp, struct io_buffer *iob)
vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the descriptor.
Definition: vxge_traffic.c:375
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, struct vxge_hw_fifo_txd *txdp)
vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
Definition: vxge_traffic.c:400
#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT
Definition: vxge_reg.h:217
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT
Definition: vxge_reg.h:221
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
u64 asic_ntwk_vp_err_reg
Definition: vxge_reg.h:4242
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition: netdevice.h:788
ring len
Length.
Definition: dwmac.h:231
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
vxge_hw_device_intr_enable - Enable interrupts.
Definition: vxge_traffic.c:251
#define VXGE_HW_FIFO_TXD_GATHER_CODE(val)
Definition: vxge_config.h:220
#define VXGE_HW_MAX_VIRTUAL_PATHS
Definition: vxge_traffic.h:30
#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW
Definition: vxge_reg.h:506
uint64_t u64
Definition: stdint.h:25
FILE_LICENCE(GPL2_ONLY)
u32 tim_int_mask1[4]
Definition: vxge_config.h:512
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON
Definition: vxge_reg.h:495
#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val)
Definition: vxge_reg.h:116
struct __vxge_hw_ring_block * rxdl
Definition: vxge_config.h:370
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON
Definition: vxge_reg.h:496
#define VXGE_HW_RING_RXD_QWORDS_MODE_1
Definition: vxge_config.h:364
#define VXGE_LL_MAX_FRAME_SIZE(dev)
Definition: vxge_main.h:147
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR
Definition: vxge_reg.h:223
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo, u64 txdl_ptr, u32 num_txds)
__vxge_hw_non_offload_db_post - Post non offload doorbell
Definition: vxge_traffic.c:335
#define vxge_bVALn(bits, loc, n)
Definition: vxge_reg.h:35
u64 vpath_ppif_int_status
Definition: vxge_reg.h:4498
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:159
#define __iomem
Definition: igbvf_osdep.h:46
#define VXGE_HW_DEFAULT_32
Definition: vxge_traffic.h:36
#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP
Definition: vxge_reg.h:29
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR
Definition: vxge_reg.h:225
u64 mrpcim_to_vpath_alarm_reg
Definition: vxge_reg.h:4542
#define VXGE_HW_VPATH_INTR_TX
Definition: vxge_traffic.h:130
Definition: sis900.h:30
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR
Definition: vxge_reg.h:498
uint8_t status
Status.
Definition: ena.h:16
#define VXGE_HW_NODBW_TYPE_NODBW
Definition: vxge_config.h:295
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition: netdevice.c:548
u64 srpcim_msg_to_vpath_mask
Definition: vxge_reg.h:4476
#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)
Definition: vxge_config.h:216
Network device management.
struct __vxge_hw_device - Hal device object @magic: Magic Number @bar0: BAR0 virtual address.
Definition: vxge_config.h:477
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK
Definition: vxge_reg.h:222
#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val)
Definition: vxge_reg.h:77
char name[NETDEV_NAME_LEN]
Name of this network device.
Definition: netdevice.h:362
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev)
vxge_hw_device_begin_irq - Begin IRQ processing.
Definition: vxge_traffic.c:555
struct vxge_hw_vpath_reg * vp_reg
Definition: vxge_config.h:397
#define VXGE_HW_ALL_FOXES
Definition: vxge_traffic.h:28
void * data
Start of data.
Definition: iobuf.h:52
static void vxge_hw_ring_rxd_1b_set(struct vxge_hw_ring_rxd_1 *rxdp, struct io_buffer *iob, u32 size)
vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
Definition: vxge_config.h:597
enum vxge_hw_status vxge_hw_vpath_intr_disable(struct __vxge_hw_virtualpath *vpath)
Definition: vxge_traffic.c:150
vxge_hw_ring_tcode
enum vxge_hw_ring_tcode - Transfer codes returned by adapter @VXGE_HW_RING_T_CODE_OK: Transfer ok.
Definition: vxge_traffic.h:172
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR
Definition: vxge_reg.h:493
u64 vpath_general_int_mask
Definition: vxge_reg.h:4493
u64 srpcim_msg_to_vpath_reg
Definition: vxge_reg.h:4473
vxge_hw_status
Definition: vxge_config.h:70
#define VXGE_INTR
Definition: vxge_config.h:51
Definition: sis900.h:32
#define VXGE_HW_FIFO_TXD_INT_NUMBER(val)
Definition: vxge_config.h:237
static enum vxge_hw_status __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath)
Definition: vxge_traffic.c:418
#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC
Definition: vxge_reg.h:122
struct __vxge_hw_device * hldev
Definition: vxge_config.h:396
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val)
Definition: vxge_config.h:298
#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM
Definition: vxge_reg.h:121
u64 vpath_pcipif_int_status
Definition: vxge_reg.h:4464
u64 vpath_general_int_status
Definition: vxge_reg.h:4488
struct vxge_hw_fifo_txd * txdl
Definition: vxge_config.h:328
#define VXGE_HW_VP_NOT_OPEN
Definition: vxge_config.h:393
enum vxge_hw_status vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, struct vxge_hw_fifo_txd *txdp, enum vxge_hw_fifo_tcode tcode)
Definition: vxge_main.c:65
u64 pci_config_errors_reg
Definition: vxge_reg.h:4536
u64 titan_general_int_status
Definition: vxge_reg.h:803
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT
Definition: vxge_reg.h:469
struct vxge_hw_ring_rxd_1 rxd[VXGE_HW_MAX_RXDS_PER_BLOCK_1]
Definition: vxge_config.h:342
struct net_device * ndev
Definition: vxge_config.h:483
#define NULL
NULL pointer (VOID *)
Definition: Base.h:321
String functions.
void writeq(uint64_t data, volatile uint64_t *io_addr)
Write 64-bit qword to memory-mapped device.
struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
Definition: vxge_config.h:154
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT
Definition: vxge_reg.h:467
u64 pci_config_errors_mask
Definition: vxge_reg.h:4540
uint32_t u32
Definition: stdint.h:23
#define VXGE_ERR
Definition: vxge_config.h:54
#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ
Definition: vxge_reg.h:509
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
vxge_hw_device_mask_all - Mask all device interrupts.
Definition: vxge_traffic.c:211
#define VXGE_HW_MAX_RXDS_PER_BLOCK_1
Definition: vxge_config.h:341
#define rxd
Definition: davicom.c:146
void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ring)
vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive descriptors posted.
Definition: vxge_traffic.c:608
#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT
Definition: vxge_reg.h:227
A persistent I/O buffer.
Definition: iobuf.h:37
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the condition that has caused the Tx and RX...
Definition: vxge_traffic.c:516