iPXE
vxge_traffic.c
Go to the documentation of this file.
1/*
2 * vxge-traffic.c: iPXE driver for Neterion Inc's X3100 Series 10GbE
3 * PCIe I/O Virtualized Server Adapter.
4 *
5 * Copyright(c) 2002-2010 Neterion Inc.
6 *
7 * This software may be used and distributed according to the terms of
8 * the GNU General Public License (GPL), incorporated herein by
9 * reference. Drivers based on or derived from this code fall under
10 * the GPL and must retain the authorship, copyright and license
11 * notice.
12 *
13 */
14
15FILE_LICENCE(GPL2_ONLY);
16
17#include <ipxe/netdevice.h>
18#include <string.h>
19#include <errno.h>
20
21#include "vxge_traffic.h"
22#include "vxge_config.h"
23#include "vxge_main.h"
24
25/*
26 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
27 * @vpath: Virtual Path handle.
28 *
29 * Enable vpath interrupts. The function is to be executed the last in
30 * vpath initialization sequence.
31 *
32 * See also: vxge_hw_vpath_intr_disable()
33 */
/* NOTE(review): this doxygen-derived listing dropped every token that
 * carried a hyperlink, so the function signature (original lines 34-35)
 * and the leading halves of most register-write statements are missing.
 * Per the symbol index the prototype is:
 *   enum vxge_hw_status
 *   vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath);
 * Code below is kept byte-identical to the listing; restore the missing
 * lines from the upstream iPXE sources before compiling. */
36{
37 struct vxge_hw_vpath_reg *vp_reg;
39
 /* Only an open vpath may have its interrupts enabled; otherwise bail
  * out (the dropped line presumably sets status = VXGE_HW_ERR_VPATH_NOT_OPEN
  * — TODO confirm against upstream). */
40 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
42 goto exit;
43 }
44
45 vp_reg = vpath->vp_reg;
46
 /* Clear the pending per-vpath alarm/status registers before
  * unmasking; the surviving operands below show which registers are
  * targeted, but the write calls themselves were dropped. */
48
50 &vp_reg->general_errors_reg);
51
53 &vp_reg->pci_config_errors_reg);
54
57
60
62 &vp_reg->vpath_ppif_int_status);
63
66
69
71 &vp_reg->prc_alarm_reg);
72
74 &vp_reg->wrdma_alarm_status);
75
77 &vp_reg->asic_ntwk_vp_err_reg);
78
80 &vp_reg->xgmac_vp_int_status);
81
83
84 /* Mask unwanted interrupts */
86 &vp_reg->vpath_pcipif_int_mask);
87
90
93
96
98 &vp_reg->pci_config_errors_mask);
99
100 /* Unmask the individual interrupts */
105 &vp_reg->general_errors_mask);
106
114 &vp_reg->kdfcctl_errors_mask);
115
117
120 &vp_reg->prc_alarm_mask);
121
124
 /* Only one vpath (the device's first) is left listening for ASIC
  * network errors; the others keep them masked — presumably to avoid
  * duplicate link-state events.  NOTE(review): confirm upstream. */
125 if (vpath->hldev->first_vp_id != vpath->vp_id)
127 &vp_reg->asic_ntwk_vp_err_mask);
128 else
132 0, 32), &vp_reg->asic_ntwk_vp_err_mask);
133
135exit:
136 return status;
137
138}
139
140/*
141 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
142 * @vpath: Virtual Path handle.
143 *
144 * Disable vpath interrupts. The function is to be executed the last in
145 * vpath initialization sequence.
146 *
147 * See also: vxge_hw_vpath_intr_enable()
148 */
151{
153 struct vxge_hw_vpath_reg __iomem *vp_reg;
154
155 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
157 goto exit;
158 }
159 vp_reg = vpath->vp_reg;
160
162 &vp_reg->vpath_general_int_mask);
163
165
167 &vp_reg->general_errors_mask);
168
170 &vp_reg->pci_config_errors_mask);
171
174
177
179 &vp_reg->vpath_ppif_int_mask);
180
182 &vp_reg->srpcim_msg_to_vpath_mask);
183
185 &vp_reg->vpath_pcipif_int_mask);
186
188 &vp_reg->wrdma_alarm_mask);
189
191 &vp_reg->prc_alarm_mask);
192
194 &vp_reg->xgmac_vp_int_mask);
195
197 &vp_reg->asic_ntwk_vp_err_mask);
198
199exit:
200 return status;
201}
202
203/**
204 * vxge_hw_device_mask_all - Mask all device interrupts.
205 * @hldev: HW device handle.
206 *
207 * Mask all device interrupts.
208 *
209 * See also: vxge_hw_device_unmask_all()
210 */
212{
213 u64 val64;
214
217
220
221 return;
222}
223
224/**
225 * vxge_hw_device_unmask_all - Unmask all device interrupts.
226 * @hldev: HW device handle.
227 *
228 * Unmask all device interrupts.
229 *
230 * See also: vxge_hw_device_mask_all()
231 */
233{
235
238
239 return;
240}
241
242/**
243 * vxge_hw_device_intr_enable - Enable interrupts.
244 * @hldev: HW device handle.
245 *
246 * Enable Titan interrupts. The function is to be executed the last in
247 * Titan initialization sequence.
248 *
249 * See also: vxge_hw_device_intr_disable()
250 */
252{
253 u64 val64;
254 u32 val32;
255
257
259
260 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
262
263 if (val64 != 0) {
264 writeq(val64, &hldev->common_reg->tim_int_status0);
265
266 writeq(~val64, &hldev->common_reg->tim_int_mask0);
267 }
268
269 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
271
272 if (val32 != 0) {
274 &hldev->common_reg->tim_int_status1);
275
277 &hldev->common_reg->tim_int_mask1);
278 }
279
280 val64 = readq(&hldev->common_reg->titan_general_int_status);
281
282 /* We have not enabled the top level interrupt yet.
283 * This will be controlled from vxge_irq() entry api.
284 */
285 return;
286}
287
288/**
289 * vxge_hw_device_intr_disable - Disable Titan interrupts.
290 * @hldev: HW device handle.
291 *
292 * Disable Titan interrupts.
293 *
294 * See also: vxge_hw_device_intr_enable()
295 */
297{
299
300 /* mask all the tim interrupts */
303 &hldev->common_reg->tim_int_mask1);
304
306
307 return;
308}
309
310/**
311 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
312 * @ring: Handle to the ring object used for receive
313 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
314 *
315 * Post descriptor on the ring.
316 * Prior to posting the descriptor should be filled in accordance with
317 * Host/Titan interface specification for a given service (LL, etc.).
318 */
324
325/**
326 * __vxge_hw_non_offload_db_post - Post non offload doorbell
327 *
328 * @fifo: fifohandle
329 * @txdl_ptr: The starting location of the TxDL in host memory
330 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
331 *
332 * This function posts a non-offload doorbell to doorbell FIFO
333 *
334 */
336 u64 txdl_ptr, u32 num_txds)
337{
340 &fifo->nofl_db->control_0);
341
342 wmb();
343
344 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
345
346 wmb();
347}
348
349/**
350 * vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
351 *
352 * @fifo: tx channel handle
353 */
354struct vxge_hw_fifo_txd *
356{
357 struct vxge_hw_fifo_txd *txdp;
358
359 txdp = fifo->txdl + fifo->sw_offset;
360 if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
361 vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
362 __func__, __LINE__, fifo->sw_offset);
363 return NULL;
364 }
365
366 return txdp;
367}
368/**
369 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
370 * descriptor.
371 * @fifo: Handle to the fifo object used for non offload send
372 * @txdlh: Descriptor handle.
373 * @iob: data buffer.
374 */
376 struct vxge_hw_fifo_txd *txdp,
377 struct io_buffer *iob)
378{
381 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));
382
383 txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
385
386 txdp->host_control = (intptr_t)iob;
387 txdp->buffer_pointer = virt_to_bus(iob->data);
388}
389
390/**
391 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
392 * @fifo: Handle to the fifo object used for non offload send
393 * @txdp: Tx Descriptor
394 *
395 * Post descriptor on the 'fifo' type channel for transmission.
396 * Prior to posting the descriptor should be filled in accordance with
397 * Host/Titan interface specification for a given service (LL, etc.).
398 *
399 */
409
410/*
411 * __vxge_hw_vpath_alarm_process - Process Alarms.
412 * @vpath: Virtual Path.
413 * @skip_alarms: Do not clear the alarms
414 *
415 * Process vpath alarms.
416 *
417 */
/* NOTE(review): the listing dropped the signature line (static enum
 * vxge_hw_status __vxge_hw_vpath_alarm_process(...) per the symbol
 * index) and most of the VXGE_HW_* mask operands in the conditions
 * below.  Code kept byte-identical; restore from upstream before
 * compiling. */
419 struct __vxge_hw_virtualpath *vpath)
420{
421 u64 val64;
422 u64 alarm_status;
424 struct __vxge_hw_device *hldev = NULL;
425 struct vxge_hw_vpath_reg *vp_reg;
426
427 hldev = vpath->hldev;
428 vp_reg = vpath->vp_reg;
429 alarm_status = readq(&vp_reg->vpath_general_int_status);
430
 /* All-ones readback indicates the PCI slot has frozen */
431 if (alarm_status == VXGE_HW_ALL_FOXES) {
432
433 vxge_debug(VXGE_ERR, "%s: %s:%d, slot freeze error\n",
434 hldev->ndev->name, __func__, __LINE__);
436 goto out;
437 }
438
 /* Bail out on alarm bits outside the set this handler understands
  * (the dropped operands were the recognized *_INT mask macros) */
439 if (alarm_status & ~(
444
445 vxge_debug(VXGE_ERR, "%s: %s:%d, Unknown vpath alarm\n",
446 hldev->ndev->name, __func__, __LINE__);
448 goto out;
449 }
450
452
453 val64 = readq(&vp_reg->xgmac_vp_int_status);
454
455 if (val64 &
457
 /* XGMAC alarm: inspect the ASIC network error register to derive
  * link state */
458 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
459
 /* Fault asserted without a matching OK indication -> link down
  * (dropped operands are the XMACJ_STN_FLT/OK and
  * NTWK_REAFFIRMED_* bits — TODO confirm upstream pairing) */
460 if (((val64 &
462 (!(val64 &
464 ((val64 &
466 && (!(val64 &
468 ))) {
470 &vp_reg->asic_ntwk_vp_err_mask);
471
472 netdev_link_down(hldev->ndev);
473 vxge_debug(VXGE_INTR, "%s: %s:%d link down\n",
474 hldev->ndev->name, __func__, __LINE__);
475 }
476
 /* Mirror image of the test above: OK without fault -> link up */
477 if (((val64 &
479 (!(val64 &
481 ((val64 &
483 && (!(val64 &
485 ))) {
487 &vp_reg->asic_ntwk_vp_err_mask);
488
489 netdev_link_up(hldev->ndev);
490 vxge_debug(VXGE_INTR, "%s: %s:%d link up\n",
491 hldev->ndev->name, __func__, __LINE__);
492 }
493
 /* Acknowledge the ASIC network error condition */
495 &vp_reg->asic_ntwk_vp_err_reg);
496 }
497 } else {
498 vxge_debug(VXGE_INFO, "%s: %s:%d unhandled alarm %llx\n",
499 hldev->ndev->name, __func__, __LINE__,
500 alarm_status);
501 }
502out:
503 return status;
504}
505
506/**
507 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
508 * condition that has caused the Tx and RX interrupt.
509 * @hldev: HW device.
510 *
511 * Acknowledge (that is, clear) the condition that has caused
512 * the Tx and Rx interrupt.
513 * See also: vxge_hw_device_begin_irq(),
514 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
515 */
517{
518
519 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
520 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
523 &hldev->common_reg->tim_int_status0);
524 }
525
526 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
527 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
531 &hldev->common_reg->tim_int_status1);
532 }
533
534 return;
535}
536
537
538/**
539 * vxge_hw_device_begin_irq - Begin IRQ processing.
540 * @hldev: HW device handle.
541 *
542 * The function performs two actions, It first checks whether (shared IRQ) the
543 * interrupt was raised by the device. Next, it masks the device interrupts.
544 *
545 * Note:
546 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
547 * bridge. Therefore, two back-to-back interrupts are potentially possible.
548 *
549 * Returns: 0, if the interrupt is not "ours" (note that in this case the
550 * device remain enabled).
551 * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
552 * status.
553 */
556{
557 u64 val64;
559 u64 vpath_mask;
560 enum vxge_hw_status ret = VXGE_HW_OK;
561
562 val64 = readq(&hldev->common_reg->titan_general_int_status);
563
564 if (!val64) {
566 goto exit;
567 }
568
569 if (val64 == VXGE_HW_ALL_FOXES) {
570
572
574
575 vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
576 "occurred\n", hldev->ndev->name,
577 __func__, __LINE__);
579 goto exit;
580 }
581 }
582
583 vpath_mask = hldev->vpaths_deployed >>
586 vpath_mask))
588
591
592exit:
593 return ret;
594}
595
596/**
597 * vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive
598 * descriptors posted.
599 * @ring: Handle to the ring object used for receive
600 *
601 * The function writes the number of qwords of rxds posted during replishment.
602 * Since the function is called frequently, a flush is not required to post the
603 * write transaction. At the very least, the previous write will be flushed
604 * once the subsequent write is made.
605 *
606 * Returns: None.
607 */
609{
610 u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
612
614
616
617 if (ring->total_db_cnt >= rxds_qw_per_block) {
618 /* For each block add 4 more qwords */
620
621 /* Reset total count */
622 ring->total_db_cnt -= rxds_qw_per_block;
623 }
624
625 if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
626 wmb();
628 ring->doorbell_cnt),
629 &ring->vp_reg->prc_rxd_doorbell);
630 ring->doorbell_cnt = 0;
631 }
632}
633
634/**
635 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
636 * descriptors and process the same.
637 * @ring: Handle to the ring object used for receive
638 *
639 * The function polls the Rx for the completed descriptors.
640 */
641#define ETH_FCS_LEN 4
643{
644 struct __vxge_hw_device *hldev;
646 struct vxge_hw_ring_rxd_1 *rxd;
647 unsigned int len;
648 enum vxge_hw_ring_tcode tcode;
649 struct io_buffer *rx_iob, *iobuf = NULL;
650 u16 poll_count = 0;
651
652 hldev = ring->vpathh->hldev;
653
654 do {
655 rxd = &ring->rxdl->rxd[ring->rxd_offset];
656 tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);
657
658 /* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
659 * possible the ownership bit still set to adapter
660 */
661 if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
662 && (tcode == VXGE_HW_RING_T_CODE_OK)) {
663
665 goto err0;
666 }
667
668 vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
669 hldev->ndev->name, ring->rxd_offset);
670
671 iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;
672
673 if (tcode != VXGE_HW_RING_T_CODE_OK) {
674 netdev_rx_err(hldev->ndev, NULL, -EINVAL);
675 vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
676 __func__, __LINE__, tcode);
678 goto err1;
679 }
680
682 len -= ETH_FCS_LEN;
683
684 rx_iob = alloc_iob(len);
685 if (!rx_iob) {
686 netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
687 vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
688 __func__, __LINE__);
690 goto err1;
691 }
692
693 memcpy(iob_put(rx_iob, len), iobuf->data, len);
694 /* Add this packet to the receive queue. */
695 netdev_rx(hldev->ndev, rx_iob);
696
697err1:
698 /* repost the rxd */
699 rxd->control_0 = rxd->control_1 = 0;
703
704 /* repost the qword count for doorbell */
706
707 /* increment the descriptor offset */
709
710 } while (++poll_count < ring->rx_poll_weight);
711err0:
712 return status;
713}
714
715/**
716 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
717 * the same.
718 * @fifo: Handle to the fifo object used for non offload send
719 *
720 * The function polls the Tx for the completed descriptors and calls
721 * the driver via supplied completion callback.
722 */
724{
726 struct vxge_hw_fifo_txd *txdp;
727
728 txdp = fifo->txdl + fifo->hw_offset;
729 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
730 && (txdp->host_control)) {
731
732 vxge_xmit_compl(fifo, txdp,
734
736 }
737
738 return status;
739}
#define NULL
NULL pointer (VOID *)
Definition Base.h:322
__be32 out[4]
Definition CIB_PRM.h:8
unsigned long intptr_t
Definition stdint.h:21
#define ETH_FCS_LEN
Definition atl1e.h:45
#define rxd
Definition davicom.c:146
ring len
Length.
Definition dwmac.h:226
uint8_t status
Status.
Definition ena.h:5
Error codes.
#define __unused
Declare a variable or data structure as unused.
Definition compiler.h:573
#define FILE_LICENCE(_licence)
Declare a particular licence as applying to a file.
Definition compiler.h:896
#define EINVAL
Invalid argument.
Definition errno.h:429
#define ENOMEM
Not enough space.
Definition errno.h:535
#define __iomem
Definition igbvf_osdep.h:46
#define wmb()
Definition io.h:546
#define readq(io_addr)
Definition io.h:234
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition io.h:184
#define writeq(data, io_addr)
Definition io.h:273
uint64_t u64
Definition stdint.h:26
String functions.
void * memcpy(void *dest, const void *src, size_t len) __nonnull
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition iobuf.c:131
#define iob_put(iobuf, len)
Definition iobuf.h:125
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition iobuf.h:160
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition netdevice.c:231
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition netdevice.c:549
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition netdevice.c:587
Network device management.
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition netdevice.h:789
@ rxdp
Definition sis900.h:32
@ txdp
Definition sis900.h:30
struct __vxge_hw_device - Hal device object @magic: Magic Number @bar0: BAR0 virtual address.
struct vxge_hw_common_reg * common_reg
struct net_device * ndev
struct __vxge_hw_virtualpath virtual_path
struct vxgedev * vdev
struct __vxge_hw_non_offload_db_wrapper * nofl_db
struct vxge_hw_fifo_txd * txdl
struct vxge_hw_ring_rxd_1 rxd[VXGE_HW_MAX_RXDS_PER_BLOCK_1]
struct vxge_hw_vpath_reg * vp_reg
struct __vxge_hw_ring_block * rxdl
struct __vxge_hw_virtualpath * vpathh
struct vxge_hw_vpath_reg * vp_reg
struct __vxge_hw_device * hldev
A persistent I/O buffer.
Definition iobuf.h:38
void * data
Start of data.
Definition iobuf.h:53
char name[NETDEV_NAME_LEN]
Name of this network device.
Definition netdevice.h:363
u64 titan_general_int_status
Definition vxge_reg.h:803
struct vxge_hw_fifo_txd - Transmit Descriptor
struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
u64 srpcim_to_vpath_alarm_reg
Definition vxge_reg.h:4547
u64 vpath_general_int_status
Definition vxge_reg.h:4488
u64 mrpcim_to_vpath_alarm_reg
Definition vxge_reg.h:4542
u64 srpcim_msg_to_vpath_reg
Definition vxge_reg.h:4473
u64 srpcim_msg_to_vpath_mask
Definition vxge_reg.h:4476
u64 mrpcim_to_vpath_alarm_mask
Definition vxge_reg.h:4545
u64 vpath_pcipif_int_status
Definition vxge_reg.h:4464
u64 srpcim_to_vpath_alarm_mask
Definition vxge_reg.h:4550
#define u16
Definition vga.h:20
#define u32
Definition vga.h:21
static void vxge_hw_fifo_txd_offset_up(u16 *offset)
static void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val)
#define VXGE_HW_FIFO_TXD_GATHER_CODE(val)
#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)
#define vxge_debug(mask, fmt...)
#define VXGE_INFO
Definition vxge_config.h:50
#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)
#define VXGE_INTR
Definition vxge_config.h:51
#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)
#define VXGE_HW_NODBW_TYPE(val)
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER
#define VXGE_ERR
Definition vxge_config.h:54
#define VXGE_HW_RING_RXD_QWORDS_MODE_1
static void vxge_hw_ring_rxd_1b_set(struct vxge_hw_ring_rxd_1 *rxdp, struct io_buffer *iob, u32 size)
vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
vxge_hw_status
Definition vxge_config.h:70
@ VXGE_HW_ERR_SLOT_FREEZE
@ VXGE_HW_ERR_WRONG_IRQ
Definition vxge_config.h:88
@ VXGE_HW_FAIL
Definition vxge_config.h:72
@ VXGE_HW_OK
Definition vxge_config.h:71
@ VXGE_HW_ERR_OUT_OF_MEMORY
Definition vxge_config.h:85
@ VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS
Definition vxge_config.h:76
@ VXGE_HW_ERR_VPATH_NOT_OPEN
Definition vxge_config.h:87
#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER
#define VXGE_HW_FIFO_TXD_INT_NUMBER(val)
#define VXGE_HW_NODBW_TYPE_NODBW
static void vxge_hw_ring_rxd_offset_up(u16 *offset)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val)
#define VXGE_HW_MAX_RXDS_PER_BLOCK_1
#define VXGE_HW_VP_NOT_OPEN
enum vxge_hw_status vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, struct vxge_hw_fifo_txd *txdp, enum vxge_hw_fifo_tcode tcode)
Definition vxge_main.c:65
#define VXGE_LL_MAX_FRAME_SIZE(dev)
Definition vxge_main.h:147
#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR
Definition vxge_reg.h:4530
#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW
Definition vxge_reg.h:4529
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT
Definition vxge_reg.h:4489
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR
Definition vxge_reg.h:4514
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON
Definition vxge_reg.h:4517
#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val)
Definition vxge_reg.h:4099
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR
Definition vxge_reg.h:4247
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON
Definition vxge_reg.h:4518
#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val)
Definition vxge_reg.h:807
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR
Definition vxge_reg.h:4245
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR
Definition vxge_reg.h:4515
#define vxge_bVALn(bits, loc, n)
Definition vxge_reg.h:35
#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT
Definition vxge_reg.h:4249
#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ
Definition vxge_reg.h:4531
#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT
Definition vxge_reg.h:806
#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP
Definition vxge_reg.h:4051
#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK
Definition vxge_reg.h:4251
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT
Definition vxge_reg.h:4491
#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT
Definition vxge_reg.h:4239
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT
Definition vxge_reg.h:4492
#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW
Definition vxge_reg.h:4528
#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT
Definition vxge_reg.h:4490
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR
Definition vxge_reg.h:4520
#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM
Definition vxge_reg.h:812
u64 adapter_status
Definition vxge_reg.h:121
#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC
Definition vxge_reg.h:813
#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR
Definition vxge_reg.h:4521
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT
Definition vxge_reg.h:4243
#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK
Definition vxge_reg.h:4244
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
vxge_hw_device_unmask_all - Unmask all device interrupts.
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
vxge_hw_device_intr_enable - Enable interrupts.
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev)
vxge_hw_device_begin_irq - Begin IRQ processing.
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
vxge_hw_device_mask_all - Mask all device interrupts.
void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ring)
vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive descriptors posted.
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath)
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring __unused, struct vxge_hw_ring_rxd_1 *rxdp)
vxge_hw_ring_rxd_post - Post descriptor on the ring.
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo, u64 txdl_ptr, u32 num_txds)
__vxge_hw_non_offload_db_post - Post non offload doorbell
enum vxge_hw_status vxge_hw_vpath_intr_disable(struct __vxge_hw_virtualpath *vpath)
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the condition that has caused the Tx and RX...
static enum vxge_hw_status __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath)
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo)
vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.
struct vxge_hw_fifo_txd * vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo)
vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, struct vxge_hw_fifo_txd *txdp)
vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo, struct vxge_hw_fifo_txd *txdp, struct io_buffer *iob)
vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the descriptor.
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
vxge_hw_device_intr_disable - Disable Titan interrupts.
#define VXGE_HW_INTR_MASK_ALL
vxge_hw_ring_tcode
enum vxge_hw_ring_tcode - Transfer codes returned by adapter @VXGE_HW_RING_T_CODE_OK: Transfer ok.
@ VXGE_HW_RING_T_CODE_OK
@ VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST
#define VXGE_HW_VPATH_INTR_TX
#define VXGE_HW_ALL_FOXES
#define VXGE_HW_DEFAULT_32
#define VXGE_HW_VPATH_INTR_RX
#define VXGE_HW_MAX_VIRTUAL_PATHS