iPXE
Defines | Functions
vxge_traffic.c File Reference
#include <ipxe/netdevice.h>
#include <errno.h>
#include "vxge_traffic.h"
#include "vxge_config.h"
#include "vxge_main.h"

Go to the source code of this file.

Defines

#define ETH_FCS_LEN   4
 Length in bytes of the Ethernet frame check sequence (FCS/CRC) stripped from received frames.

Functions

 FILE_LICENCE (GPL2_ONLY)
enum vxge_hw_status vxge_hw_vpath_intr_enable (struct __vxge_hw_virtualpath *vpath)
enum vxge_hw_status vxge_hw_vpath_intr_disable (struct __vxge_hw_virtualpath *vpath)
void vxge_hw_device_mask_all (struct __vxge_hw_device *hldev)
 vxge_hw_device_mask_all - Mask all device interrupts.
void vxge_hw_device_unmask_all (struct __vxge_hw_device *hldev)
 vxge_hw_device_unmask_all - Unmask all device interrupts.
void vxge_hw_device_intr_enable (struct __vxge_hw_device *hldev)
 vxge_hw_device_intr_enable - Enable interrupts.
void vxge_hw_device_intr_disable (struct __vxge_hw_device *hldev)
 vxge_hw_device_intr_disable - Disable Titan interrupts.
void vxge_hw_ring_rxd_post (struct __vxge_hw_ring *ring __unused, struct vxge_hw_ring_rxd_1 *rxdp)
 vxge_hw_ring_rxd_post - Post descriptor on the ring.
static void __vxge_hw_non_offload_db_post (struct __vxge_hw_fifo *fifo, u64 txdl_ptr, u32 num_txds)
 __vxge_hw_non_offload_db_post - Post non offload doorbell
struct vxge_hw_fifo_txd * vxge_hw_fifo_free_txdl_get (struct __vxge_hw_fifo *fifo)
 vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
void vxge_hw_fifo_txdl_buffer_set (struct __vxge_hw_fifo *fifo, struct vxge_hw_fifo_txd *txdp, struct io_buffer *iob)
 vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the descriptor.
void vxge_hw_fifo_txdl_post (struct __vxge_hw_fifo *fifo, struct vxge_hw_fifo_txd *txdp)
 vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
static enum vxge_hw_status __vxge_hw_vpath_alarm_process (struct __vxge_hw_virtualpath *vpath)
void vxge_hw_device_clear_tx_rx (struct __vxge_hw_device *hldev)
 vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the condition that has caused the Tx and RX interrupt.
enum vxge_hw_status vxge_hw_device_begin_irq (struct __vxge_hw_device *hldev)
 vxge_hw_device_begin_irq - Begin IRQ processing.
void vxge_hw_vpath_doorbell_rx (struct __vxge_hw_ring *ring)
 vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive descriptors posted.
enum vxge_hw_status vxge_hw_vpath_poll_rx (struct __vxge_hw_ring *ring)
 vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed descriptors and process the same.
enum vxge_hw_status vxge_hw_vpath_poll_tx (struct __vxge_hw_fifo *fifo)
 vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.

Define Documentation

#define ETH_FCS_LEN   4

ETH_FCS_LEN - Length in bytes of the Ethernet frame check sequence (FCS/CRC).

Received frames include a trailing 4-byte CRC; vxge_hw_vpath_poll_rx() subtracts ETH_FCS_LEN from the hardware-reported buffer length so that the FCS is stripped before the frame is handed to the network stack.

Defined in vxge_traffic.c.

Referenced by vxge_hw_vpath_poll_rx().


Function Documentation

FILE_LICENCE ( GPL2_ONLY  )
enum vxge_hw_status vxge_hw_vpath_intr_enable ( struct __vxge_hw_virtualpath *  vpath)

Definition at line 34 of file vxge_traffic.c.

References readq(), status, and writeq().

Referenced by vxge_hw_device_intr_enable().

{
        struct vxge_hw_vpath_reg *vp_reg;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
        goto exit;
        }

        vp_reg = vpath->vp_reg;

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->general_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->pci_config_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->mrpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->srpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->vpath_ppif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->srpcim_msg_to_vpath_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->vpath_pcipif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->prc_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->wrdma_alarm_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->asic_ntwk_vp_err_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->xgmac_vp_int_status);

        readq(&vp_reg->vpath_general_int_status);

        /* Mask unwanted interrupts */
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->pci_config_errors_mask);

        /* Unmask the individual interrupts */
        writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
                &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
                &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

        if (vpath->hldev->first_vp_id != vpath->vp_id)
                __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->asic_ntwk_vp_err_mask);
        else
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT|
                        VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK),
                        0, 32), &vp_reg->asic_ntwk_vp_err_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_general_int_mask);
exit:
        return status;

}
enum vxge_hw_status vxge_hw_vpath_intr_disable ( struct __vxge_hw_virtualpath *  vpath)

Definition at line 149 of file vxge_traffic.c.

References status, and writeq().

Referenced by vxge_hw_device_intr_disable().

{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }
        vp_reg = vpath->vp_reg;

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_general_int_mask);

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);

exit:
        return status;
}
void vxge_hw_device_mask_all ( struct __vxge_hw_device *  hldev)

vxge_hw_device_mask_all - Mask all device interrupts.

: HW device handle.

Mask all device interrupts.

See also: vxge_hw_device_unmask_all()

Definition at line 210 of file vxge_traffic.c.

Referenced by vxge_hw_device_intr_disable(), vxge_hw_device_intr_enable(), and vxge_irq().

{
        u64 val64;

        val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
                        VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);

        return;
}
void vxge_hw_device_unmask_all ( struct __vxge_hw_device *  hldev)

vxge_hw_device_unmask_all - Unmask all device interrupts.

: HW device handle.

Unmask all device interrupts.

See also: vxge_hw_device_mask_all()

Definition at line 231 of file vxge_traffic.c.

Referenced by vxge_close(), and vxge_irq().

{
        u64 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);

        return;
}
void vxge_hw_device_intr_enable ( struct __vxge_hw_device *  hldev)

vxge_hw_device_intr_enable - Enable interrupts.

: HW device handle.

Enable Titan interrupts. The function is to be executed the last in Titan initialization sequence.

See also: vxge_hw_device_intr_disable()

Definition at line 250 of file vxge_traffic.c.

References readq(), vxge_hw_device_mask_all(), vxge_hw_vpath_intr_enable(), and writeq().

Referenced by vxge_open().

{
        u64 val64;
        u32 val32;

        vxge_hw_device_mask_all(hldev);

        vxge_hw_vpath_intr_enable(&hldev->virtual_path);

        val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

        if (val64 != 0) {
                writeq(val64, &hldev->common_reg->tim_int_status0);

                writeq(~val64, &hldev->common_reg->tim_int_mask0);
        }

        val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

        if (val32 != 0) {
                __vxge_hw_pio_mem_write32_upper(val32,
                                &hldev->common_reg->tim_int_status1);

                __vxge_hw_pio_mem_write32_upper(~val32,
                                &hldev->common_reg->tim_int_mask1);
        }

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        /* We have not enabled the top level interrupt yet.
         * This will be controlled from vxge_irq() entry api.
         */
        return;
}
void vxge_hw_device_intr_disable ( struct __vxge_hw_device *  hldev)

vxge_hw_device_intr_disable - Disable Titan interrupts.

: HW device handle.

Disable Titan interrupts.

See also: vxge_hw_device_intr_enable()

Definition at line 295 of file vxge_traffic.c.

References vxge_hw_device_mask_all(), vxge_hw_vpath_intr_disable(), and writeq().

Referenced by vxge_close().

{
        vxge_hw_device_mask_all(hldev);

        /* mask all the tim interrupts */
        writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
        __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
                                &hldev->common_reg->tim_int_mask1);

        vxge_hw_vpath_intr_disable(&hldev->virtual_path);

        return;
}
void vxge_hw_ring_rxd_post ( struct __vxge_hw_ring *ring  __unused,
struct vxge_hw_ring_rxd_1 *  rxdp 
)

vxge_hw_ring_rxd_post - Post descriptor on the ring.

: Handle to the ring object used for receive : Descriptor obtained via vxge_hw_ring_rxd_reserve().

Post descriptor on the ring. Prior to posting the descriptor should be filled in accordance with Host/Titan interface specification for a given service (LL, etc.).

Definition at line 318 of file vxge_traffic.c.

Referenced by vxge_hw_ring_replenish(), and vxge_hw_vpath_poll_rx().

{
        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
}
static void __vxge_hw_non_offload_db_post ( struct __vxge_hw_fifo *  fifo,
u64  txdl_ptr,
u32  num_txds 
) [static]

__vxge_hw_non_offload_db_post - Post non offload doorbell

: fifohandle : The starting location of the TxDL in host memory : The highest TxD in this TxDL (0 to 255 means 1 to 256)

This function posts a non-offload doorbell to doorbell FIFO

Definition at line 334 of file vxge_traffic.c.

References wmb, and writeq().

Referenced by vxge_hw_fifo_txdl_post().

{
        writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds),
                &fifo->nofl_db->control_0);

        wmb();

        writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

        wmb();
}
struct vxge_hw_fifo_txd* vxge_hw_fifo_free_txdl_get ( struct __vxge_hw_fifo *  fifo) [read]

vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo

: tx channel handle

Definition at line 354 of file vxge_traffic.c.

References NULL, and txdp.

Referenced by vxge_xmit().

{
        struct vxge_hw_fifo_txd *txdp;

        txdp = fifo->txdl + fifo->sw_offset;
        if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
                vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
                                __func__, __LINE__, fifo->sw_offset);
                return NULL;
        }

        return txdp;
}
void vxge_hw_fifo_txdl_buffer_set ( struct __vxge_hw_fifo *  fifo,
struct vxge_hw_fifo_txd *  txdp,
struct io_buffer iob 
)

vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the descriptor.

: Handle to the fifo object used for non offload send : Descriptor handle. : data buffer.

Definition at line 374 of file vxge_traffic.c.

References io_buffer::data, iob_len(), and virt_to_bus().

Referenced by vxge_xmit().

{
        txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
                        VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST);
        txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));

        txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
        txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

        txdp->host_control = (intptr_t)iob;
        txdp->buffer_pointer = virt_to_bus(iob->data);
}
void vxge_hw_fifo_txdl_post ( struct __vxge_hw_fifo *  fifo,
struct vxge_hw_fifo_txd *  txdp 
)

vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.

: Handle to the fifo object used for non offload send : Tx Descriptor

Post descriptor on the 'fifo' type channel for transmission. Prior to posting the descriptor should be filled in accordance with Host/Titan interface specification for a given service (LL, etc.).

Definition at line 399 of file vxge_traffic.c.

References __vxge_hw_non_offload_db_post(), and virt_to_bus().

Referenced by vxge_xmit().

{
        txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

        __vxge_hw_non_offload_db_post(fifo, (u64) virt_to_bus(txdp), 0);

        vxge_hw_fifo_txd_offset_up(&fifo->sw_offset);
}
static enum vxge_hw_status __vxge_hw_vpath_alarm_process ( struct __vxge_hw_virtualpath *  vpath) [static]

Definition at line 417 of file vxge_traffic.c.

References netdev_link_down(), netdev_link_up(), NULL, out, readq(), status, and writeq().

Referenced by vxge_hw_device_begin_irq().

{
        u64 val64;
        u64 alarm_status;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_device *hldev = NULL;
        struct vxge_hw_vpath_reg *vp_reg;

        hldev = vpath->hldev;
        vp_reg = vpath->vp_reg;
        alarm_status = readq(&vp_reg->vpath_general_int_status);

        if (alarm_status == VXGE_HW_ALL_FOXES) {

                vxge_debug(VXGE_ERR, "%s: %s:%d, slot freeze error\n",
                        hldev->ndev->name, __func__, __LINE__);
                status = VXGE_HW_ERR_SLOT_FREEZE;
                goto out;
        }

        if (alarm_status & ~(
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {

                vxge_debug(VXGE_ERR, "%s: %s:%d, Unknown vpath alarm\n",
                        hldev->ndev->name, __func__, __LINE__);
                status = VXGE_HW_FAIL;
                goto out;
        }

        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

                val64 = readq(&vp_reg->xgmac_vp_int_status);

                if (val64 &
                VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

                        val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

                        if (((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
                            (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
                            ((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                                && (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                        ))) {
                                writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                netdev_link_down(hldev->ndev);
                                vxge_debug(VXGE_INTR, "%s: %s:%d link down\n",
                                        hldev->ndev->name, __func__, __LINE__);
                        }

                        if (((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
                            (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
                            ((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                                && (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                        ))) {
                                writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                netdev_link_up(hldev->ndev);
                                vxge_debug(VXGE_INTR, "%s: %s:%d link up\n",
                                        hldev->ndev->name, __func__, __LINE__);
                        }

                        writeq(VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->asic_ntwk_vp_err_reg);
                }
        } else {
                vxge_debug(VXGE_INFO, "%s: %s:%d unhandled alarm %llx\n",
                                hldev->ndev->name, __func__, __LINE__,
                                alarm_status);
        }
out:
        return status;
}
void vxge_hw_device_clear_tx_rx ( struct __vxge_hw_device *  hldev)

vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the condition that has caused the Tx and RX interrupt.

: HW device.

Acknowledge (that is, clear) the condition that has caused the Tx and Rx interrupt. See also: vxge_hw_device_begin_irq(), vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().

Definition at line 515 of file vxge_traffic.c.

References writeq().

Referenced by vxge_hw_device_begin_irq().

{

        if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
                        (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
                        &hldev->common_reg->tim_int_status0);
        }

        if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
                        (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                        (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                        &hldev->common_reg->tim_int_status1);
        }

        return;
}
enum vxge_hw_status vxge_hw_device_begin_irq ( struct __vxge_hw_device *  hldev)

vxge_hw_device_begin_irq - Begin IRQ processing.

: HW device handle.

The function performs two actions, It first checks whether (shared IRQ) the interrupt was raised by the device. Next, it masks the device interrupts.

Note: vxge_hw_device_begin_irq() does not flush MMIO writes through the bridge. Therefore, two back-to-back interrupts are potentially possible.

Returns: 0, if the interrupt is not "ours" (note that in this case the device remain enabled). Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter status.

Definition at line 554 of file vxge_traffic.c.

References __vxge_hw_vpath_alarm_process(), readq(), ret, and vxge_hw_device_clear_tx_rx().

Referenced by vxge_poll().

{
        u64 val64;
        u64 adapter_status;
        u64 vpath_mask;
        enum vxge_hw_status ret = VXGE_HW_OK;

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        if (!val64) {
                ret = VXGE_HW_ERR_WRONG_IRQ;
                goto exit;
        }

        if (val64 == VXGE_HW_ALL_FOXES) {

                adapter_status = readq(&hldev->common_reg->adapter_status);

                if (adapter_status == VXGE_HW_ALL_FOXES) {

                        vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
                                "occurred\n", hldev->ndev->name,
                                __func__, __LINE__);
                        ret = VXGE_HW_ERR_SLOT_FREEZE;
                        goto exit;
                }
        }

        vpath_mask = hldev->vpaths_deployed >>
                                (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
        if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
                                vpath_mask))
                vxge_hw_device_clear_tx_rx(hldev);

        if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)
                ret = __vxge_hw_vpath_alarm_process(&hldev->virtual_path);

exit:
        return ret;
}
void vxge_hw_vpath_doorbell_rx ( struct __vxge_hw_ring *  ring)

vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive descriptors posted.

: Handle to the ring object used for receive

The function writes the number of qwords of rxds posted during replishment. Since the function is called frequently, a flush is not required to post the write transaction. At the very least, the previous write will be flushed once the subsequent write is made.

Returns: None.

Definition at line 607 of file vxge_traffic.c.

References wmb, and writeq().

Referenced by vxge_hw_vpath_poll_rx().

{
        u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
                VXGE_HW_RING_RXD_QWORDS_MODE_1;

        ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

        ring->total_db_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

        if (ring->total_db_cnt >= rxds_qw_per_block) {
                /* For each block add 4 more qwords */
                ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

                /* Reset total count */
                ring->total_db_cnt -= rxds_qw_per_block;
        }

        if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
                wmb();
                writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(
                        ring->doorbell_cnt),
                        &ring->vp_reg->prc_rxd_doorbell);
                ring->doorbell_cnt = 0;
        }
}
enum vxge_hw_status vxge_hw_vpath_poll_rx ( struct __vxge_hw_ring *  ring)

Definition at line 641 of file vxge_traffic.c.

References alloc_iob(), io_buffer::data, EINVAL, ENOMEM, ETH_FCS_LEN, iob_put, len, memcpy(), netdev_rx(), netdev_rx_err(), NULL, rxd, status, vxge_hw_ring_rxd_post(), and vxge_hw_vpath_doorbell_rx().

Referenced by vxge_poll().

{
        struct __vxge_hw_device *hldev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_ring_rxd_1 *rxd;
        unsigned int len;
        enum vxge_hw_ring_tcode tcode;
        struct io_buffer *rx_iob, *iobuf = NULL;
        u16 poll_count = 0;

        hldev = ring->vpathh->hldev;

        do {
                rxd = &ring->rxdl->rxd[ring->rxd_offset];
                tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);

                /* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
                 * possible the ownership bit still set to adapter
                 */
                if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
                        && (tcode == VXGE_HW_RING_T_CODE_OK)) {

                        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                        goto err0;
                }

                vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
                        hldev->ndev->name, ring->rxd_offset);

                iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;

                if (tcode != VXGE_HW_RING_T_CODE_OK) {
                        netdev_rx_err(hldev->ndev, NULL, -EINVAL);
                        vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
                                __func__, __LINE__, tcode);
                        status = VXGE_HW_FAIL;
                        goto err1;
                }

                len = VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxd->control_1);
                len -= ETH_FCS_LEN;

                rx_iob = alloc_iob(len);
                if (!rx_iob) {
                        netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
                        vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
                                __func__, __LINE__);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto err1;
                }

                memcpy(iob_put(rx_iob, len), iobuf->data, len);
                /* Add this packet to the receive queue. */
                netdev_rx(hldev->ndev, rx_iob);

err1:
                /* repost the rxd */
                rxd->control_0 = rxd->control_1 = 0;
                vxge_hw_ring_rxd_1b_set(rxd, iobuf,
                                VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
                vxge_hw_ring_rxd_post(ring, rxd);

                /* repost the qword count for doorbell */
                vxge_hw_vpath_doorbell_rx(ring);

                /* increment the descriptor offset */
                vxge_hw_ring_rxd_offset_up(&ring->rxd_offset);

        } while (++poll_count < ring->rx_poll_weight);
err0:
        return status;
}
enum vxge_hw_status vxge_hw_vpath_poll_tx ( struct __vxge_hw_fifo *  fifo)

vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.

: Handle to the fifo object used for non offload send

The function polls the Tx for the completed descriptors and calls the driver via supplied completion callback.

Definition at line 722 of file vxge_traffic.c.

References status, txdp, and vxge_xmit_compl().

Referenced by vxge_poll().

{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_fifo_txd *txdp;

        txdp = fifo->txdl + fifo->hw_offset;
        if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
                && (txdp->host_control)) {

                vxge_xmit_compl(fifo, txdp,
                        VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0));

                vxge_hw_fifo_txd_offset_up(&fifo->hw_offset);
        }

        return status;
}