iPXE
vxge_traffic.c File Reference
#include <ipxe/netdevice.h>
#include <string.h>
#include <errno.h>
#include "vxge_traffic.h"
#include "vxge_config.h"
#include "vxge_main.h"

Go to the source code of this file.

Macros

#define ETH_FCS_LEN   4
 Length of the Ethernet frame check sequence (FCS), in bytes.

Functions

 FILE_LICENCE (GPL2_ONLY)
enum vxge_hw_status vxge_hw_vpath_intr_enable (struct __vxge_hw_virtualpath *vpath)
enum vxge_hw_status vxge_hw_vpath_intr_disable (struct __vxge_hw_virtualpath *vpath)
void vxge_hw_device_mask_all (struct __vxge_hw_device *hldev)
 vxge_hw_device_mask_all - Mask all device interrupts.
void vxge_hw_device_unmask_all (struct __vxge_hw_device *hldev)
 vxge_hw_device_unmask_all - Unmask all device interrupts.
void vxge_hw_device_intr_enable (struct __vxge_hw_device *hldev)
 vxge_hw_device_intr_enable - Enable interrupts.
void vxge_hw_device_intr_disable (struct __vxge_hw_device *hldev)
 vxge_hw_device_intr_disable - Disable Titan interrupts.
void vxge_hw_ring_rxd_post (struct __vxge_hw_ring *ring __unused, struct vxge_hw_ring_rxd_1 *rxdp)
 vxge_hw_ring_rxd_post - Post descriptor on the ring.
static void __vxge_hw_non_offload_db_post (struct __vxge_hw_fifo *fifo, u64 txdl_ptr, u32 num_txds)
 __vxge_hw_non_offload_db_post - Post non offload doorbell
struct vxge_hw_fifo_txd * vxge_hw_fifo_free_txdl_get (struct __vxge_hw_fifo *fifo)
 vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
void vxge_hw_fifo_txdl_buffer_set (struct __vxge_hw_fifo *fifo, struct vxge_hw_fifo_txd *txdp, struct io_buffer *iob)
 vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the descriptor.
void vxge_hw_fifo_txdl_post (struct __vxge_hw_fifo *fifo, struct vxge_hw_fifo_txd *txdp)
 vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
static enum vxge_hw_status __vxge_hw_vpath_alarm_process (struct __vxge_hw_virtualpath *vpath)
void vxge_hw_device_clear_tx_rx (struct __vxge_hw_device *hldev)
 vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the condition that has caused the Tx and Rx interrupt.
enum vxge_hw_status vxge_hw_device_begin_irq (struct __vxge_hw_device *hldev)
 vxge_hw_device_begin_irq - Begin IRQ processing.
void vxge_hw_vpath_doorbell_rx (struct __vxge_hw_ring *ring)
 vxge_hw_vpath_doorbell_rx - Indicates to the hardware the number of qwords of receive descriptors posted.
enum vxge_hw_status vxge_hw_vpath_poll_rx (struct __vxge_hw_ring *ring)
 vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed descriptors and process the same.
enum vxge_hw_status vxge_hw_vpath_poll_tx (struct __vxge_hw_fifo *fifo)
 vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.

Macro Definition Documentation

◆ ETH_FCS_LEN

#define ETH_FCS_LEN   4

Length of the Ethernet frame check sequence (FCS), in bytes.

The FCS is subtracted from the reported buffer size before a received frame is passed to the network stack in vxge_hw_vpath_poll_rx().

Definition at line 641 of file vxge_traffic.c.

Function Documentation

◆ FILE_LICENCE()

FILE_LICENCE ( GPL2_ONLY )

◆ vxge_hw_vpath_intr_enable()

enum vxge_hw_status vxge_hw_vpath_intr_enable ( struct __vxge_hw_virtualpath * vpath)

Definition at line 35 of file vxge_traffic.c.

36{
37 struct vxge_hw_vpath_reg *vp_reg;
39
40 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
42 goto exit;
43 }
44
45 vp_reg = vpath->vp_reg;
46
48
50 &vp_reg->general_errors_reg);
51
53 &vp_reg->pci_config_errors_reg);
54
57
60
62 &vp_reg->vpath_ppif_int_status);
63
66
69
71 &vp_reg->prc_alarm_reg);
72
74 &vp_reg->wrdma_alarm_status);
75
77 &vp_reg->asic_ntwk_vp_err_reg);
78
80 &vp_reg->xgmac_vp_int_status);
81
83
84 /* Mask unwanted interrupts */
86 &vp_reg->vpath_pcipif_int_mask);
87
90
93
96
98 &vp_reg->pci_config_errors_mask);
99
100 /* Unmask the individual interrupts */
105 &vp_reg->general_errors_mask);
106
114 &vp_reg->kdfcctl_errors_mask);
115
117
120 &vp_reg->prc_alarm_mask);
121
124
125 if (vpath->hldev->first_vp_id != vpath->vp_id)
127 &vp_reg->asic_ntwk_vp_err_mask);
128 else
132 0, 32), &vp_reg->asic_ntwk_vp_err_mask);
133
135exit:
136 return status;
137
138}

References __vxge_hw_pio_mem_write32_upper(), vxge_hw_vpath_reg::asic_ntwk_vp_err_mask, vxge_hw_vpath_reg::asic_ntwk_vp_err_reg, __vxge_hw_device::first_vp_id, vxge_hw_vpath_reg::general_errors_mask, vxge_hw_vpath_reg::general_errors_reg, __vxge_hw_virtualpath::hldev, vxge_hw_vpath_reg::kdfcctl_errors_mask, vxge_hw_vpath_reg::kdfcctl_errors_reg, vxge_hw_vpath_reg::mrpcim_to_vpath_alarm_mask, vxge_hw_vpath_reg::mrpcim_to_vpath_alarm_reg, vxge_hw_vpath_reg::pci_config_errors_mask, vxge_hw_vpath_reg::pci_config_errors_reg, vxge_hw_vpath_reg::prc_alarm_mask, vxge_hw_vpath_reg::prc_alarm_reg, readq, vxge_hw_vpath_reg::srpcim_msg_to_vpath_mask, vxge_hw_vpath_reg::srpcim_msg_to_vpath_reg, vxge_hw_vpath_reg::srpcim_to_vpath_alarm_mask, vxge_hw_vpath_reg::srpcim_to_vpath_alarm_reg, status, u32, __vxge_hw_virtualpath::vp_id, __vxge_hw_virtualpath::vp_open, __vxge_hw_virtualpath::vp_reg, vxge_hw_vpath_reg::vpath_general_int_mask, vxge_hw_vpath_reg::vpath_general_int_status, vxge_hw_vpath_reg::vpath_pcipif_int_mask, vxge_hw_vpath_reg::vpath_pcipif_int_status, vxge_hw_vpath_reg::vpath_ppif_int_mask, vxge_hw_vpath_reg::vpath_ppif_int_status, vxge_bVALn, VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT, VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK, VXGE_HW_ERR_VPATH_NOT_OPEN, VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW, VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW, VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ, VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR, VXGE_HW_INTR_MASK_ALL, VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR, VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR, VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON, VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR, VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR, VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON, VXGE_HW_OK, VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, VXGE_HW_VP_NOT_OPEN, vxge_hw_vpath_reg::wrdma_alarm_mask, vxge_hw_vpath_reg::wrdma_alarm_status, writeq, vxge_hw_vpath_reg::xgmac_vp_int_mask, and vxge_hw_vpath_reg::xgmac_vp_int_status.

Referenced by vxge_hw_device_intr_enable().
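
The device-level enable documented later on this page is expected to invoke this per-vpath routine for the driver's single deployed virtual path; a minimal sketch, assuming only the __vxge_hw_device::virtual_path member listed in the references of vxge_hw_device_intr_enable():

        /* Sketch only: enable interrupts on the one virtual path
         * that this driver deploys */
        vxge_hw_vpath_intr_enable(&hldev->virtual_path);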

◆ vxge_hw_vpath_intr_disable()

enum vxge_hw_status vxge_hw_vpath_intr_disable ( struct __vxge_hw_virtualpath * vpath)

Definition at line 150 of file vxge_traffic.c.

151{
153 struct vxge_hw_vpath_reg __iomem *vp_reg;
154
155 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
157 goto exit;
158 }
159 vp_reg = vpath->vp_reg;
160
162 &vp_reg->vpath_general_int_mask);
163
165
167 &vp_reg->general_errors_mask);
168
170 &vp_reg->pci_config_errors_mask);
171
174
177
179 &vp_reg->vpath_ppif_int_mask);
180
182 &vp_reg->srpcim_msg_to_vpath_mask);
183
185 &vp_reg->vpath_pcipif_int_mask);
186
188 &vp_reg->wrdma_alarm_mask);
189
191 &vp_reg->prc_alarm_mask);
192
194 &vp_reg->xgmac_vp_int_mask);
195
197 &vp_reg->asic_ntwk_vp_err_mask);
198
199exit:
200 return status;
201}

References __iomem, __vxge_hw_pio_mem_write32_upper(), vxge_hw_vpath_reg::asic_ntwk_vp_err_mask, vxge_hw_vpath_reg::general_errors_mask, vxge_hw_vpath_reg::kdfcctl_errors_mask, vxge_hw_vpath_reg::mrpcim_to_vpath_alarm_mask, vxge_hw_vpath_reg::pci_config_errors_mask, vxge_hw_vpath_reg::prc_alarm_mask, vxge_hw_vpath_reg::srpcim_msg_to_vpath_mask, vxge_hw_vpath_reg::srpcim_to_vpath_alarm_mask, status, u32, __vxge_hw_virtualpath::vp_open, __vxge_hw_virtualpath::vp_reg, vxge_hw_vpath_reg::vpath_general_int_mask, vxge_hw_vpath_reg::vpath_pcipif_int_mask, vxge_hw_vpath_reg::vpath_ppif_int_mask, VXGE_HW_ERR_VPATH_NOT_OPEN, VXGE_HW_INTR_MASK_ALL, VXGE_HW_OK, VXGE_HW_VP_NOT_OPEN, vxge_hw_vpath_reg::wrdma_alarm_mask, writeq, and vxge_hw_vpath_reg::xgmac_vp_int_mask.

Referenced by vxge_hw_device_intr_disable().

◆ vxge_hw_device_mask_all()

void vxge_hw_device_mask_all ( struct __vxge_hw_device * hldev)

vxge_hw_device_mask_all - Mask all device interrupts.

@hldev: HW device handle.

Mask all device interrupts.

See also: vxge_hw_device_unmask_all()

Definition at line 211 of file vxge_traffic.c.

212{
213 u64 val64;
214
217
220
221 return;
222}

References __vxge_hw_pio_mem_write32_upper(), __vxge_hw_device::common_reg, vxge_hw_common_reg::titan_mask_all_int, u32, vxge_bVALn, VXGE_HW_TITAN_MASK_ALL_INT_ALARM, and VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC.

Referenced by vxge_hw_device_intr_disable(), vxge_hw_device_intr_enable(), and vxge_irq().

◆ vxge_hw_device_unmask_all()

void vxge_hw_device_unmask_all ( struct __vxge_hw_device * hldev)

vxge_hw_device_unmask_all - Unmask all device interrupts.

@hldev: HW device handle.

Unmask all device interrupts.

See also: vxge_hw_device_mask_all()

Definition at line 232 of file vxge_traffic.c.

233{
235
238
239 return;
240}

References __vxge_hw_pio_mem_write32_upper(), __vxge_hw_device::common_reg, vxge_hw_common_reg::titan_mask_all_int, u32, vxge_bVALn, and VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC.

Referenced by vxge_close(), and vxge_irq().
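
A minimal sketch of how the mask/unmask pair is typically used around interrupt servicing; the wrapper name my_vxge_irq() and its body are illustrative assumptions, not the actual vxge_irq() from vxge_main.c:

        static void my_vxge_irq(struct __vxge_hw_device *hldev)
        {
                /* Quiesce the adapter while completions are processed */
                vxge_hw_device_mask_all(hldev);

                /* ... service Tx/Rx completions here ... */

                /* Re-arm traffic interrupts */
                vxge_hw_device_unmask_all(hldev);
        }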

◆ vxge_hw_device_intr_enable()

void vxge_hw_device_intr_enable ( struct __vxge_hw_device * hldev)

vxge_hw_device_intr_enable - Enable interrupts.

@hldev: HW device handle.

Enable Titan interrupts. The function is to be executed last in the Titan initialization sequence.

See also: vxge_hw_device_intr_disable()

Definition at line 251 of file vxge_traffic.c.

252{
253 u64 val64;
254 u32 val32;
255
257
259
260 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
262
263 if (val64 != 0) {
264 writeq(val64, &hldev->common_reg->tim_int_status0);
265
266 writeq(~val64, &hldev->common_reg->tim_int_mask0);
267 }
268
269 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
271
272 if (val32 != 0) {
274 &hldev->common_reg->tim_int_status1);
275
277 &hldev->common_reg->tim_int_mask1);
278 }
279
280 val64 = readq(&hldev->common_reg->titan_general_int_status);
281
282 /* We have not enabled the top level interrupt yet.
283 * This will be controlled from vxge_irq() entry api.
284 */
285 return;
286}

References __vxge_hw_pio_mem_write32_upper(), __vxge_hw_device::common_reg, readq, __vxge_hw_device::tim_int_mask0, vxge_hw_common_reg::tim_int_mask0, __vxge_hw_device::tim_int_mask1, vxge_hw_common_reg::tim_int_mask1, vxge_hw_common_reg::tim_int_status0, vxge_hw_common_reg::tim_int_status1, vxge_hw_common_reg::titan_general_int_status, u32, __vxge_hw_device::virtual_path, vxge_hw_device_mask_all(), vxge_hw_vpath_intr_enable(), VXGE_HW_VPATH_INTR_RX, VXGE_HW_VPATH_INTR_TX, and writeq.

Referenced by vxge_open().
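
A sketch of the bring-up ordering implied by the comment in the listing above: interrupt enabling is the last step of hardware initialization, and the top-level unmask is deferred to the IRQ entry point. The wrapper my_vxge_open() is an assumption, not the actual vxge_open():

        static int my_vxge_open(struct __vxge_hw_device *hldev)
        {
                /* ... allocate and program ring/fifo resources ... */

                /* Last step of hardware initialization */
                vxge_hw_device_intr_enable(hldev);

                /* Top-level interrupts remain masked until the IRQ
                 * entry point calls vxge_hw_device_unmask_all() */
                return 0;
        }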

◆ vxge_hw_device_intr_disable()

void vxge_hw_device_intr_disable ( struct __vxge_hw_device * hldev)

vxge_hw_device_intr_disable - Disable Titan interrupts.

@hldev: HW device handle.

Disable Titan interrupts.

See also: vxge_hw_device_intr_enable()

Definition at line 296 of file vxge_traffic.c.

297{
299
300 /* mask all the tim interrupts */
303 &hldev->common_reg->tim_int_mask1);
304
306
307 return;
308}

References __vxge_hw_pio_mem_write32_upper(), __vxge_hw_device::common_reg, vxge_hw_common_reg::tim_int_mask0, vxge_hw_common_reg::tim_int_mask1, __vxge_hw_device::virtual_path, VXGE_HW_DEFAULT_32, vxge_hw_device_mask_all(), VXGE_HW_INTR_MASK_ALL, vxge_hw_vpath_intr_disable(), and writeq.

Referenced by vxge_close().

◆ vxge_hw_ring_rxd_post()

void vxge_hw_ring_rxd_post ( struct __vxge_hw_ring *ring __unused,
struct vxge_hw_ring_rxd_1 * rxdp )

vxge_hw_ring_rxd_post - Post descriptor on the ring.

@ring: Handle to the ring object used for receive @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().

Post descriptor on the ring. Prior to posting, the descriptor should be filled in accordance with the Host/Titan interface specification for a given service (LL, etc.).

Definition at line 319 of file vxge_traffic.c.

321{
323}

References __unused, rxdp, and VXGE_HW_RING_RXD_LIST_OWN_ADAPTER.

Referenced by vxge_hw_ring_replenish(), and vxge_hw_vpath_poll_rx().
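
A sketch of a replenish step built from the helpers referenced on this page; the wrapper my_vxge_refill_rxd() is an assumption, while vxge_hw_ring_rxd_1b_set() is the descriptor-fill helper referenced by vxge_hw_vpath_poll_rx() below:

        static void my_vxge_refill_rxd(struct __vxge_hw_ring *ring,
                                       struct vxge_hw_ring_rxd_1 *rxd,
                                       struct io_buffer *iob, u32 size)
        {
                /* Attach the receive buffer and its size to the RxD */
                vxge_hw_ring_rxd_1b_set(rxd, iob, size);

                /* Hand ownership of the RxD back to the adapter */
                vxge_hw_ring_rxd_post(ring, rxd);
        }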

◆ __vxge_hw_non_offload_db_post()

void __vxge_hw_non_offload_db_post ( struct __vxge_hw_fifo * fifo,
u64 txdl_ptr,
u32 num_txds )
static

__vxge_hw_non_offload_db_post - Post non offload doorbell

@fifo: fifo handle @txdl_ptr: The starting location of the TxDL in host memory @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)

This function posts a non-offload doorbell to the doorbell FIFO.

Definition at line 335 of file vxge_traffic.c.

337{
340 &fifo->nofl_db->control_0);
341
342 wmb();
343
344 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
345
346 wmb();
347}

References __vxge_hw_non_offload_db_wrapper::control_0, __vxge_hw_fifo::nofl_db, __vxge_hw_non_offload_db_wrapper::txdl_ptr, u32, VXGE_HW_NODBW_LAST_TXD_NUMBER, VXGE_HW_NODBW_TYPE, VXGE_HW_NODBW_TYPE_NODBW, wmb, and writeq.

Referenced by vxge_hw_fifo_txdl_post().
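
A sketch of how the posting routine is expected to drive this doorbell helper; the argument derivation (bus address of the TxD, and a zero-based descriptor count) is an assumption based on the parameter descriptions and reference lists above:

        /* Post a single TxD: a num_txds value of 0 means one descriptor */
        __vxge_hw_non_offload_db_post(fifo, virt_to_bus(txdp), 0);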

◆ vxge_hw_fifo_free_txdl_get()

struct vxge_hw_fifo_txd * vxge_hw_fifo_free_txdl_get ( struct __vxge_hw_fifo * fifo)

vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo

@fifo: tx channel handle

Definition at line 355 of file vxge_traffic.c.

356{
357 struct vxge_hw_fifo_txd *txdp;
358
359 txdp = fifo->txdl + fifo->sw_offset;
360 if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
361 vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
362 __func__, __LINE__, fifo->sw_offset);
363 return NULL;
364 }
365
366 return txdp;
367}

References NULL, __vxge_hw_fifo::sw_offset, __vxge_hw_fifo::txdl, txdp, vxge_debug, VXGE_ERR, and VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER.

Referenced by vxge_xmit().

◆ vxge_hw_fifo_txdl_buffer_set()

void vxge_hw_fifo_txdl_buffer_set ( struct __vxge_hw_fifo * fifo,
struct vxge_hw_fifo_txd * txdp,
struct io_buffer * iob )

vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the descriptor.

@fifo: Handle to the fifo object used for non offload send @txdlh: Descriptor handle. @iob: data buffer.

Definition at line 375 of file vxge_traffic.c.

378{
381 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));
382
383 txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
385
386 txdp->host_control = (intptr_t)iob;
387 txdp->buffer_pointer = virt_to_bus(iob->data);
388}

References io_buffer::data, iob_len(), __vxge_hw_fifo::tx_intr_num, txdp, virt_to_bus(), VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST, VXGE_HW_FIFO_TXD_BUFFER_SIZE, VXGE_HW_FIFO_TXD_GATHER_CODE, VXGE_HW_FIFO_TXD_INT_NUMBER, and VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST.

Referenced by vxge_xmit().

◆ vxge_hw_fifo_txdl_post()

void vxge_hw_fifo_txdl_post ( struct __vxge_hw_fifo * fifo,
struct vxge_hw_fifo_txd * txdp )

vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.

@fifo: Handle to the fifo object used for non offload send @txdp: Tx Descriptor

Post descriptor on the 'fifo' type channel for transmission. Prior to posting, the descriptor should be filled in accordance with the Host/Titan interface specification for a given service (LL, etc.).

Definition at line 400 of file vxge_traffic.c.

402{
404
406
408}

References __vxge_hw_non_offload_db_post(), __vxge_hw_fifo::sw_offset, txdp, virt_to_bus(), VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER, and vxge_hw_fifo_txd_offset_up().

Referenced by vxge_xmit().
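
A sketch of the transmit path implied by the three fifo calls above; the wrapper my_vxge_xmit() and its error handling are assumptions, not the actual vxge_xmit() from vxge_main.c:

        static int my_vxge_xmit(struct __vxge_hw_fifo *fifo,
                                struct io_buffer *iob)
        {
                struct vxge_hw_fifo_txd *txdp;

                /* Fetch the next free TxD, if any */
                txdp = vxge_hw_fifo_free_txdl_get(fifo);
                if (!txdp)
                        return -ENOBUFS;   /* assumed error code */

                /* Fill in the buffer pointer and length */
                vxge_hw_fifo_txdl_buffer_set(fifo, txdp, iob);

                /* Hand the TxD to the adapter and ring the doorbell */
                vxge_hw_fifo_txdl_post(fifo, txdp);

                return 0;
        }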

◆ __vxge_hw_vpath_alarm_process()

enum vxge_hw_status __vxge_hw_vpath_alarm_process ( struct __vxge_hw_virtualpath * vpath)
static

Definition at line 418 of file vxge_traffic.c.

420{
421 u64 val64;
422 u64 alarm_status;
424 struct __vxge_hw_device *hldev = NULL;
425 struct vxge_hw_vpath_reg *vp_reg;
426
427 hldev = vpath->hldev;
428 vp_reg = vpath->vp_reg;
429 alarm_status = readq(&vp_reg->vpath_general_int_status);
430
431 if (alarm_status == VXGE_HW_ALL_FOXES) {
432
433 vxge_debug(VXGE_ERR, "%s: %s:%d, slot freeze error\n",
434 hldev->ndev->name, __func__, __LINE__);
436 goto out;
437 }
438
439 if (alarm_status & ~(
444
445 vxge_debug(VXGE_ERR, "%s: %s:%d, Unknown vpath alarm\n",
446 hldev->ndev->name, __func__, __LINE__);
448 goto out;
449 }
450
452
453 val64 = readq(&vp_reg->xgmac_vp_int_status);
454
455 if (val64 &
457
458 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
459
460 if (((val64 &
462 (!(val64 &
464 ((val64 &
466 && (!(val64 &
468 ))) {
470 &vp_reg->asic_ntwk_vp_err_mask);
471
472 netdev_link_down(hldev->ndev);
473 vxge_debug(VXGE_INTR, "%s: %s:%d link down\n",
474 hldev->ndev->name, __func__, __LINE__);
475 }
476
477 if (((val64 &
479 (!(val64 &
481 ((val64 &
483 && (!(val64 &
485 ))) {
487 &vp_reg->asic_ntwk_vp_err_mask);
488
489 netdev_link_up(hldev->ndev);
490 vxge_debug(VXGE_INTR, "%s: %s:%d link up\n",
491 hldev->ndev->name, __func__, __LINE__);
492 }
493
495 &vp_reg->asic_ntwk_vp_err_reg);
496 }
497 } else {
498 vxge_debug(VXGE_INFO, "%s: %s:%d unhandled alarm %llx\n",
499 hldev->ndev->name, __func__, __LINE__,
500 alarm_status);
501 }
502out:
503 return status;
504}

References vxge_hw_vpath_reg::asic_ntwk_vp_err_mask, vxge_hw_vpath_reg::asic_ntwk_vp_err_reg, __vxge_hw_virtualpath::hldev, net_device::name, __vxge_hw_device::ndev, netdev_link_down(), netdev_link_up(), NULL, out, readq, status, __vxge_hw_virtualpath::vp_reg, vxge_hw_vpath_reg::vpath_general_int_status, vxge_debug, VXGE_ERR, VXGE_HW_ALL_FOXES, VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR, VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR, VXGE_HW_ERR_SLOT_FREEZE, VXGE_HW_FAIL, VXGE_HW_INTR_MASK_ALL, VXGE_HW_OK, VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT, VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT, VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT, VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT, VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT, VXGE_INFO, VXGE_INTR, writeq, and vxge_hw_vpath_reg::xgmac_vp_int_status.

Referenced by vxge_hw_device_begin_irq().

◆ vxge_hw_device_clear_tx_rx()

void vxge_hw_device_clear_tx_rx ( struct __vxge_hw_device * hldev)

vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the condition that has caused the Tx and Rx interrupt.

@hldev: HW device.

Acknowledge (that is, clear) the condition that has caused the Tx and Rx interrupt. See also: vxge_hw_device_begin_irq(), vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().

Definition at line 516 of file vxge_traffic.c.

517{
518
519 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
520 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
523 &hldev->common_reg->tim_int_status0);
524 }
525
526 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
527 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
531 &hldev->common_reg->tim_int_status1);
532 }
533
534 return;
535}

References __vxge_hw_pio_mem_write32_upper(), __vxge_hw_device::common_reg, __vxge_hw_device::tim_int_mask0, __vxge_hw_device::tim_int_mask1, vxge_hw_common_reg::tim_int_status0, vxge_hw_common_reg::tim_int_status1, VXGE_HW_VPATH_INTR_RX, VXGE_HW_VPATH_INTR_TX, and writeq.

Referenced by vxge_hw_device_begin_irq().

◆ vxge_hw_device_begin_irq()

enum vxge_hw_status vxge_hw_device_begin_irq ( struct __vxge_hw_device * hldev)

vxge_hw_device_begin_irq - Begin IRQ processing.

@hldev: HW device handle.

The function performs two actions. First, it checks whether the interrupt was raised by the device (relevant for a shared IRQ). Next, it masks the device interrupts.

Note: vxge_hw_device_begin_irq() does not flush MMIO writes through the bridge. Therefore, two back-to-back interrupts are potentially possible.

Returns: 0, if the interrupt is not "ours" (note that in this case the device remains enabled). Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter status.

Definition at line 555 of file vxge_traffic.c.

556{
557 u64 val64;
559 u64 vpath_mask;
560 enum vxge_hw_status ret = VXGE_HW_OK;
561
562 val64 = readq(&hldev->common_reg->titan_general_int_status);
563
564 if (!val64) {
566 goto exit;
567 }
568
569 if (val64 == VXGE_HW_ALL_FOXES) {
570
572
574
575 vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
576 "occurred\n", hldev->ndev->name,
577 __func__, __LINE__);
579 goto exit;
580 }
581 }
582
583 vpath_mask = hldev->vpaths_deployed >>
586 vpath_mask))
588
591
592exit:
593 return ret;
594}

References __vxge_hw_vpath_alarm_process(), adapter_status, vxge_hw_common_reg::adapter_status, __vxge_hw_device::common_reg, net_device::name, __vxge_hw_device::ndev, readq, vxge_hw_common_reg::titan_general_int_status, __vxge_hw_device::virtual_path, __vxge_hw_device::vpaths_deployed, vxge_debug, VXGE_ERR, VXGE_HW_ALL_FOXES, vxge_hw_device_clear_tx_rx(), VXGE_HW_ERR_SLOT_FREEZE, VXGE_HW_ERR_WRONG_IRQ, VXGE_HW_MAX_VIRTUAL_PATHS, VXGE_HW_OK, VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT, and VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT.

Referenced by vxge_poll().
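
A sketch of a poll routine assembled from the calls documented on this page; the wrapper my_vxge_poll() and the way the ring and fifo handles are obtained are assumptions, not the actual vxge_poll():

        static void my_vxge_poll(struct __vxge_hw_device *hldev,
                                 struct __vxge_hw_ring *ring,
                                 struct __vxge_hw_fifo *fifo)
        {
                /* Check whether the interrupt is ours; this also masks
                 * the device interrupts */
                if (vxge_hw_device_begin_irq(hldev) != VXGE_HW_OK)
                        return;

                /* Acknowledge the Tx/Rx interrupt condition */
                vxge_hw_device_clear_tx_rx(hldev);

                /* Process completed receive and transmit descriptors */
                vxge_hw_vpath_poll_rx(ring);
                vxge_hw_vpath_poll_tx(fifo);
        }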

◆ vxge_hw_vpath_doorbell_rx()

void vxge_hw_vpath_doorbell_rx ( struct __vxge_hw_ring * ring)

vxge_hw_vpath_doorbell_rx - Indicates to the hardware the number of qwords of receive descriptors posted.

@ring: Handle to the ring object used for receive

The function writes the number of qwords of RxDs posted during replenishment. Since the function is called frequently, a flush is not required to post the write transaction. At the very least, the previous write will be flushed once the subsequent write is made.

Returns: None.

Definition at line 608 of file vxge_traffic.c.

609{
610 u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
612
614
616
617 if (ring->total_db_cnt >= rxds_qw_per_block) {
618 /* For each block add 4 more qwords */
620
621 /* Reset total count */
622 ring->total_db_cnt -= rxds_qw_per_block;
623 }
624
625 if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
626 wmb();
628 ring->doorbell_cnt),
629 &ring->vp_reg->prc_rxd_doorbell);
630 ring->doorbell_cnt = 0;
631 }
632}

References __vxge_hw_ring::doorbell_cnt, vxge_hw_vpath_reg::prc_rxd_doorbell, __vxge_hw_ring::rxd_qword_limit, __vxge_hw_ring::total_db_cnt, u32, __vxge_hw_ring::vp_reg, VXGE_HW_MAX_RXDS_PER_BLOCK_1, VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT, VXGE_HW_RING_RXD_QWORDS_MODE_1, wmb, and writeq.

Referenced by vxge_hw_vpath_poll_rx().

◆ vxge_hw_vpath_poll_rx()

enum vxge_hw_status vxge_hw_vpath_poll_rx ( struct __vxge_hw_ring * ring)

vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed descriptors and process the same.

@ring: Handle to the ring object used for receive

The function polls the Rx for the completed descriptors.

Definition at line 642 of file vxge_traffic.c.

643{
644 struct __vxge_hw_device *hldev;
646 struct vxge_hw_ring_rxd_1 *rxd;
647 unsigned int len;
648 enum vxge_hw_ring_tcode tcode;
649 struct io_buffer *rx_iob, *iobuf = NULL;
650 u16 poll_count = 0;
651
652 hldev = ring->vpathh->hldev;
653
654 do {
655 rxd = &ring->rxdl->rxd[ring->rxd_offset];
656 tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);
657
658 /* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
659 * possible the ownership bit still set to adapter
660 */
661 if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
662 && (tcode == VXGE_HW_RING_T_CODE_OK)) {
663
665 goto err0;
666 }
667
668 vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
669 hldev->ndev->name, ring->rxd_offset);
670
671 iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;
672
673 if (tcode != VXGE_HW_RING_T_CODE_OK) {
674 netdev_rx_err(hldev->ndev, NULL, -EINVAL);
675 vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
676 __func__, __LINE__, tcode);
678 goto err1;
679 }
680
682 len -= ETH_FCS_LEN;
683
684 rx_iob = alloc_iob(len);
685 if (!rx_iob) {
686 netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
687 vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
688 __func__, __LINE__);
690 goto err1;
691 }
692
693 memcpy(iob_put(rx_iob, len), iobuf->data, len);
694 /* Add this packet to the receive queue. */
695 netdev_rx(hldev->ndev, rx_iob);
696
697err1:
698 /* repost the rxd */
699 rxd->control_0 = rxd->control_1 = 0;
703
704 /* repost the qword count for doorbell */
706
707 /* increment the descriptor offset */
709
710 } while (++poll_count < ring->rx_poll_weight);
711err0:
712 return status;
713}
References alloc_iob(), io_buffer::data, EINVAL, ENOMEM, ETH_FCS_LEN, __vxge_hw_virtualpath::hldev, iob_put, len, memcpy(), net_device::name, __vxge_hw_device::ndev, netdev_rx(), netdev_rx_err(), NULL, __vxge_hw_ring_block::rxd, rxd, __vxge_hw_ring::rxd_offset, __vxge_hw_ring::rxdl, status, u16, __vxge_hw_device::vdev, __vxge_hw_ring::vpathh, vxge_debug, VXGE_ERR, VXGE_HW_ERR_OUT_OF_MEMORY, VXGE_HW_FAIL, VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS, VXGE_HW_OK, VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET, vxge_hw_ring_rxd_1b_set(), VXGE_HW_RING_RXD_LIST_OWN_ADAPTER, vxge_hw_ring_rxd_offset_up(), vxge_hw_ring_rxd_post(), VXGE_HW_RING_RXD_T_CODE_GET, VXGE_HW_RING_T_CODE_OK, vxge_hw_vpath_doorbell_rx(), VXGE_INFO, and VXGE_LL_MAX_FRAME_SIZE.

Referenced by vxge_poll().

◆ vxge_hw_vpath_poll_tx()

enum vxge_hw_status vxge_hw_vpath_poll_tx ( struct __vxge_hw_fifo * fifo)

vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.

@fifo: Handle to the fifo object used for non offload send

The function polls the Tx for the completed descriptors and calls the driver via the supplied completion callback.

Definition at line 723 of file vxge_traffic.c.

724{
726 struct vxge_hw_fifo_txd *txdp;
727
728 txdp = fifo->txdl + fifo->hw_offset;
729 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
730 && (txdp->host_control)) {
731
732 vxge_xmit_compl(fifo, txdp,
734
736 }
737
738 return status;
739}

References __vxge_hw_fifo::hw_offset, status, __vxge_hw_fifo::txdl, txdp, VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER, vxge_hw_fifo_txd_offset_up(), VXGE_HW_FIFO_TXD_T_CODE_GET, VXGE_HW_OK, and vxge_xmit_compl().

Referenced by vxge_poll().