iPXE
vxge_traffic.h File Reference
#include <stdint.h>
#include <ipxe/if_ether.h>
#include <ipxe/iobuf.h>
#include "vxge_reg.h"
#include "vxge_version.h"

Go to the source code of this file.

Macros

#define VXGE_TRAFFIC_H
#define VXGE_HW_DTR_MAX_T_CODE   16
#define VXGE_HW_ALL_FOXES   0xFFFFFFFFFFFFFFFFULL
#define VXGE_HW_INTR_MASK_ALL   0xFFFFFFFFFFFFFFFFULL
#define VXGE_HW_MAX_VIRTUAL_PATHS   17
#define VXGE_HW_MAX_VIRTUAL_FUNCTIONS   8
#define VXGE_HW_MAC_MAX_MAC_PORT_ID   3
#define VXGE_HW_DEFAULT_32   0xffffffff
#define VXGE_HW_HEADER_802_2_SIZE   3
#define VXGE_HW_HEADER_SNAP_SIZE   5
#define VXGE_HW_HEADER_VLAN_SIZE   4
#define VXGE_HW_MAC_HEADER_MAX_SIZE
#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN   0x12
#define VXGE_HW_HEADER_802_2_SNAP_ALIGN   2
#define VXGE_HW_HEADER_802_2_ALIGN   3
#define VXGE_HW_HEADER_SNAP_ALIGN   1
#define VXGE_HW_L3_CKSUM_OK   0xFFFF
#define VXGE_HW_L4_CKSUM_OK   0xFFFF
#define TRUE   1
#define FALSE   0
#define VXGE_HW_EVENT_BASE   0
#define VXGE_LL_EVENT_BASE   100
#define VXGE_HW_MAX_INTR_PER_VP   4
#define VXGE_HW_VPATH_INTR_TX   0
#define VXGE_HW_VPATH_INTR_RX   1
#define VXGE_HW_VPATH_INTR_EINTA   2
#define VXGE_HW_VPATH_INTR_BMAP   3
#define VXGE_HW_BLOCK_SIZE   4096
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL   17
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL   18
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL   19
#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH   63

Enumerations

enum  vxge_hw_event {
  VXGE_HW_EVENT_UNKNOWN = 0 , VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1 , VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2 , VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3 ,
  VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4 , VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5 , VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6 , VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7 ,
  VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8 , VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9 , VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10 , VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11 ,
  VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12 , VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13 , VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14
}
 enum vxge_hw_event - Enumerates slow-path HW events. More...
enum  vxge_hw_ring_tcode {
  VXGE_HW_RING_T_CODE_OK = 0x0 , VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1 , VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2 , VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3 ,
  VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5 , VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6 , VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7 , VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8 ,
  VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9 , VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA , VXGE_HW_RING_T_CODE_FRM_DROP = 0xC , VXGE_HW_RING_T_CODE_UNUSED = 0xE ,
  VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
}
 enum vxge_hw_ring_tcode - Transfer codes returned by adapter @VXGE_HW_RING_T_CODE_OK: Transfer ok. More...
enum  vxge_hw_fifo_gather_code { VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2 , VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0 , VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1 , VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3 }
 enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL. More...
enum  vxge_hw_fifo_tcode {
  VXGE_HW_FIFO_T_CODE_OK = 0x0 , VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1 , VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2 , VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3 ,
  VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4 , VXGE_HW_FIFO_T_CODE_UNUSED = 0x7 , VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
}
 enum vxge_hw_fifo_tcode - tcodes used in fifo @VXGE_HW_FIFO_T_CODE_OK: Transfer OK @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or frame data) returned with corrupt data. More...

Functions

 FILE_LICENCE (GPL2_ONLY)
enum vxge_hw_status vxge_hw_ring_replenish (struct __vxge_hw_ring *ring)
void vxge_hw_ring_rxd_post (struct __vxge_hw_ring *ring_handle, struct vxge_hw_ring_rxd_1 *rxdp)
void vxge_hw_fifo_txdl_buffer_set (struct __vxge_hw_fifo *fifo, struct vxge_hw_fifo_txd *txdp, struct io_buffer *iob)
 vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the descriptor.
void vxge_hw_fifo_txdl_post (struct __vxge_hw_fifo *fifo, struct vxge_hw_fifo_txd *txdp)
 vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
enum vxge_hw_status __vxge_hw_ring_create (struct __vxge_hw_virtualpath *vpath, struct __vxge_hw_ring *ring)
enum vxge_hw_status __vxge_hw_ring_delete (struct __vxge_hw_ring *ringh)
enum vxge_hw_status __vxge_hw_fifo_create (struct __vxge_hw_virtualpath *vpath, struct __vxge_hw_fifo *fifo)
enum vxge_hw_status __vxge_hw_fifo_delete (struct __vxge_hw_fifo *fifo)
enum vxge_hw_status __vxge_hw_vpath_reset (struct __vxge_hw_device *devh, u32 vp_id)
enum vxge_hw_status __vxge_hw_vpath_enable (struct __vxge_hw_device *devh, u32 vp_id)
void __vxge_hw_vpath_prc_configure (struct __vxge_hw_device *hldev)
enum vxge_hw_status __vxge_hw_vpath_kdfc_configure (struct __vxge_hw_device *devh, u32 vp_id)
enum vxge_hw_status __vxge_hw_vpath_mac_configure (struct __vxge_hw_device *devh)
enum vxge_hw_status __vxge_hw_vpath_tim_configure (struct __vxge_hw_device *devh, u32 vp_id)
enum vxge_hw_status __vxge_hw_vpath_initialize (struct __vxge_hw_device *devh, u32 vp_id)
enum vxge_hw_status __vxge_hw_vp_initialize (struct __vxge_hw_device *hldev, u32 vp_id, struct __vxge_hw_virtualpath *vpath)
void __vxge_hw_vp_terminate (struct __vxge_hw_device *hldev, struct __vxge_hw_virtualpath *vpath)
enum vxge_hw_status vxge_hw_device_begin_irq (struct __vxge_hw_device *hldev)
 vxge_hw_device_begin_irq - Begin IRQ processing.
void vxge_hw_device_intr_enable (struct __vxge_hw_device *hldev)
 vxge_hw_device_intr_enable - Enable interrupts.
void vxge_hw_device_intr_disable (struct __vxge_hw_device *hldev)
 vxge_hw_device_intr_disable - Disable Titan interrupts.
void vxge_hw_device_mask_all (struct __vxge_hw_device *hldev)
 vxge_hw_device_mask_all - Mask all device interrupts.
void vxge_hw_device_unmask_all (struct __vxge_hw_device *hldev)
 vxge_hw_device_unmask_all - Unmask all device interrupts.
void vxge_hw_vpath_doorbell_rx (struct __vxge_hw_ring *ringh)
 vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive descriptors posted.
enum vxge_hw_status vxge_hw_vpath_poll_rx (struct __vxge_hw_ring *ringh)
enum vxge_hw_status vxge_hw_vpath_poll_tx (struct __vxge_hw_fifo *fifo)
 vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.
struct vxge_hw_fifo_txd * vxge_hw_fifo_free_txdl_get (struct __vxge_hw_fifo *fifo)
 vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo

Macro Definition Documentation

◆ VXGE_TRAFFIC_H

#define VXGE_TRAFFIC_H

Definition at line 18 of file vxge_traffic.h.

◆ VXGE_HW_DTR_MAX_T_CODE

#define VXGE_HW_DTR_MAX_T_CODE   16

Definition at line 27 of file vxge_traffic.h.

◆ VXGE_HW_ALL_FOXES

#define VXGE_HW_ALL_FOXES   0xFFFFFFFFFFFFFFFFULL

Definition at line 28 of file vxge_traffic.h.

Referenced by __vxge_hw_vpath_alarm_process(), and vxge_hw_device_begin_irq().

◆ VXGE_HW_INTR_MASK_ALL

#define VXGE_HW_INTR_MASK_ALL   0xFFFFFFFFFFFFFFFFULL

◆ VXGE_HW_MAX_VIRTUAL_PATHS

◆ VXGE_HW_MAX_VIRTUAL_FUNCTIONS

#define VXGE_HW_MAX_VIRTUAL_FUNCTIONS   8

Definition at line 32 of file vxge_traffic.h.

◆ VXGE_HW_MAC_MAX_MAC_PORT_ID

#define VXGE_HW_MAC_MAX_MAC_PORT_ID   3

Definition at line 34 of file vxge_traffic.h.

Referenced by __vxge_hw_vpath_mgmt_read(), and vxge_hw_vpath_strip_fcs_check().

◆ VXGE_HW_DEFAULT_32

#define VXGE_HW_DEFAULT_32   0xffffffff

Definition at line 36 of file vxge_traffic.h.

Referenced by vxge_hw_device_intr_disable().

◆ VXGE_HW_HEADER_802_2_SIZE

#define VXGE_HW_HEADER_802_2_SIZE   3

Definition at line 38 of file vxge_traffic.h.

◆ VXGE_HW_HEADER_SNAP_SIZE

#define VXGE_HW_HEADER_SNAP_SIZE   5

Definition at line 39 of file vxge_traffic.h.

◆ VXGE_HW_HEADER_VLAN_SIZE

#define VXGE_HW_HEADER_VLAN_SIZE   4

Definition at line 40 of file vxge_traffic.h.

◆ VXGE_HW_MAC_HEADER_MAX_SIZE

#define VXGE_HW_MAC_HEADER_MAX_SIZE
Value:
(ETH_HLEN + \
#define ETH_HLEN
Definition if_ether.h:10
#define VXGE_HW_HEADER_802_2_SIZE
#define VXGE_HW_HEADER_SNAP_SIZE
#define VXGE_HW_HEADER_VLAN_SIZE

Definition at line 41 of file vxge_traffic.h.

41#define VXGE_HW_MAC_HEADER_MAX_SIZE \
42 (ETH_HLEN + \
43 VXGE_HW_HEADER_802_2_SIZE + \
44 VXGE_HW_HEADER_VLAN_SIZE + \
45 VXGE_HW_HEADER_SNAP_SIZE)

Referenced by __vxge_hw_vpath_mgmt_read(), and vxge_hw_vpath_mtu_set().

◆ VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN

#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN   0x12

Definition at line 61 of file vxge_traffic.h.

◆ VXGE_HW_HEADER_802_2_SNAP_ALIGN

#define VXGE_HW_HEADER_802_2_SNAP_ALIGN   2

Definition at line 62 of file vxge_traffic.h.

◆ VXGE_HW_HEADER_802_2_ALIGN

#define VXGE_HW_HEADER_802_2_ALIGN   3

Definition at line 63 of file vxge_traffic.h.

◆ VXGE_HW_HEADER_SNAP_ALIGN

#define VXGE_HW_HEADER_SNAP_ALIGN   1

Definition at line 64 of file vxge_traffic.h.

◆ VXGE_HW_L3_CKSUM_OK

#define VXGE_HW_L3_CKSUM_OK   0xFFFF

Definition at line 66 of file vxge_traffic.h.

◆ VXGE_HW_L4_CKSUM_OK

#define VXGE_HW_L4_CKSUM_OK   0xFFFF

Definition at line 67 of file vxge_traffic.h.

◆ TRUE

#define TRUE   1

Definition at line 78 of file vxge_traffic.h.

◆ FALSE

#define FALSE   0

Definition at line 82 of file vxge_traffic.h.

◆ VXGE_HW_EVENT_BASE

#define VXGE_HW_EVENT_BASE   0

Definition at line 86 of file vxge_traffic.h.

◆ VXGE_LL_EVENT_BASE

#define VXGE_LL_EVENT_BASE   100

Definition at line 87 of file vxge_traffic.h.

◆ VXGE_HW_MAX_INTR_PER_VP

#define VXGE_HW_MAX_INTR_PER_VP   4

Definition at line 129 of file vxge_traffic.h.

Referenced by __vxge_hw_fifo_create(), and __vxge_hw_vpath_tim_configure().

◆ VXGE_HW_VPATH_INTR_TX

#define VXGE_HW_VPATH_INTR_TX   0

◆ VXGE_HW_VPATH_INTR_RX

#define VXGE_HW_VPATH_INTR_RX   1

◆ VXGE_HW_VPATH_INTR_EINTA

#define VXGE_HW_VPATH_INTR_EINTA   2

Definition at line 132 of file vxge_traffic.h.

Referenced by __vxge_hw_vpath_tim_configure().

◆ VXGE_HW_VPATH_INTR_BMAP

#define VXGE_HW_VPATH_INTR_BMAP   3

Definition at line 133 of file vxge_traffic.h.

Referenced by __vxge_hw_vpath_tim_configure().

◆ VXGE_HW_BLOCK_SIZE

#define VXGE_HW_BLOCK_SIZE   4096

Definition at line 135 of file vxge_traffic.h.

◆ VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL

#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL   17

Definition at line 137 of file vxge_traffic.h.

Referenced by __vxge_hw_vpath_tim_configure().

◆ VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL

#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL   18

Definition at line 138 of file vxge_traffic.h.

Referenced by __vxge_hw_vpath_tim_configure().

◆ VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL

#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL   19

Definition at line 139 of file vxge_traffic.h.

◆ VXGE_HW_TIM_UTIL_SEL_PER_VPATH

#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH   63

Definition at line 140 of file vxge_traffic.h.

Enumeration Type Documentation

◆ vxge_hw_event

enum vxge_hw_event - Enumerates slow-path HW events.

@VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event. @VXGE_HW_EVENT_SERR: Serious vpath hardware error event. @VXGE_HW_EVENT_ECCERR: vpath ECC error event. @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error. @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event. @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event. @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event. @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish slot-freeze from the rest critical events (e.g. ECC) when it is impossible to PIO read "through" the bus, i.e. when getting all-foxes.

enum vxge_hw_event enumerates slow-path HW events.

See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{}, vxge_uld_link_down_f{}.

Enumerator
VXGE_HW_EVENT_UNKNOWN 
VXGE_HW_EVENT_RESET_START 
VXGE_HW_EVENT_RESET_COMPLETE 
VXGE_HW_EVENT_LINK_DOWN 
VXGE_HW_EVENT_LINK_UP 
VXGE_HW_EVENT_ALARM_CLEARED 
VXGE_HW_EVENT_ECCERR 
VXGE_HW_EVENT_MRPCIM_ECCERR 
VXGE_HW_EVENT_FIFO_ERR 
VXGE_HW_EVENT_VPATH_ERR 
VXGE_HW_EVENT_CRITICAL_ERR 
VXGE_HW_EVENT_SERR 
VXGE_HW_EVENT_SRPCIM_SERR 
VXGE_HW_EVENT_MRPCIM_SERR 
VXGE_HW_EVENT_SLOT_FREEZE 

Definition at line 110 of file vxge_traffic.h.

110 {
112 /* HW events */
127};
#define VXGE_HW_EVENT_BASE
Definition vxge_config.h:61
@ VXGE_HW_EVENT_ALARM_CLEARED
@ VXGE_HW_EVENT_FIFO_ERR
@ VXGE_HW_EVENT_RESET_START
@ VXGE_HW_EVENT_SERR
@ VXGE_HW_EVENT_LINK_DOWN
@ VXGE_HW_EVENT_CRITICAL_ERR
@ VXGE_HW_EVENT_VPATH_ERR
@ VXGE_HW_EVENT_RESET_COMPLETE
@ VXGE_HW_EVENT_MRPCIM_SERR
@ VXGE_HW_EVENT_ECCERR
@ VXGE_HW_EVENT_SRPCIM_SERR
@ VXGE_HW_EVENT_MRPCIM_ECCERR
@ VXGE_HW_EVENT_UNKNOWN
@ VXGE_HW_EVENT_SLOT_FREEZE
@ VXGE_HW_EVENT_LINK_UP

◆ vxge_hw_ring_tcode

enum vxge_hw_ring_tcode - Transfer codes returned by adapter @VXGE_HW_RING_T_CODE_OK: Transfer ok.

@VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation configuration mismatch. @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation configuration mismatch. @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum presentation configuration mismatch. @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error unparseable packet, such as unknown IPv6 header. @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error frame integrity error, such as FCS or ECC. @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error the RxD buffer(s) were not appropriately sized and data loss occurred. @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error RxD corrupted. @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow the contents of Segment1 exceeded the capacity of Buffer1 and the remainder was placed in Buffer2. Segment2 now starts in Buffer3. No data loss or errors occurred. @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 one of the RxDs assigned buffers has a size of 0 bytes. @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to VPath Reset or because of a VPIN mismatch. @VXGE_HW_RING_T_CODE_UNUSED: Unused @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors more than one transfer code condition occurred.

Transfer codes returned by adapter.

Enumerator
VXGE_HW_RING_T_CODE_OK 
VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH 
VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH 
VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH 
VXGE_HW_RING_T_CODE_L3_PKT_ERR 
VXGE_HW_RING_T_CODE_L2_FRM_ERR 
VXGE_HW_RING_T_CODE_BUF_SIZE_ERR 
VXGE_HW_RING_T_CODE_INT_ECC_ERR 
VXGE_HW_RING_T_CODE_BENIGN_OVFLOW 
VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF 
VXGE_HW_RING_T_CODE_FRM_DROP 
VXGE_HW_RING_T_CODE_UNUSED 
VXGE_HW_RING_T_CODE_MULTI_ERR 

Definition at line 172 of file vxge_traffic.h.

172 {
186};
@ VXGE_HW_RING_T_CODE_BENIGN_OVFLOW
@ VXGE_HW_RING_T_CODE_L3_PKT_ERR
@ VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH
@ VXGE_HW_RING_T_CODE_INT_ECC_ERR
@ VXGE_HW_RING_T_CODE_FRM_DROP
@ VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH
@ VXGE_HW_RING_T_CODE_L2_FRM_ERR
@ VXGE_HW_RING_T_CODE_UNUSED
@ VXGE_HW_RING_T_CODE_OK
@ VXGE_HW_RING_T_CODE_BUF_SIZE_ERR
@ VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF
@ VXGE_HW_RING_T_CODE_MULTI_ERR
@ VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH

◆ vxge_hw_fifo_gather_code

enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.

These gather codes are used to indicate the position of a TxD in a TxD list

Enumerator
VXGE_HW_FIFO_GATHER_CODE_FIRST 
VXGE_HW_FIFO_GATHER_CODE_MIDDLE 
VXGE_HW_FIFO_GATHER_CODE_LAST 
VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST 

Definition at line 198 of file vxge_traffic.h.

198 {
203};
@ VXGE_HW_FIFO_GATHER_CODE_LAST
@ VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST
@ VXGE_HW_FIFO_GATHER_CODE_MIDDLE
@ VXGE_HW_FIFO_GATHER_CODE_FIRST

◆ vxge_hw_fifo_tcode

enum vxge_hw_fifo_tcode - tcodes used in fifo @VXGE_HW_FIFO_T_CODE_OK: Transfer OK @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or frame data) returned with corrupt data.

@VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned with no data. @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a frame or LSO MSS that was too long (>9800B). @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send Offload operation, due to improper header template, unsupported protocol, etc. @VXGE_HW_FIFO_T_CODE_UNUSED: Unused @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple data buffer transfer errors are encountered (see below). Otherwise it is set to 0.

These tcodes are returned in various API for TxD status

Enumerator
VXGE_HW_FIFO_T_CODE_OK 
VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT 
VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL 
VXGE_HW_FIFO_T_CODE_INVALID_MSS 
VXGE_HW_FIFO_T_CODE_LSO_ERROR 
VXGE_HW_FIFO_T_CODE_UNUSED 
VXGE_HW_FIFO_T_CODE_MULTI_ERROR 

Definition at line 224 of file vxge_traffic.h.

224 {
232};
@ VXGE_HW_FIFO_T_CODE_MULTI_ERROR
@ VXGE_HW_FIFO_T_CODE_UNUSED
@ VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL
@ VXGE_HW_FIFO_T_CODE_OK
@ VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT
@ VXGE_HW_FIFO_T_CODE_INVALID_MSS
@ VXGE_HW_FIFO_T_CODE_LSO_ERROR

Function Documentation

◆ FILE_LICENCE()

FILE_LICENCE ( GPL2_ONLY )

◆ vxge_hw_ring_replenish()

enum vxge_hw_status vxge_hw_ring_replenish ( struct __vxge_hw_ring * ring)

Definition at line 558 of file vxge_config.c.

559{
560 struct __vxge_hw_device *hldev;
561 struct vxge_hw_ring_rxd_1 *rxd;
563 u8 offset = 0;
565 u8 i, iob_off;
566
567 vxge_trace();
568
569 hldev = ring->vpathh->hldev;
570 /*
571 * We allocate all the dma buffers first and then share the
572 * these buffers among the all rx descriptors in the block.
573 */
574 for (i = 0; i < ARRAY_SIZE(ring->iobuf); i++) {
575 ring->iobuf[i] = alloc_iob(VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
576 if (!ring->iobuf[i]) {
577 while (i) {
578 free_iob(ring->iobuf[--i]);
579 ring->iobuf[i] = NULL;
580 }
582 goto iobuf_err;
583 }
584 }
585
587
588 rxd = &ring->rxdl->rxd[offset];
591 else
592 iob_off = offset % ring->buf_per_block;
593
594 rxd->control_0 = rxd->control_1 = 0;
595 vxge_hw_ring_rxd_1b_set(rxd, ring->iobuf[iob_off],
597
599 }
600 /* linking the block to itself as we use only one rx block*/
601 block = ring->rxdl;
602 block->reserved_2_pNext_RxD_block = (unsigned long) block;
603 block->pNext_RxD_Blk_physical = (u64)virt_to_bus(block);
604
605 ring->rxd_offset = 0;
606iobuf_err:
607 return status;
608}
#define NULL
NULL pointer (VOID *)
Definition Base.h:322
uint16_t offset
Offset to command line.
Definition bzimage.h:3
#define rxd
Definition davicom.c:146
#define ARRAY_SIZE(x)
Definition efx_common.h:43
uint8_t status
Status.
Definition ena.h:5
#define u8
Definition igbvf_osdep.h:40
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition io.h:184
uint64_t u64
Definition stdint.h:26
void free_iob(struct io_buffer *iobuf)
Free I/O buffer.
Definition iobuf.c:153
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition iobuf.c:131
uint8_t block[3][8]
DES-encrypted blocks.
Definition mschapv2.h:1
struct __vxge_hw_device - Hal device object @magic: Magic Number @bar0: BAR0 virtual address.
struct vxgedev * vdev
struct vxge_hw_ring_rxd_1 rxd[VXGE_HW_MAX_RXDS_PER_BLOCK_1]
struct io_buffer * iobuf[VXGE_HW_RING_BUF_PER_BLOCK+1]
struct __vxge_hw_ring_block * rxdl
struct __vxge_hw_virtualpath * vpathh
struct __vxge_hw_device * hldev
struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
#define vxge_trace()
#define VXGE_HW_RING_BUF_PER_BLOCK
static void vxge_hw_ring_rxd_1b_set(struct vxge_hw_ring_rxd_1 *rxdp, struct io_buffer *iob, u32 size)
vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
vxge_hw_status
Definition vxge_config.h:70
@ VXGE_HW_OK
Definition vxge_config.h:71
@ VXGE_HW_ERR_OUT_OF_MEMORY
Definition vxge_config.h:85
#define VXGE_HW_MAX_RXDS_PER_BLOCK_1
#define VXGE_LL_MAX_FRAME_SIZE(dev)
Definition vxge_main.h:147
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring __unused, struct vxge_hw_ring_rxd_1 *rxdp)
vxge_hw_ring_rxd_post - Post descriptor on the ring.

References alloc_iob(), ARRAY_SIZE, block, __vxge_hw_ring::buf_per_block, free_iob(), __vxge_hw_virtualpath::hldev, __vxge_hw_ring::iobuf, NULL, offset, __vxge_hw_ring_block::rxd, rxd, __vxge_hw_ring::rxd_offset, __vxge_hw_ring::rxdl, status, u8, __vxge_hw_device::vdev, virt_to_bus(), __vxge_hw_ring::vpathh, VXGE_HW_ERR_OUT_OF_MEMORY, VXGE_HW_MAX_RXDS_PER_BLOCK_1, VXGE_HW_OK, VXGE_HW_RING_BUF_PER_BLOCK, vxge_hw_ring_rxd_1b_set(), vxge_hw_ring_rxd_post(), VXGE_LL_MAX_FRAME_SIZE, and vxge_trace.

Referenced by __vxge_hw_ring_create().

◆ vxge_hw_ring_rxd_post()

void vxge_hw_ring_rxd_post ( struct __vxge_hw_ring * ring_handle,
struct vxge_hw_ring_rxd_1 * rxdp )

References rxdp, txdp, and u32.

◆ vxge_hw_fifo_txdl_buffer_set()

void vxge_hw_fifo_txdl_buffer_set ( struct __vxge_hw_fifo * fifo,
struct vxge_hw_fifo_txd * txdp,
struct io_buffer * iob )

vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the descriptor.

@fifo: Handle to the fifo object used for non offload send @txdlh: Descriptor handle. @iob: data buffer.

Definition at line 375 of file vxge_traffic.c.

378{
381 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));
382
383 txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
385
386 txdp->host_control = (intptr_t)iob;
387 txdp->buffer_pointer = virt_to_bus(iob->data);
388}
unsigned long intptr_t
Definition stdint.h:21
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition iobuf.h:160
@ txdp
Definition sis900.h:30
void * data
Start of data.
Definition iobuf.h:53
#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val)
#define VXGE_HW_FIFO_TXD_GATHER_CODE(val)
#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST
#define VXGE_HW_FIFO_TXD_INT_NUMBER(val)

References io_buffer::data, iob_len(), __vxge_hw_fifo::tx_intr_num, txdp, virt_to_bus(), VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST, VXGE_HW_FIFO_TXD_BUFFER_SIZE, VXGE_HW_FIFO_TXD_GATHER_CODE, VXGE_HW_FIFO_TXD_INT_NUMBER, and VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST.

Referenced by vxge_xmit().

◆ vxge_hw_fifo_txdl_post()

void vxge_hw_fifo_txdl_post ( struct __vxge_hw_fifo * fifo,
struct vxge_hw_fifo_txd * txdp )

vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.

@fifo: Handle to the fifo object used for non offload send @txdp: Tx Descriptor

Post descriptor on the 'fifo' type channel for transmission. Prior to posting the descriptor should be filled in accordance with Host/Titan interface specification for a given service (LL, etc.).

Definition at line 400 of file vxge_traffic.c.

402{
404
406
408}
static void vxge_hw_fifo_txd_offset_up(u16 *offset)
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo, u64 txdl_ptr, u32 num_txds)
__vxge_hw_non_offload_db_post - Post non offload doorbell

References __vxge_hw_non_offload_db_post(), __vxge_hw_fifo::sw_offset, txdp, virt_to_bus(), VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER, and vxge_hw_fifo_txd_offset_up().

Referenced by vxge_xmit().

◆ __vxge_hw_ring_create()

enum vxge_hw_status __vxge_hw_ring_create ( struct __vxge_hw_virtualpath * vpath,
struct __vxge_hw_ring * ring )

Definition at line 616 of file vxge_config.c.

618{
620 struct __vxge_hw_device *hldev;
621 u32 vp_id;
622
623 vxge_trace();
624
625 hldev = vpath->hldev;
626 vp_id = vpath->vp_id;
627
628 ring->rxdl = malloc_phys(sizeof(struct __vxge_hw_ring_block),
629 sizeof(struct __vxge_hw_ring_block));
630 if (!ring->rxdl) {
631 vxge_debug(VXGE_ERR, "%s:%d malloc_phys error\n",
632 __func__, __LINE__);
634 goto exit;
635 }
636 ring->rxd_offset = 0;
637 ring->vpathh = vpath;
640 ring->vp_id = vp_id;
641 ring->vp_reg = vpath->vp_reg;
642 ring->common_reg = hldev->common_reg;
643
645
647 if (status != VXGE_HW_OK) {
649 goto exit;
650 }
651exit:
652 return status;
653}
void * malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition malloc.c:707
struct vxge_hw_common_reg * common_reg
struct vxge_hw_vpath_reg * vp_reg
struct vxge_hw_common_reg * common_reg
struct vxge_hw_vpath_reg * vp_reg
#define u32
Definition vga.h:21
enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_ring *ring)
enum vxge_hw_status vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
#define VXGE_HW_RING_RXD_QWORD_LIMIT
#define vxge_debug(mask, fmt...)
#define VXGE_ERR
Definition vxge_config.h:54
#define VXGE_HW_RING_RX_POLL_WEIGHT

References __vxge_hw_ring_delete(), __vxge_hw_ring::buf_per_block, __vxge_hw_device::common_reg, __vxge_hw_ring::common_reg, __vxge_hw_virtualpath::hldev, malloc_phys(), __vxge_hw_ring::rx_poll_weight, __vxge_hw_ring::rxd_offset, __vxge_hw_ring::rxd_qword_limit, __vxge_hw_ring::rxdl, status, u32, __vxge_hw_ring::vp_id, __vxge_hw_virtualpath::vp_id, __vxge_hw_ring::vp_reg, __vxge_hw_virtualpath::vp_reg, __vxge_hw_ring::vpathh, vxge_debug, VXGE_ERR, VXGE_HW_ERR_OUT_OF_MEMORY, VXGE_HW_OK, VXGE_HW_RING_BUF_PER_BLOCK, vxge_hw_ring_replenish(), VXGE_HW_RING_RX_POLL_WEIGHT, VXGE_HW_RING_RXD_QWORD_LIMIT, and vxge_trace.

Referenced by vxge_hw_vpath_open().

◆ __vxge_hw_ring_delete()

enum vxge_hw_status __vxge_hw_ring_delete ( struct __vxge_hw_ring * ringh)

Definition at line 659 of file vxge_config.c.

660{
661 u8 i;
662
663 vxge_trace();
664
665 for (i = 0; (i < ARRAY_SIZE(ring->iobuf)) && ring->iobuf[i]; i++) {
666 free_iob(ring->iobuf[i]);
667 ring->iobuf[i] = NULL;
668 }
669
670 if (ring->rxdl) {
671 free_phys(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
672 ring->rxdl = NULL;
673 }
674 ring->rxd_offset = 0;
675
676 return VXGE_HW_OK;
677}
void free_phys(void *ptr, size_t size)
Free memory allocated with malloc_phys()
Definition malloc.c:723

References ARRAY_SIZE, free_iob(), free_phys(), __vxge_hw_ring::iobuf, NULL, __vxge_hw_ring::rxd_offset, __vxge_hw_ring::rxdl, u8, VXGE_HW_OK, and vxge_trace.

Referenced by __vxge_hw_ring_create(), and vxge_hw_vpath_close().

◆ __vxge_hw_fifo_create()

enum vxge_hw_status __vxge_hw_fifo_create ( struct __vxge_hw_virtualpath * vpath,
struct __vxge_hw_fifo * fifo )

Definition at line 814 of file vxge_config.c.

816{
818
819 vxge_trace();
820
821 fifo->vpathh = vpath;
823 fifo->hw_offset = fifo->sw_offset = 0;
824 fifo->nofl_db = vpath->nofl_db;
825 fifo->vp_id = vpath->vp_id;
826 fifo->vp_reg = vpath->vp_reg;
827 fifo->tx_intr_num = (vpath->vp_id * VXGE_HW_MAX_INTR_PER_VP)
829
830 fifo->txdl = malloc_phys(sizeof(struct vxge_hw_fifo_txd)
831 * fifo->depth, fifo->depth);
832 if (!fifo->txdl) {
833 vxge_debug(VXGE_ERR, "%s:%d malloc_phys error\n",
834 __func__, __LINE__);
836 }
837 memset(fifo->txdl, 0, sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
838 return status;
839}
void * memset(void *dest, int character, size_t len) __nonnull
struct vxge_hw_vpath_reg * vp_reg
struct __vxge_hw_non_offload_db_wrapper * nofl_db
struct __vxge_hw_virtualpath * vpathh
struct vxge_hw_fifo_txd * txdl
struct __vxge_hw_non_offload_db_wrapper * nofl_db
struct vxge_hw_fifo_txd - Transmit Descriptor
#define VXGE_HW_FIFO_TXD_DEPTH
#define VXGE_HW_VPATH_INTR_TX
#define VXGE_HW_MAX_INTR_PER_VP

References __vxge_hw_fifo::depth, __vxge_hw_fifo::hw_offset, malloc_phys(), memset(), __vxge_hw_fifo::nofl_db, __vxge_hw_virtualpath::nofl_db, status, __vxge_hw_fifo::sw_offset, __vxge_hw_fifo::tx_intr_num, __vxge_hw_fifo::txdl, __vxge_hw_fifo::vp_id, __vxge_hw_virtualpath::vp_id, __vxge_hw_fifo::vp_reg, __vxge_hw_virtualpath::vp_reg, __vxge_hw_fifo::vpathh, vxge_debug, VXGE_ERR, VXGE_HW_ERR_OUT_OF_MEMORY, VXGE_HW_FIFO_TXD_DEPTH, VXGE_HW_MAX_INTR_PER_VP, VXGE_HW_OK, VXGE_HW_VPATH_INTR_TX, and vxge_trace.

Referenced by vxge_hw_vpath_open().

◆ __vxge_hw_fifo_delete()

enum vxge_hw_status __vxge_hw_fifo_delete ( struct __vxge_hw_fifo * fifo)

Definition at line 845 of file vxge_config.c.

846{
847 vxge_trace();
848
849 if (fifo->txdl)
850 free_phys(fifo->txdl,
851 sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
852
853 fifo->txdl = NULL;
854 fifo->hw_offset = fifo->sw_offset = 0;
855
856 return VXGE_HW_OK;
857}

References __vxge_hw_fifo::depth, free_phys(), __vxge_hw_fifo::hw_offset, NULL, __vxge_hw_fifo::sw_offset, __vxge_hw_fifo::txdl, VXGE_HW_OK, and vxge_trace.

Referenced by vxge_hw_vpath_close(), and vxge_hw_vpath_open().

◆ __vxge_hw_vpath_reset()

enum vxge_hw_status __vxge_hw_vpath_reset ( struct __vxge_hw_device * devh,
u32 vp_id )

Definition at line 1270 of file vxge_config.c.

1271{
1272 u64 val64;
1274
1275 vxge_trace();
1276
1277 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
1278
1280 &hldev->common_reg->cmn_rsthdlr_cfg0);
1281
1282 return status;
1283}
static void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val)
Definition vxge_reg.h:763
#define vxge_bVALn(bits, loc, n)
Definition vxge_reg.h:35

References __vxge_hw_pio_mem_write32_upper(), vxge_hw_common_reg::cmn_rsthdlr_cfg0, __vxge_hw_device::common_reg, status, u32, vxge_bVALn, VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH, VXGE_HW_OK, and vxge_trace.

Referenced by __vxge_hw_vp_initialize(), and vxge_hw_vpath_reset().

◆ __vxge_hw_vpath_enable()

enum vxge_hw_status __vxge_hw_vpath_enable ( struct __vxge_hw_device * devh,
u32 vp_id )

References u32.

◆ __vxge_hw_vpath_prc_configure()

void __vxge_hw_vpath_prc_configure ( struct __vxge_hw_device * hldev)

Definition at line 1291 of file vxge_config.c.

1292{
1293 u64 val64;
1294 struct __vxge_hw_virtualpath *vpath;
1295 struct vxge_hw_vpath_reg __iomem *vp_reg;
1296
1297 vxge_trace();
1298
1299 vpath = &hldev->virtual_path;
1300 vp_reg = vpath->vp_reg;
1301
1302 val64 = readq(&vp_reg->prc_cfg1);
1304 writeq(val64, &vp_reg->prc_cfg1);
1305
1306 val64 = readq(&vpath->vp_reg->prc_cfg6);
1307 val64 &= ~VXGE_HW_PRC_CFG6_RXD_CRXDT(0x1ff);
1308 val64 &= ~VXGE_HW_PRC_CFG6_RXD_SPAT(0x1ff);
1310 val64 |= VXGE_HW_PRC_CFG6_RXD_CRXDT(0x3);
1311 val64 |= VXGE_HW_PRC_CFG6_RXD_SPAT(0xf);
1312 writeq(val64, &vpath->vp_reg->prc_cfg6);
1313
1315 (u64)virt_to_bus(vpath->ringh.rxdl) >> 3),
1316 &vp_reg->prc_cfg5);
1317
1318 val64 = readq(&vp_reg->prc_cfg4);
1319 val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
1320 val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
1324
1325 writeq(val64, &vp_reg->prc_cfg4);
1326 return;
1327}
#define __iomem
Definition igbvf_osdep.h:46
#define readq(io_addr)
Definition io.h:234
#define writeq(data, io_addr)
Definition io.h:273
struct __vxge_hw_virtualpath virtual_path
struct __vxge_hw_ring ringh
#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN
Definition vxge_reg.h:4082
#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER
Definition vxge_reg.h:139
#define VXGE_HW_PRC_CFG4_RING_MODE(val)
Definition vxge_reg.h:4069
#define VXGE_HW_PRC_CFG4_RTH_DISABLE
Definition vxge_reg.h:4072
#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE
Definition vxge_reg.h:4060
#define VXGE_HW_PRC_CFG4_IN_SVC
Definition vxge_reg.h:4068
#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val)
Definition vxge_reg.h:4085
#define VXGE_HW_PRC_CFG5_RXD0_ADD(val)
Definition vxge_reg.h:4078
#define VXGE_HW_PRC_CFG6_RXD_SPAT(val)
Definition vxge_reg.h:4087

References __iomem, vxge_hw_vpath_reg::prc_cfg1, vxge_hw_vpath_reg::prc_cfg4, vxge_hw_vpath_reg::prc_cfg5, vxge_hw_vpath_reg::prc_cfg6, readq, __vxge_hw_virtualpath::ringh, __vxge_hw_ring::rxdl, virt_to_bus(), __vxge_hw_device::virtual_path, __vxge_hw_virtualpath::vp_reg, VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE, VXGE_HW_PRC_CFG4_IN_SVC, VXGE_HW_PRC_CFG4_RING_MODE, VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER, VXGE_HW_PRC_CFG4_RTH_DISABLE, VXGE_HW_PRC_CFG5_RXD0_ADD, VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN, VXGE_HW_PRC_CFG6_RXD_CRXDT, VXGE_HW_PRC_CFG6_RXD_SPAT, vxge_trace, and writeq.

Referenced by vxge_hw_vpath_open(), and vxge_hw_vpath_recover_from_reset().

◆ __vxge_hw_vpath_kdfc_configure()

enum vxge_hw_status __vxge_hw_vpath_kdfc_configure ( struct __vxge_hw_device * devh,
u32 vp_id )

Definition at line 1335 of file vxge_config.c.

1336{
1337 u64 val64;
1338 u64 vpath_stride;
1340 struct __vxge_hw_virtualpath *vpath;
1341 struct vxge_hw_vpath_reg __iomem *vp_reg;
1342
1343 vxge_trace();
1344
1345 vpath = &hldev->virtual_path;
1346 vp_reg = vpath->vp_reg;
1347 status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
1348
1349 if (status != VXGE_HW_OK)
1350 goto exit;
1351
1352 val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
1353
1354 vpath->max_kdfc_db =
1356 val64+1)/2;
1357
1358 vpath->max_nofl_db = vpath->max_kdfc_db;
1359
1361 (vpath->max_nofl_db*2)-1);
1362
1363 writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
1364
1366 &vp_reg->kdfc_fifo_trpl_ctrl);
1367
1368 val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
1369
1370 val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
1372
1375#if (__BYTE_ORDER != __BIG_ENDIAN)
1377#endif
1379
1380 writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
1382 wmb();
1383 vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
1384
1385 vpath->nofl_db =
1387 (hldev->kdfc + (vp_id *
1389 vpath_stride)));
1390exit:
1391 return status;
1392}
#define wmb()
Definition io.h:546
u64 kdfc_drbl_triplet_total
Definition vxge_reg.h:4167
u64 kdfc_trpl_fifo_0_wb_address
Definition vxge_reg.h:4157
u64 kdfc_fifo_trpl_partition
Definition vxge_reg.h:4116
enum vxge_hw_status __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg, struct vxge_hw_vpath_reg __iomem *vpath_reg)
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val)
Definition vxge_reg.h:4123
#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits)
Definition vxge_reg.h:105
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY
Definition vxge_reg.h:109
#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits)
Definition vxge_reg.h:124
#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE
Definition vxge_reg.h:4121
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN
Definition vxge_reg.h:4125
#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val)
Definition vxge_reg.h:4117
#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val)
Definition vxge_reg.h:4131

References __iomem, __vxge_hw_kdfc_swapper_set(), __vxge_hw_device::kdfc, vxge_hw_vpath_reg::kdfc_drbl_triplet_total, vxge_hw_vpath_reg::kdfc_fifo_trpl_ctrl, vxge_hw_vpath_reg::kdfc_fifo_trpl_partition, vxge_hw_vpath_reg::kdfc_trpl_fifo_0_ctrl, vxge_hw_vpath_reg::kdfc_trpl_fifo_0_wb_address, __vxge_hw_device::legacy_reg, __vxge_hw_virtualpath::max_kdfc_db, __vxge_hw_virtualpath::max_nofl_db, __vxge_hw_virtualpath::nofl_db, readq, status, vxge_hw_toc_reg::toc_kdfc_vpath_stride, __vxge_hw_device::toc_reg, u32, __vxge_hw_device::virtual_path, __vxge_hw_virtualpath::vp_reg, VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE, VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE, VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0, VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE, VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY, VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT, VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN, VXGE_HW_OK, VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE, vxge_trace, wmb, and writeq.

Referenced by __vxge_hw_vpath_initialize().

◆ __vxge_hw_vpath_mac_configure()

enum vxge_hw_status __vxge_hw_vpath_mac_configure ( struct __vxge_hw_device * devh)

Definition at line 1399 of file vxge_config.c.

1400{
1401 u64 val64;
1403 struct __vxge_hw_virtualpath *vpath;
1404 struct vxge_hw_vpath_reg __iomem *vp_reg;
1405
1406 vxge_trace();
1407
1408 vpath = &hldev->virtual_path;
1409 vp_reg = vpath->vp_reg;
1410
1412 vpath->vsport_number), &vp_reg->xmac_vsport_choice);
1413
1414 val64 = readq(&vp_reg->rxmac_vcfg1);
1415
1418
1419 writeq(val64, &vp_reg->rxmac_vcfg1);
1420 return status;
1421}
#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val)
Definition vxge_reg.h:4224
#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE
Definition vxge_reg.h:4202
#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val)
Definition vxge_reg.h:4201

References __iomem, readq, vxge_hw_vpath_reg::rxmac_vcfg1, status, __vxge_hw_device::virtual_path, __vxge_hw_virtualpath::vp_reg, __vxge_hw_virtualpath::vsport_number, VXGE_HW_OK, VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE, VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE, VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER, vxge_trace, writeq, and vxge_hw_vpath_reg::xmac_vsport_choice.

Referenced by __vxge_hw_vpath_initialize().

◆ __vxge_hw_vpath_tim_configure()

enum vxge_hw_status __vxge_hw_vpath_tim_configure ( struct __vxge_hw_device * devh,
u32 vp_id )

Definition at line 1429 of file vxge_config.c.

1430{
1431 u64 val64;
1433 struct __vxge_hw_virtualpath *vpath;
1434 struct vxge_hw_vpath_reg __iomem *vp_reg;
1435
1436 vxge_trace();
1437
1438 vpath = &hldev->virtual_path;
1439 vp_reg = vpath->vp_reg;
1440
1441 writeq((u64)0, &vp_reg->tim_dest_addr);
1442 writeq((u64)0, &vp_reg->tim_vpath_map);
1443 writeq((u64)0, &vp_reg->tim_bitmap);
1444 writeq((u64)0, &vp_reg->tim_remap);
1445
1447 (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
1449
1450 val64 = readq(&vp_reg->tim_pci_cfg);
1452 writeq(val64, &vp_reg->tim_pci_cfg);
1453
1454 /* TX configuration */
1456 (VXGE_TTI_BTIMER_VAL * 1000) / 272);
1464
1470
1474 (VXGE_TTI_LTIMER_VAL * 1000) / 272);
1476
1477 /* RX configuration */
1479 (VXGE_RTI_BTIMER_VAL * 1000) / 272);
1485
1491
1495 (VXGE_RTI_LTIMER_VAL * 1000) / 272);
1497
1498 val64 = 0;
1505
1506 return status;
1507}
u64 tim_cfg3_int_num[4]
Definition vxge_reg.h:4301
u64 tim_cfg1_int_num[4]
Definition vxge_reg.h:4286
u64 tim_cfg2_int_num[4]
Definition vxge_reg.h:4296
#define RTI_RX_UFC_C
Definition vxge_main.h:120
#define VXGE_TTI_LTIMER_VAL
Definition vxge_main.h:77
#define TTI_TX_URANGE_B
Definition vxge_main.h:94
#define TTI_TX_URANGE_C
Definition vxge_main.h:95
#define TTI_TX_UFC_A
Definition vxge_main.h:96
#define RTI_RX_UFC_B
Definition vxge_main.h:119
#define TTI_TX_URANGE_A
Definition vxge_main.h:93
#define TTI_TX_UFC_C
Definition vxge_main.h:98
#define VXGE_RTI_BTIMER_VAL
Definition vxge_main.h:79
#define VXGE_RTI_LTIMER_VAL
Definition vxge_main.h:80
#define RTI_RX_UFC_A
Definition vxge_main.h:118
#define RTI_RX_URANGE_C
Definition vxge_main.h:114
#define RTI_RX_UFC_D
Definition vxge_main.h:121
#define TTI_TX_UFC_D
Definition vxge_main.h:99
#define TTI_TX_UFC_B
Definition vxge_main.h:97
#define RTI_RX_URANGE_A
Definition vxge_main.h:112
#define RTI_RX_URANGE_B
Definition vxge_main.h:113
#define VXGE_TTI_BTIMER_VAL
Definition vxge_main.h:73
#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val)
Definition vxge_reg.h:4305
#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val)
Definition vxge_reg.h:4297
#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val)
Definition vxge_reg.h:4299
#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val)
Definition vxge_reg.h:4293
#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC
Definition vxge_reg.h:4291
#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN
Definition vxge_reg.h:4289
#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val)
Definition vxge_reg.h:4298
#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI
Definition vxge_reg.h:4292
#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val)
Definition vxge_reg.h:4319
#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val)
Definition vxge_reg.h:4300
#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val)
Definition vxge_reg.h:4306
#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val)
Definition vxge_reg.h:4294
#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val)
Definition vxge_reg.h:4295
#define VXGE_HW_TIM_PCI_CFG_ADD_PAD
Definition vxge_reg.h:4328
#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val)
Definition vxge_reg.h:4287
#define VXGE_HW_VPATH_INTR_BMAP
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL
#define VXGE_HW_VPATH_INTR_EINTA
#define VXGE_HW_VPATH_INTR_RX

References __iomem, readq, RTI_RX_UFC_A, RTI_RX_UFC_B, RTI_RX_UFC_C, RTI_RX_UFC_D, RTI_RX_URANGE_A, RTI_RX_URANGE_B, RTI_RX_URANGE_C, status, vxge_hw_vpath_reg::tim_bitmap, vxge_hw_vpath_reg::tim_cfg1_int_num, vxge_hw_vpath_reg::tim_cfg2_int_num, vxge_hw_vpath_reg::tim_cfg3_int_num, vxge_hw_vpath_reg::tim_dest_addr, vxge_hw_vpath_reg::tim_pci_cfg, vxge_hw_vpath_reg::tim_remap, vxge_hw_vpath_reg::tim_ring_assn, vxge_hw_vpath_reg::tim_vpath_map, TTI_TX_UFC_A, TTI_TX_UFC_B, TTI_TX_UFC_C, TTI_TX_UFC_D, TTI_TX_URANGE_A, TTI_TX_URANGE_B, TTI_TX_URANGE_C, u32, __vxge_hw_device::virtual_path, __vxge_hw_virtualpath::vp_reg, VXGE_HW_MAX_INTR_PER_VP, VXGE_HW_OK, VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL, VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC, VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI, VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN, VXGE_HW_TIM_CFG1_INT_NUM_URNG_A, VXGE_HW_TIM_CFG1_INT_NUM_URNG_B, VXGE_HW_TIM_CFG1_INT_NUM_URNG_C, VXGE_HW_TIM_CFG2_INT_NUM_UEC_A, VXGE_HW_TIM_CFG2_INT_NUM_UEC_B, VXGE_HW_TIM_CFG2_INT_NUM_UEC_C, VXGE_HW_TIM_CFG2_INT_NUM_UEC_D, VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL, VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL, VXGE_HW_TIM_PCI_CFG_ADD_PAD, VXGE_HW_TIM_RING_ASSN_INT_NUM, VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL, VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL, VXGE_HW_VPATH_INTR_BMAP, VXGE_HW_VPATH_INTR_EINTA, VXGE_HW_VPATH_INTR_RX, VXGE_HW_VPATH_INTR_TX, VXGE_RTI_BTIMER_VAL, VXGE_RTI_LTIMER_VAL, vxge_trace, VXGE_TTI_BTIMER_VAL, VXGE_TTI_LTIMER_VAL, and writeq.

Referenced by __vxge_hw_vpath_initialize().

◆ __vxge_hw_vpath_initialize()

enum vxge_hw_status __vxge_hw_vpath_initialize ( struct __vxge_hw_device * devh,
u32 vp_id )

Definition at line 1515 of file vxge_config.c.

1516{
1517 u64 val64;
1518 u32 val32;
1519 int i;
1521 struct __vxge_hw_virtualpath *vpath;
1522 struct vxge_hw_vpath_reg *vp_reg;
1523
1524 vxge_trace();
1525
1526 vpath = &hldev->virtual_path;
1527
1528 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
1530 goto exit;
1531 }
1532 vp_reg = vpath->vp_reg;
1533 status = __vxge_hw_legacy_swapper_set(hldev->legacy_reg);
1534 if (status != VXGE_HW_OK)
1535 goto exit;
1536
1538
1539 if (status != VXGE_HW_OK)
1540 goto exit;
1541 val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
1542
1543 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1544 if (val64 & vxge_mBIT(i))
1545 vpath->vsport_number = i;
1546 }
1547
1549
1550 if (status != VXGE_HW_OK)
1551 goto exit;
1552
1553 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
1554
1555 if (status != VXGE_HW_OK)
1556 goto exit;
1557
1558 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
1559
1560 if (status != VXGE_HW_OK)
1561 goto exit;
1562
1563 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
1564
1565 /* Get MRRS value from device control */
1566 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
1567
1568 if (status == VXGE_HW_OK) {
1569 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
1570 val64 &=
1572 val64 |=
1574
1576 }
1577
1579 val64 |=
1582
1584 writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
1585
1586exit:
1587 return status;
1588}
struct vxge_hw_vpmgmt_reg * vpmgmt_reg
u64 rtdma_rd_optimization_ctrl
Definition vxge_reg.h:4259
enum vxge_hw_status __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev)
enum vxge_hw_status __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, u32 phy_func_0, u32 offset, u32 *val)
enum vxge_hw_status __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
enum vxge_hw_status __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
enum vxge_hw_status __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
enum vxge_hw_status __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
#define VXGE_HW_MAX_PAYLOAD_SIZE_512
Definition vxge_config.h:68
@ VXGE_HW_ERR_VPATH_NOT_AVAILABLE
Definition vxge_config.h:86
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN
Definition vxge_reg.h:4270
#define VXGE_HW_PCI_EXP_DEVCTL_READRQ
Definition vxge_reg.h:4264
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val)
Definition vxge_reg.h:4271
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val)
Definition vxge_reg.h:4265
#define vxge_mBIT(loc)
Definition vxge_reg.h:24
#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE
Definition vxge_reg.h:4263
#define VXGE_HW_MAX_VIRTUAL_PATHS

References __vxge_hw_legacy_swapper_set(), __vxge_hw_vpath_kdfc_configure(), __vxge_hw_vpath_mac_configure(), __vxge_hw_vpath_pci_read(), __vxge_hw_vpath_swapper_set(), __vxge_hw_vpath_tim_configure(), __vxge_hw_device::legacy_reg, readq, vxge_hw_vpath_reg::rtdma_rd_optimization_ctrl, status, u32, __vxge_hw_device::virtual_path, __vxge_hw_virtualpath::vp_reg, __vxge_hw_device::vpath_assignments, __vxge_hw_virtualpath::vpmgmt_reg, __vxge_hw_virtualpath::vsport_number, VXGE_HW_ERR_VPATH_NOT_AVAILABLE, VXGE_HW_MAX_PAYLOAD_SIZE_512, VXGE_HW_MAX_VIRTUAL_PATHS, VXGE_HW_OK, VXGE_HW_PCI_EXP_DEVCTL_READRQ, VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY, VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN, VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH, VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE, vxge_mBIT, vxge_trace, writeq, and vxge_hw_vpmgmt_reg::xmac_vsport_choices_vp.

Referenced by __vxge_hw_vp_initialize(), and vxge_hw_vpath_recover_from_reset().

◆ __vxge_hw_vp_initialize()

enum vxge_hw_status __vxge_hw_vp_initialize ( struct __vxge_hw_device * hldev,
u32 vp_id,
struct __vxge_hw_virtualpath * vpath )

Definition at line 1596 of file vxge_config.c.

1598{
1600
1601 vxge_trace();
1602
1603 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
1605 goto exit;
1606 }
1607
1608 vpath->vp_id = vp_id;
1609 vpath->vp_open = VXGE_HW_VP_OPEN;
1610 vpath->hldev = hldev;
1611 vpath->vp_reg = hldev->vpath_reg[vp_id];
1612 vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
1613
1614 __vxge_hw_vpath_reset(hldev, vp_id);
1615
1617 if (status != VXGE_HW_OK) {
1618 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
1619 goto exit;
1620 }
1621
1623 hldev->tim_int_mask1, vp_id);
1624
1625 status = __vxge_hw_vpath_initialize(hldev, vp_id);
1626
1627 if (status != VXGE_HW_OK) {
1628 __vxge_hw_vp_terminate(hldev, vpath);
1629 goto exit;
1630 }
1631
1633exit:
1634 return status;
1635}
struct vxge_hw_vpmgmt_reg * vpmgmt_reg[VXGE_HW_TITAN_VPMGMT_REG_SPACES]
struct vxge_hw_vpath_reg * vpath_reg[VXGE_HW_TITAN_VPATH_REG_SPACES]
enum vxge_hw_status __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
enum vxge_hw_status __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, struct __vxge_hw_virtualpath *vpath)
static enum vxge_hw_status __vxge_hw_vpath_mgmt_read(struct __vxge_hw_virtualpath *vpath)
enum vxge_hw_status __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i)
#define VXGE_HW_VP_OPEN

References __vxge_hw_vp_terminate(), __vxge_hw_vpath_initialize(), __vxge_hw_vpath_mgmt_read(), __vxge_hw_vpath_reset(), __vxge_hw_vpath_reset_check(), __vxge_hw_virtualpath::hldev, memset(), status, __vxge_hw_device::tim_int_mask0, __vxge_hw_device::tim_int_mask1, u32, __vxge_hw_virtualpath::vp_id, __vxge_hw_virtualpath::vp_open, __vxge_hw_virtualpath::vp_reg, __vxge_hw_device::vpath_assignments, __vxge_hw_device::vpath_reg, __vxge_hw_device::vpmgmt_reg, __vxge_hw_virtualpath::vpmgmt_reg, VXGE_HW_DEVICE_TIM_INT_MASK_SET, VXGE_HW_ERR_VPATH_NOT_AVAILABLE, VXGE_HW_OK, VXGE_HW_VP_OPEN, vxge_mBIT, and vxge_trace.

Referenced by vxge_hw_vpath_open().

◆ __vxge_hw_vp_terminate()

void __vxge_hw_vp_terminate ( struct __vxge_hw_device * hldev,
struct __vxge_hw_virtualpath * vpath )

Definition at line 1642 of file vxge_config.c.

1644{
1645 vxge_trace();
1646
1647 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
1648 return;
1649
1651 hldev->tim_int_mask1, vpath->vp_id);
1652
1653 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
1654}
#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i)
#define VXGE_HW_VP_NOT_OPEN

References memset(), __vxge_hw_device::tim_int_mask0, __vxge_hw_device::tim_int_mask1, __vxge_hw_virtualpath::vp_id, __vxge_hw_virtualpath::vp_open, VXGE_HW_DEVICE_TIM_INT_MASK_RESET, VXGE_HW_VP_NOT_OPEN, and vxge_trace.

Referenced by __vxge_hw_vp_initialize(), vxge_hw_vpath_close(), and vxge_hw_vpath_open().

◆ vxge_hw_device_begin_irq()

enum vxge_hw_status vxge_hw_device_begin_irq ( struct __vxge_hw_device * hldev)

vxge_hw_device_begin_irq - Begin IRQ processing.

@hldev: HW device handle.

The function performs two actions: it first checks whether the (possibly shared) IRQ was raised by the device, and then masks the device interrupts.

Note: vxge_hw_device_begin_irq() does not flush MMIO writes through the bridge. Therefore, two back-to-back interrupts are potentially possible.

Returns: 0, if the interrupt is not "ours" (note that in this case the device remains enabled). Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter status.

Definition at line 555 of file vxge_traffic.c.

556{
557 u64 val64;
559 u64 vpath_mask;
560 enum vxge_hw_status ret = VXGE_HW_OK;
561
562 val64 = readq(&hldev->common_reg->titan_general_int_status);
563
564 if (!val64) {
566 goto exit;
567 }
568
569 if (val64 == VXGE_HW_ALL_FOXES) {
570
572
574
575 vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
576 "occurred\n", hldev->ndev->name,
577 __func__, __LINE__);
579 goto exit;
580 }
581 }
582
583 vpath_mask = hldev->vpaths_deployed >>
586 vpath_mask))
588
591
592exit:
593 return ret;
594}
struct net_device * ndev
char name[NETDEV_NAME_LEN]
Name of this network device.
Definition netdevice.h:363
u64 titan_general_int_status
Definition vxge_reg.h:803
@ VXGE_HW_ERR_SLOT_FREEZE
@ VXGE_HW_ERR_WRONG_IRQ
Definition vxge_config.h:88
#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val)
Definition vxge_reg.h:807
#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT
Definition vxge_reg.h:806
u64 adapter_status
Definition vxge_reg.h:121
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the condition that has caused the Tx and RX...
static enum vxge_hw_status __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath)
#define VXGE_HW_ALL_FOXES

References __vxge_hw_vpath_alarm_process(), adapter_status, vxge_hw_common_reg::adapter_status, __vxge_hw_device::common_reg, net_device::name, __vxge_hw_device::ndev, readq, vxge_hw_common_reg::titan_general_int_status, __vxge_hw_device::virtual_path, __vxge_hw_device::vpaths_deployed, vxge_debug, VXGE_ERR, VXGE_HW_ALL_FOXES, vxge_hw_device_clear_tx_rx(), VXGE_HW_ERR_SLOT_FREEZE, VXGE_HW_ERR_WRONG_IRQ, VXGE_HW_MAX_VIRTUAL_PATHS, VXGE_HW_OK, VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT, and VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT.

Referenced by vxge_poll().

◆ vxge_hw_device_intr_enable()

void vxge_hw_device_intr_enable ( struct __vxge_hw_device * hldev)

vxge_hw_device_intr_enable - Enable interrupts.

@hldev: HW device handle.

Enable Titan interrupts. The function is to be executed the last in Titan initialization sequence.

See also: vxge_hw_device_intr_disable()

Definition at line 251 of file vxge_traffic.c.

252{
253 u64 val64;
254 u32 val32;
255
257
259
260 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
262
263 if (val64 != 0) {
264 writeq(val64, &hldev->common_reg->tim_int_status0);
265
266 writeq(~val64, &hldev->common_reg->tim_int_mask0);
267 }
268
269 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
271
272 if (val32 != 0) {
274 &hldev->common_reg->tim_int_status1);
275
277 &hldev->common_reg->tim_int_mask1);
278 }
279
280 val64 = readq(&hldev->common_reg->titan_general_int_status);
281
282 /* We have not enabled the top level interrupt yet.
283 * This will be controlled from vxge_irq() entry api.
284 */
285 return;
286}
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
vxge_hw_device_mask_all - Mask all device interrupts.
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath)

References __vxge_hw_pio_mem_write32_upper(), __vxge_hw_device::common_reg, readq, __vxge_hw_device::tim_int_mask0, vxge_hw_common_reg::tim_int_mask0, __vxge_hw_device::tim_int_mask1, vxge_hw_common_reg::tim_int_mask1, vxge_hw_common_reg::tim_int_status0, vxge_hw_common_reg::tim_int_status1, vxge_hw_common_reg::titan_general_int_status, u32, __vxge_hw_device::virtual_path, vxge_hw_device_mask_all(), vxge_hw_vpath_intr_enable(), VXGE_HW_VPATH_INTR_RX, VXGE_HW_VPATH_INTR_TX, and writeq.

Referenced by vxge_open().

◆ vxge_hw_device_intr_disable()

void vxge_hw_device_intr_disable ( struct __vxge_hw_device * hldev)

vxge_hw_device_intr_disable - Disable Titan interrupts.

@hldev: HW device handle.

Disable Titan interrupts.

See also: vxge_hw_device_intr_enable()

Definition at line 296 of file vxge_traffic.c.

297{
299
300 /* mask all the tim interrupts */
303 &hldev->common_reg->tim_int_mask1);
304
306
307 return;
308}
enum vxge_hw_status vxge_hw_vpath_intr_disable(struct __vxge_hw_virtualpath *vpath)
#define VXGE_HW_INTR_MASK_ALL
#define VXGE_HW_DEFAULT_32

References __vxge_hw_pio_mem_write32_upper(), __vxge_hw_device::common_reg, vxge_hw_common_reg::tim_int_mask0, vxge_hw_common_reg::tim_int_mask1, __vxge_hw_device::virtual_path, VXGE_HW_DEFAULT_32, vxge_hw_device_mask_all(), VXGE_HW_INTR_MASK_ALL, vxge_hw_vpath_intr_disable(), and writeq.

Referenced by vxge_close().

◆ vxge_hw_device_mask_all()

void vxge_hw_device_mask_all ( struct __vxge_hw_device * hldev)

vxge_hw_device_mask_all - Mask all device interrupts.

@hldev: HW device handle.

Mask all device interrupts.

See also: vxge_hw_device_unmask_all()

Definition at line 211 of file vxge_traffic.c.

212{
213 u64 val64;
214
217
220
221 return;
222}
#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM
Definition vxge_reg.h:812
#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC
Definition vxge_reg.h:813

References __vxge_hw_pio_mem_write32_upper(), __vxge_hw_device::common_reg, vxge_hw_common_reg::titan_mask_all_int, u32, vxge_bVALn, VXGE_HW_TITAN_MASK_ALL_INT_ALARM, and VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC.

Referenced by vxge_hw_device_intr_disable(), vxge_hw_device_intr_enable(), and vxge_irq().

◆ vxge_hw_device_unmask_all()

void vxge_hw_device_unmask_all ( struct __vxge_hw_device * hldev)

vxge_hw_device_unmask_all - Unmask all device interrupts.

@hldev: HW device handle.

Unmask all device interrupts.

See also: vxge_hw_device_mask_all()

Definition at line 232 of file vxge_traffic.c.

233{
235
238
239 return;
240}

References __vxge_hw_pio_mem_write32_upper(), __vxge_hw_device::common_reg, vxge_hw_common_reg::titan_mask_all_int, u32, vxge_bVALn, and VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC.

Referenced by vxge_close(), and vxge_irq().

◆ vxge_hw_vpath_doorbell_rx()

void vxge_hw_vpath_doorbell_rx ( struct __vxge_hw_ring * ring)

vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive descriptors posted.

@ring: Handle to the ring object used for receive

The function writes the number of qwords of rxds posted during replenishment. Since the function is called frequently, a flush is not required to post the write transaction. At the very least, the previous write will be flushed once the subsequent write is made.

Returns: None.

Definition at line 608 of file vxge_traffic.c.

609{
610 u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
612
614
616
617 if (ring->total_db_cnt >= rxds_qw_per_block) {
618 /* For each block add 4 more qwords */
620
621 /* Reset total count */
622 ring->total_db_cnt -= rxds_qw_per_block;
623 }
624
625 if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
626 wmb();
628 ring->doorbell_cnt),
629 &ring->vp_reg->prc_rxd_doorbell);
630 ring->doorbell_cnt = 0;
631 }
632}
#define VXGE_HW_RING_RXD_QWORDS_MODE_1
#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val)
Definition vxge_reg.h:4099

References __vxge_hw_ring::doorbell_cnt, vxge_hw_vpath_reg::prc_rxd_doorbell, __vxge_hw_ring::rxd_qword_limit, __vxge_hw_ring::total_db_cnt, u32, __vxge_hw_ring::vp_reg, VXGE_HW_MAX_RXDS_PER_BLOCK_1, VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT, VXGE_HW_RING_RXD_QWORDS_MODE_1, wmb, and writeq.

Referenced by vxge_hw_vpath_poll_rx().

◆ vxge_hw_vpath_poll_rx()

enum vxge_hw_status vxge_hw_vpath_poll_rx ( struct __vxge_hw_ring * ringh)

Definition at line 642 of file vxge_traffic.c.

643{
644 struct __vxge_hw_device *hldev;
646 struct vxge_hw_ring_rxd_1 *rxd;
647 unsigned int len;
648 enum vxge_hw_ring_tcode tcode;
649 struct io_buffer *rx_iob, *iobuf = NULL;
650 u16 poll_count = 0;
651
652 hldev = ring->vpathh->hldev;
653
654 do {
655 rxd = &ring->rxdl->rxd[ring->rxd_offset];
656 tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);
657
658 /* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
659 * possible the ownership bit still set to adapter
660 */
661 if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
662 && (tcode == VXGE_HW_RING_T_CODE_OK)) {
663
665 goto err0;
666 }
667
668 vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
669 hldev->ndev->name, ring->rxd_offset);
670
671 iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;
672
673 if (tcode != VXGE_HW_RING_T_CODE_OK) {
674 netdev_rx_err(hldev->ndev, NULL, -EINVAL);
675 vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
676 __func__, __LINE__, tcode);
678 goto err1;
679 }
680
682 len -= ETH_FCS_LEN;
683
684 rx_iob = alloc_iob(len);
685 if (!rx_iob) {
686 netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
687 vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
688 __func__, __LINE__);
690 goto err1;
691 }
692
693 memcpy(iob_put(rx_iob, len), iobuf->data, len);
694 /* Add this packet to the receive queue. */
695 netdev_rx(hldev->ndev, rx_iob);
696
697err1:
698 /* repost the rxd */
699 rxd->control_0 = rxd->control_1 = 0;
703
704 /* repost the qword count for doorbell */
706
707 /* increment the descriptor offset */
708 vxge_hw_ring_rxd_offset_up(&ring->rxd_offset);
709
710 } while (++poll_count < ring->rx_poll_weight);
711err0:
712 return status;
713}
#define ETH_FCS_LEN
Definition atl1e.h:45
ring len
Length.
Definition dwmac.h:226
#define EINVAL
Invalid argument.
Definition errno.h:429
#define ENOMEM
Not enough space.
Definition errno.h:535
void * memcpy(void *dest, const void *src, size_t len) __nonnull
#define iob_put(iobuf, len)
Definition iobuf.h:125
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition netdevice.c:549
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition netdevice.c:587
A persistent I/O buffer.
Definition iobuf.h:38
#define u16
Definition vga.h:20
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)
#define VXGE_INFO
Definition vxge_config.h:50
#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)
@ VXGE_HW_FAIL
Definition vxge_config.h:72
@ VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS
Definition vxge_config.h:76
#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER
static void vxge_hw_ring_rxd_offset_up(u16 *offset)
void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ring)
vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive descriptors posted.
vxge_hw_ring_tcode
enum vxge_hw_ring_tcode - Transfer codes returned by adapter @VXGE_HW_RING_T_CODE_OK: Transfer ok.

References alloc_iob(), io_buffer::data, EINVAL, ENOMEM, ETH_FCS_LEN, __vxge_hw_virtualpath::hldev, iob_put, len, memcpy(), net_device::name, __vxge_hw_device::ndev, netdev_rx(), netdev_rx_err(), NULL, __vxge_hw_ring_block::rxd, rxd, __vxge_hw_ring::rxd_offset, __vxge_hw_ring::rxdl, status, u16, __vxge_hw_device::vdev, __vxge_hw_ring::vpathh, vxge_debug, VXGE_ERR, VXGE_HW_ERR_OUT_OF_MEMORY, VXGE_HW_FAIL, VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS, VXGE_HW_OK, VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET, vxge_hw_ring_rxd_1b_set(), VXGE_HW_RING_RXD_LIST_OWN_ADAPTER, vxge_hw_ring_rxd_offset_up(), vxge_hw_ring_rxd_post(), VXGE_HW_RING_RXD_T_CODE_GET, VXGE_HW_RING_T_CODE_OK, vxge_hw_vpath_doorbell_rx(), VXGE_INFO, and VXGE_LL_MAX_FRAME_SIZE.

Referenced by vxge_poll().

◆ vxge_hw_vpath_poll_tx()

enum vxge_hw_status vxge_hw_vpath_poll_tx ( struct __vxge_hw_fifo * fifo)

vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.

@fifo: Handle to the fifo object used for non offload send

The function polls the Tx for the completed descriptors and calls the driver via supplied completion callback.

Definition at line 723 of file vxge_traffic.c.

724{
726 struct vxge_hw_fifo_txd *txdp;
727
728 txdp = fifo->txdl + fifo->hw_offset;
729 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
730 && (txdp->host_control)) {
731
732 vxge_xmit_compl(fifo, txdp,
734
736 }
737
738 return status;
739}
#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)
enum vxge_hw_status vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, struct vxge_hw_fifo_txd *txdp, enum vxge_hw_fifo_tcode tcode)
Definition vxge_main.c:65

References __vxge_hw_fifo::hw_offset, status, __vxge_hw_fifo::txdl, txdp, VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER, vxge_hw_fifo_txd_offset_up(), VXGE_HW_FIFO_TXD_T_CODE_GET, VXGE_HW_OK, and vxge_xmit_compl().

Referenced by vxge_poll().

◆ vxge_hw_fifo_free_txdl_get()

struct vxge_hw_fifo_txd * vxge_hw_fifo_free_txdl_get ( struct __vxge_hw_fifo * fifo)

vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo

@fifo: tx channel handle

Definition at line 355 of file vxge_traffic.c.

356{
357 struct vxge_hw_fifo_txd *txdp;
358
359 txdp = fifo->txdl + fifo->sw_offset;
360 if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
361 vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
362 __func__, __LINE__, fifo->sw_offset);
363 return NULL;
364 }
365
366 return txdp;
367}

References NULL, __vxge_hw_fifo::sw_offset, __vxge_hw_fifo::txdl, txdp, vxge_debug, VXGE_ERR, and VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER.

Referenced by vxge_xmit().