iPXE
vxge_config.h
Go to the documentation of this file.
00001 /*
00002  * vxge-config.h: iPXE driver for Neterion Inc's X3100 Series 10GbE
00003  *              PCIe I/O Virtualized Server Adapter.
00004  *
00005  * Copyright(c) 2002-2010 Neterion Inc.
00006  *
00007  * This software may be used and distributed according to the terms of
00008  * the GNU General Public License (GPL), incorporated herein by
00009  * reference.  Drivers based on or derived from this code fall under
00010  * the GPL and must retain the authorship, copyright and license
00011  * notice.
00012  *
00013  */
00014 
00015 FILE_LICENCE(GPL2_ONLY);
00016 
00017 #ifndef VXGE_CONFIG_H
00018 #define VXGE_CONFIG_H
00019 
00020 #include <stdint.h>
00021 #include <ipxe/list.h>
00022 #include <ipxe/pci.h>
00023 
#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 4096
#endif

/* Scaling factor applied to device wait/poll loops */
#define WAIT_FACTOR          1

/* MAC port counts on the adapter */
#define VXGE_HW_MAC_MAX_WIRE_PORTS      2
#define VXGE_HW_MAC_MAX_AGGR_PORTS      2
#define VXGE_HW_MAC_MAX_PORTS           3

/* Supported MTU range and default */
#define VXGE_HW_MIN_MTU                         68
#define VXGE_HW_MAX_MTU                         9600
#define VXGE_HW_DEFAULT_MTU                     1500

/* Stub out Linux kernel annotations when not provided by the build */
#ifndef __iomem
#define __iomem
#endif

#ifndef ____cacheline_aligned
#define ____cacheline_aligned
#endif

/**
 * debug filtering masks (see vxge_debug() below)
 */
#define VXGE_NONE       0x00
#define VXGE_INFO       0x01
#define VXGE_INTR       0x02
#define VXGE_XMIT       0x04
#define VXGE_POLL       0x08
#define VXGE_ERR        0x10
#define VXGE_TRACE      0x20
#define VXGE_ALL        (VXGE_INFO|VXGE_INTR|VXGE_XMIT\
                        |VXGE_POLL|VXGE_ERR|VXGE_TRACE)

/* Marker for an invalid/unassigned virtual path id */
#define NULL_VPID                                       0xFFFFFFFF

#define VXGE_HW_EVENT_BASE                      0
#define VXGE_LL_EVENT_BASE                      100

/* Base offsets for the vxge_hw_status code ranges (see enum below) */
#define VXGE_HW_BASE_INF        100
#define VXGE_HW_BASE_ERR        200
#define VXGE_HW_BASE_BADCFG     300
/* Default device poll timeout, in milliseconds */
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS            1000
#define VXGE_HW_MAX_PAYLOAD_SIZE_512            2
00069 
/**
 * enum vxge_hw_status - Status codes returned by the HW layer.
 *
 * Codes 0-3 are generic results.  Codes from VXGE_HW_BASE_INF upward
 * are informational, codes from VXGE_HW_BASE_ERR upward are errors,
 * and codes from VXGE_HW_BASE_BADCFG upward flag bad configuration.
 */
enum vxge_hw_status {
        VXGE_HW_OK                                = 0,
        VXGE_HW_FAIL                              = 1,
        VXGE_HW_PENDING                           = 2,
        VXGE_HW_COMPLETIONS_REMAIN                = 3,

        VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
        VXGE_HW_INF_OUT_OF_DESCRIPTORS            = VXGE_HW_BASE_INF + 2,
        VXGE_HW_INF_SW_LRO_BEGIN                  = VXGE_HW_BASE_INF + 3,
        VXGE_HW_INF_SW_LRO_CONT                   = VXGE_HW_BASE_INF + 4,
        VXGE_HW_INF_SW_LRO_UNCAPABLE              = VXGE_HW_BASE_INF + 5,
        VXGE_HW_INF_SW_LRO_FLUSH_SESSION          = VXGE_HW_BASE_INF + 6,
        VXGE_HW_INF_SW_LRO_FLUSH_BOTH             = VXGE_HW_BASE_INF + 7,

        VXGE_HW_ERR_INVALID_HANDLE                = VXGE_HW_BASE_ERR + 1,
        VXGE_HW_ERR_OUT_OF_MEMORY                 = VXGE_HW_BASE_ERR + 2,
        VXGE_HW_ERR_VPATH_NOT_AVAILABLE           = VXGE_HW_BASE_ERR + 3,
        VXGE_HW_ERR_VPATH_NOT_OPEN                = VXGE_HW_BASE_ERR + 4,
        VXGE_HW_ERR_WRONG_IRQ                     = VXGE_HW_BASE_ERR + 5,
        VXGE_HW_ERR_SWAPPER_CTRL                  = VXGE_HW_BASE_ERR + 6,
        VXGE_HW_ERR_INVALID_MTU_SIZE              = VXGE_HW_BASE_ERR + 7,
        VXGE_HW_ERR_INVALID_INDEX                 = VXGE_HW_BASE_ERR + 8,
        VXGE_HW_ERR_INVALID_TYPE                  = VXGE_HW_BASE_ERR + 9,
        VXGE_HW_ERR_INVALID_OFFSET                = VXGE_HW_BASE_ERR + 10,
        VXGE_HW_ERR_INVALID_DEVICE                = VXGE_HW_BASE_ERR + 11,
        VXGE_HW_ERR_VERSION_CONFLICT              = VXGE_HW_BASE_ERR + 12,
        VXGE_HW_ERR_INVALID_PCI_INFO              = VXGE_HW_BASE_ERR + 13,
        VXGE_HW_ERR_INVALID_TCODE                 = VXGE_HW_BASE_ERR + 14,
        VXGE_HW_ERR_INVALID_BLOCK_SIZE            = VXGE_HW_BASE_ERR + 15,
        VXGE_HW_ERR_INVALID_STATE                 = VXGE_HW_BASE_ERR + 16,
        VXGE_HW_ERR_PRIVILAGED_OPEARATION         = VXGE_HW_BASE_ERR + 17,
        VXGE_HW_ERR_INVALID_PORT                  = VXGE_HW_BASE_ERR + 18,
        VXGE_HW_ERR_FIFO                          = VXGE_HW_BASE_ERR + 19,
        VXGE_HW_ERR_VPATH                         = VXGE_HW_BASE_ERR + 20,
        VXGE_HW_ERR_CRITICAL                      = VXGE_HW_BASE_ERR + 21,
        VXGE_HW_ERR_SLOT_FREEZE                   = VXGE_HW_BASE_ERR + 22,
        /* note: codes +23 and +24 are intentionally unassigned here */
        VXGE_HW_ERR_INVALID_MIN_BANDWIDTH         = VXGE_HW_BASE_ERR + 25,
        VXGE_HW_ERR_INVALID_MAX_BANDWIDTH         = VXGE_HW_BASE_ERR + 26,
        VXGE_HW_ERR_INVALID_TOTAL_BANDWIDTH       = VXGE_HW_BASE_ERR + 27,
        VXGE_HW_ERR_INVALID_BANDWIDTH_LIMIT       = VXGE_HW_BASE_ERR + 28,
        VXGE_HW_ERR_RESET_IN_PROGRESS             = VXGE_HW_BASE_ERR + 29,
        VXGE_HW_ERR_OUT_OF_SPACE                  = VXGE_HW_BASE_ERR + 30,
        VXGE_HW_ERR_INVALID_FUNC_MODE             = VXGE_HW_BASE_ERR + 31,
        VXGE_HW_ERR_INVALID_DP_MODE               = VXGE_HW_BASE_ERR + 32,
        VXGE_HW_ERR_INVALID_FAILURE_BEHAVIOUR     = VXGE_HW_BASE_ERR + 33,
        VXGE_HW_ERR_INVALID_L2_SWITCH_STATE       = VXGE_HW_BASE_ERR + 34,
        VXGE_HW_ERR_INVALID_CATCH_BASIN_MODE      = VXGE_HW_BASE_ERR + 35,

        VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS     = VXGE_HW_BASE_BADCFG + 1,
        VXGE_HW_BADCFG_FIFO_BLOCKS                = VXGE_HW_BASE_BADCFG + 2,
        VXGE_HW_BADCFG_VPATH_MTU                  = VXGE_HW_BASE_BADCFG + 3,
        VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG   = VXGE_HW_BASE_BADCFG + 4,
        VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH        = VXGE_HW_BASE_BADCFG + 5,
        VXGE_HW_BADCFG_VPATH_BANDWIDTH_LIMIT      = VXGE_HW_BASE_BADCFG + 6,
        VXGE_HW_BADCFG_INTR_MODE                  = VXGE_HW_BASE_BADCFG + 7,
        VXGE_HW_BADCFG_RTS_MAC_EN                 = VXGE_HW_BASE_BADCFG + 8,
        VXGE_HW_BADCFG_VPATH_AGGR_ACK             = VXGE_HW_BASE_BADCFG + 9,
        VXGE_HW_BADCFG_VPATH_PRIORITY             = VXGE_HW_BASE_BADCFG + 10,

        VXGE_HW_EOF_TRACE_BUF                     = -1
};
00131 
/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 *
 */
enum vxge_hw_device_link_state {
        VXGE_HW_LINK_NONE,
        VXGE_HW_LINK_DOWN,
        VXGE_HW_LINK_UP
};
00144 
00145 /*forward declaration*/
00146 struct vxge_vpath;
00147 struct __vxge_hw_virtualpath;
00148 
/**
 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
 *
 * One buffer mode RxD for ring structure.  The macros below decode
 * fields from the two 64-bit control words.
 */
struct vxge_hw_ring_rxd_1 {
        /* Driver-private cookie; vxge_hw_ring_rxd_1b_set() stores the
         * io_buffer pointer here so it can be recovered on completion */
        u64 host_control;
        /* Control word 0: ownership, t-code, checksum/protocol bits */
        u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0)          vxge_bVALn(ctrl0, 0, 7)

#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER               vxge_mBIT(7)

#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0)  vxge_bVALn(ctrl0, 8, 1)

#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0)    vxge_bVALn(ctrl0, 9, 1)

#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0)    vxge_bVALn(ctrl0, 10, 1)

#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)              vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val)                    vxge_vBIT(val, 12, 4)

#define VXGE_HW_RING_RXD_T_CODE_UNUSED          VXGE_HW_RING_T_CODE_UNUSED

#define VXGE_HW_RING_RXD_SYN_GET(ctrl0)         vxge_bVALn(ctrl0, 16, 1)

#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0)             vxge_bVALn(ctrl0, 17, 1)

#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0)        vxge_bVALn(ctrl0, 18, 1)

#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0)          vxge_bVALn(ctrl0, 19, 1)

#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0)       vxge_bVALn(ctrl0, 20, 4)

#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0)             vxge_bVALn(ctrl0, 24, 1)

#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0)         vxge_bVALn(ctrl0, 25, 2)

#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0)         vxge_bVALn(ctrl0, 27, 5)

#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0)    vxge_bVALn(ctrl0, 32, 16)

#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0)    vxge_bVALn(ctrl0, 48, 16)

        /* Control word 1: buffer size, RTH hash value, VLAN tag */
        u64 control_1;

#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)      vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK            vxge_vBIT(0x3FFF, 2, 14)

#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1)    vxge_bVALn(ctrl1, 16, 32)

#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1)    vxge_bVALn(ctrl1, 48, 16)

        /* Bus/DMA address of the single receive buffer */
        u64 buffer0_ptr;
};
00204 
/**
 * struct vxge_hw_fifo_txd - Transmit Descriptor
 *
 * Transmit descriptor (TxD).  Fifo descriptor contains configured number
 * (list) of TxDs.  For more details please refer to Titan User Guide,
 * Section 5.4.2 "Transmit Descriptor (TxD) Format".
 */
struct vxge_hw_fifo_txd {
        /* Control word 0: ownership, t-code, gather code, LSO, size */
        u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER               vxge_mBIT(7)

#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)              vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val)                    vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED          VXGE_HW_FIFO_T_CODE_UNUSED

#define VXGE_HW_FIFO_TXD_GATHER_CODE(val)               vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST      VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST       VXGE_HW_FIFO_GATHER_CODE_LAST

#define VXGE_HW_FIFO_TXD_LSO_EN                         vxge_mBIT(30)
#define VXGE_HW_FIFO_TXD_LSO_MSS(val)                   vxge_vBIT(val, 34, 14)
#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val)               vxge_vBIT(val, 48, 16)

        /* Control word 1: checksum offload, VLAN, interrupt controls */
        u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN                 vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN                  vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN                  vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE                    vxge_mBIT(15)

#define VXGE_HW_FIFO_TXD_VLAN_TAG(val)                  vxge_vBIT(val, 16, 16)
#define VXGE_HW_FIFO_TXD_NO_BW_LIMIT                    vxge_mBIT(43)

#define VXGE_HW_FIFO_TXD_INT_NUMBER(val)                vxge_vBIT(val, 34, 6)

#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST              vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ                 vxge_mBIT(47)

        /* Bus/DMA address of the transmit buffer */
        u64 buffer_pointer;

        /* Driver-private cookie, opaque to the adapter */
        u64 host_control;
};
00246 
/**
 * struct vxge_hw_device_date - Date Format
 * @day: Day
 * @month: Month
 * @year: Year
 * @date: Date in string format
 *
 * Structure for returning date
 */

/* Length of the firmware/flash version and date strings */
#define VXGE_HW_FW_STRLEN       32
struct vxge_hw_device_date {
        u32     day;
        u32     month;
        u32     year;
        char    date[VXGE_HW_FW_STRLEN];
};
00264 
/**
 * struct vxge_hw_device_version - Version Format
 * @major: Major version number
 * @minor: Minor version number
 * @build: Build number
 * @version: Version in string format
 *
 * Structure for returning a firmware or flash version
 */
struct vxge_hw_device_version {
        u32     major;
        u32     minor;
        u32     build;
        char    version[VXGE_HW_FW_STRLEN];
};
00271 
00272 u64 __vxge_hw_vpath_pci_func_mode_get(
00273         u32 vp_id,
00274         struct vxge_hw_vpath_reg __iomem *vpath_reg);
00275 
/*
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0: Bits 0 to 7 - Doorbell type.
 *             Bits 8 to 31 - Reserved.
 *             Bits 32 to 39 - The highest TxD in this TxDL.
 *             Bits 40 to 47 - Reserved.
 *             Bits 48 to 55 - Reserved.
 *             Bits 56 to 63 - No snoop flags.
 * @txdl_ptr:  The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
 * part of a doorbell write. Consumed by the adapter but is not written by the
 * adapter.
 */
struct __vxge_hw_non_offload_db_wrapper {
        u64             control_0;
/* Doorbell type, bits 0-7 */
#define VXGE_HW_NODBW_GET_TYPE(ctrl0)                   vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_NODBW_TYPE_NODBW                                0

/* Highest TxD number in this TxDL, bits 32-39 */
#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0)        vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)

/* No-snoop flags, bits 56-63 */
#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0)               vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE         0x2
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ          0x1

        u64             txdl_ptr;
};
00307 
/*
 * struct __vxge_hw_fifo - Fifo.
 * @vp_reg: Virtual path register space
 * @nofl_db: Non-offload doorbell wrapper for this fifo
 * @vp_id: Virtual path id
 * @tx_intr_num: Interrupt Number associated with the TX
 * @txdl: Start pointer of the txdl list of this fifo.
 *        iPXE does not support tx fragmentation, so we need
 *        only one txd in a list
 * @depth: total number of lists in this fifo
 * @hw_offset: txd index from where adapter owns the txd list
 * @sw_offset: txd index from where driver owns the txd list
 * @vpathh: Virtual path this fifo belongs to
 */
struct __vxge_hw_fifo {
        struct vxge_hw_vpath_reg                *vp_reg;
        struct __vxge_hw_non_offload_db_wrapper *nofl_db;
        u32                                     vp_id;
        u32                                     tx_intr_num;

        struct vxge_hw_fifo_txd         *txdl;
#define VXGE_HW_FIFO_TXD_DEPTH 128
        u16                             depth;
        u16                             hw_offset;
        u16                             sw_offset;

        struct __vxge_hw_virtualpath    *vpathh;
};
00336 
/* Structure that represents one Rx descriptor block.  Holds
 * VXGE_HW_MAX_RXDS_PER_BLOCK_1 (127) one-buffer-mode descriptors plus a
 * 32-byte trailer (four u64s) used to mark the end of the block and to
 * link to the next block.
 */
struct __vxge_hw_ring_block {
#define VXGE_HW_MAX_RXDS_PER_BLOCK_1            127
        struct vxge_hw_ring_rxd_1 rxd[VXGE_HW_MAX_RXDS_PER_BLOCK_1];

        u64 reserved_0;
#define END_OF_BLOCK    0xFEFFFFFFFFFFFFFFULL
        /* 0xFEFFFFFFFFFFFFFF to mark last Rxd in this blk */
        u64 reserved_1;
        /* Logical ptr to next */
        u64 reserved_2_pNext_RxD_block;
        /* Buff0_ptr.In a 32 bit arch the upper 32 bits should be 0 */
        u64 pNext_RxD_Blk_physical;
};
00353 
/*
 * struct __vxge_hw_ring - Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 *       CPU cache performance.
 */
struct __vxge_hw_ring {
        struct vxge_hw_vpath_reg                *vp_reg;
        struct vxge_hw_common_reg               *common_reg;
        u32                                     vp_id;
#define VXGE_HW_RING_RXD_QWORDS_MODE_1  4
        /* doorbell accounting counters */
        u32                                     doorbell_cnt;
        u32                                     total_db_cnt;
#define VXGE_HW_RING_RXD_QWORD_LIMIT    16
        u64                                     rxd_qword_limit;

        /* Rx descriptor block backing this ring */
        struct __vxge_hw_ring_block             *rxdl;
#define VXGE_HW_RING_BUF_PER_BLOCK      9
        u16                                     buf_per_block;
        /* index of the next descriptor within the block */
        u16                                     rxd_offset;

#define VXGE_HW_RING_RX_POLL_WEIGHT     8
        u16                                     rx_poll_weight;

        /* io_buffers posted to the ring (one spare slot) */
        struct io_buffer *iobuf[VXGE_HW_RING_BUF_PER_BLOCK + 1];
        /* virtual path this ring belongs to */
        struct __vxge_hw_virtualpath *vpathh;
};
00381 
/*
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
        u32                             vp_id;

        /* VXGE_HW_VP_OPEN or VXGE_HW_VP_NOT_OPEN */
        u32                             vp_open;
#define VXGE_HW_VP_NOT_OPEN     0
#define VXGE_HW_VP_OPEN         1

        struct __vxge_hw_device         *hldev;         /* owning device */
        struct vxge_hw_vpath_reg        *vp_reg;
        struct vxge_hw_vpmgmt_reg       *vpmgmt_reg;
        struct __vxge_hw_non_offload_db_wrapper *nofl_db;

        u32                             max_mtu;
        u32                             vsport_number;
        u32                             max_kdfc_db;
        u32                             max_nofl_db;

        /* The single Rx ring and Tx fifo used by this virtual path */
        struct __vxge_hw_ring ringh;
        struct __vxge_hw_fifo fifoh;
};
#define VXGE_HW_INFO_LEN        64
#define VXGE_HW_PMD_INFO_LEN    16
#define VXGE_MAX_PRINT_BUF_SIZE 128
/**
 * struct vxge_hw_device_hw_info - Device information
 * @host_type: Host Type
 * @function_mode: PCI function mode (single/multi-function, SR-IOV, ...)
 * @func_id: Function Id
 * @vpath_mask: vpath bit mask
 * @fw_version: Firmware version
 * @fw_date: Firmware Date
 * @flash_version: Flash version
 * @flash_date: Flash Date
 * @serial_number: Adapter serial number string
 * @part_number: Adapter part number string
 * @product_desc: Adapter product description string
 * @mac_addrs: Mac addresses for each vpath
 * @mac_addr_masks: Mac address masks for each vpath
 *
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver and the first mac address for each vpath
 */
struct vxge_hw_device_hw_info {
        u32             host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION                     0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION                      1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0                          2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION                   3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG                        4
#define VXGE_HW_SR_VH_FUNCTION0                                 5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION                          6
#define VXGE_HW_VH_NORMAL_FUNCTION                              7
        u64             function_mode;
#define VXGE_HW_FUNCTION_MODE_MIN                               0
#define VXGE_HW_FUNCTION_MODE_MAX                               11

#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION                   0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION                    1
#define VXGE_HW_FUNCTION_MODE_SRIOV                             2
#define VXGE_HW_FUNCTION_MODE_MRIOV                             3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8                           4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17                 5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8                           6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4                           7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2                  8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4                  9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4                           10
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_DIRECT_IO          11

        u32             func_id;
        u64             vpath_mask;
        struct vxge_hw_device_version fw_version;
        struct vxge_hw_device_date    fw_date;
        struct vxge_hw_device_version flash_version;
        struct vxge_hw_device_date    flash_date;
        u8              serial_number[VXGE_HW_INFO_LEN];
        u8              part_number[VXGE_HW_INFO_LEN];
        u8              product_desc[VXGE_HW_INFO_LEN];
        u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
        u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};
00466 
/**
 * struct __vxge_hw_device  - Hal device object
 * @magic: Magic Number
 * @bar0: BAR0 virtual address.
 * @pdev: Physical device handle
 * @ndev: Associated network device
 * @vdev: Associated LL driver device
 * @link_state: Link state
 *
 * HW device object. Represents Titan adapter
 */
struct __vxge_hw_device {
        u32                             magic;
#define VXGE_HW_DEVICE_MAGIC            0x12345678
#define VXGE_HW_DEVICE_DEAD             0xDEADDEAD
        void __iomem                    *bar0;
        struct pci_device               *pdev;
        struct net_device               *ndev;
        struct vxgedev                  *vdev;

        enum vxge_hw_device_link_state  link_state;

        u32                             host_type;
        u32                             func_id;
        /* non-zero for Titan-1 revision hardware */
        u8                              titan1;
        u32                             access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH      0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM     0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM     0x4
        /* Mapped register spaces (located via the ToC) */
        struct vxge_hw_legacy_reg       *legacy_reg;
        struct vxge_hw_toc_reg          *toc_reg;
        struct vxge_hw_common_reg       *common_reg;
        struct vxge_hw_mrpcim_reg       *mrpcim_reg;
        struct vxge_hw_srpcim_reg       *srpcim_reg \
                                        [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
        struct vxge_hw_vpmgmt_reg       *vpmgmt_reg \
                                        [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
        struct vxge_hw_vpath_reg        *vpath_reg \
                                        [VXGE_HW_TITAN_VPATH_REG_SPACES];
        u8                              *kdfc;
        u8                              *usdc;
        /* iPXE uses a single virtual path */
        struct __vxge_hw_virtualpath    virtual_path;
        u64                             vpath_assignments;
        u64                             vpaths_deployed;
        u32                             first_vp_id;
        u64                             tim_int_mask0[4];
        u32                             tim_int_mask1[4];

        struct vxge_hw_device_hw_info   hw_info;
};
00516 
/* Update the cached link state on the HW device object. */
#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) ((hldev)->link_state = (ls))

/*
 * Enable/disable the traffic-interrupt bits for interrupt number @i in
 * the TIM mask pairs: interrupts 0-15 occupy 4-bit fields in the 64-bit
 * m0[] pair, all higher numbers share the fixed bits in the 32-bit m1[]
 * pair.  Wrapped in do { } while (0) so each macro expands to exactly
 * one statement and is safe inside an unbraced if/else at the call site
 * (the original bare { if ... else ... } form broke there).
 */
#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) do {         \
        if ((i) < 16) {                                         \
                (m0)[0] |= vxge_vBIT(0x8, ((i)*4), 4);          \
                (m0)[1] |= vxge_vBIT(0x4, ((i)*4), 4);          \
        } else {                                                \
                (m1)[0] = 0x80000000;                           \
                (m1)[1] = 0x40000000;                           \
        }                                                       \
} while (0)

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) do {       \
        if ((i) < 16) {                                         \
                (m0)[0] &= ~vxge_vBIT(0x8, ((i)*4), 4);         \
                (m0)[1] &= ~vxge_vBIT(0x4, ((i)*4), 4);         \
        } else {                                                \
                (m1)[0] = 0;                                    \
                (m1)[1] = 0;                                    \
        }                                                       \
} while (0)
00540 
/**
 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
 * device.
 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
 * filling-in and posting later.
 *
 * Titan/HW descriptor states.
 *
 */
enum vxge_hw_txdl_state {
        VXGE_HW_TXDL_STATE_NONE = 0,
        VXGE_HW_TXDL_STATE_AVAIL        = 1,
        VXGE_HW_TXDL_STATE_POSTED       = 2,
        VXGE_HW_TXDL_STATE_FREED        = 3
};
00559 
00560 
00561 /* fifo and ring circular buffer offset tracking apis */
00562 static inline void __vxge_hw_desc_offset_up(u16 upper_limit,
00563                         u16 *offset)
00564 {
00565         if (++(*offset) >= upper_limit)
00566                 *offset = 0;
00567 }
00568 
00569 /* rxd offset handling apis */
00570 static inline void vxge_hw_ring_rxd_offset_up(u16 *offset)
00571 {
00572         __vxge_hw_desc_offset_up(VXGE_HW_MAX_RXDS_PER_BLOCK_1,
00573                         offset);
00574 }
00575 /* txd offset handling apis */
00576 static inline void vxge_hw_fifo_txd_offset_up(u16 *offset)
00577 {
00578         __vxge_hw_desc_offset_up(VXGE_HW_FIFO_TXD_DEPTH, offset);
00579 }
00580 
/**
 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
 * @rxdp: Descriptor to fill in.
 * @iob: I/O buffer whose data area this descriptor should carry.
 * Note that by the time vxge_hw_ring_rxd_1b_set is called, the
 * receive buffer should be already mapped to the device
 * @size: Size of the receive buffer, in bytes.
 *
 * Prepare 1-buffer-mode Rx     descriptor for posting
 * (via vxge_hw_ring_rxd_post()).
 *
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 */
static inline
void vxge_hw_ring_rxd_1b_set(struct vxge_hw_ring_rxd_1 *rxdp,
        struct io_buffer *iob, u32 size)
{
        /* Stash the io_buffer pointer so it can be recovered on completion */
        rxdp->host_control = (intptr_t)(iob);
        rxdp->buffer0_ptr = virt_to_bus(iob->data);
        /* Replace any previous buffer size with the new one */
        rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
        rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}
00605 
00606 enum vxge_hw_status vxge_hw_device_hw_info_get(
00607         struct pci_device *pdev,
00608         void __iomem *bar0,
00609         struct vxge_hw_device_hw_info *hw_info);
00610 
00611 enum vxge_hw_status
00612 __vxge_hw_vpath_fw_ver_get(
00613         struct vxge_hw_vpath_reg __iomem *vpath_reg,
00614         struct vxge_hw_device_hw_info *hw_info);
00615 
00616 enum vxge_hw_status
00617 __vxge_hw_vpath_card_info_get(
00618         struct vxge_hw_vpath_reg __iomem *vpath_reg,
00619         struct vxge_hw_device_hw_info *hw_info);
00620 
00621 /**
00622  * vxge_hw_device_link_state_get - Get link state.
00623  * @devh: HW device handle.
00624  *
00625  * Get link state.
00626  * Returns: link state.
00627  */
00628 static inline
00629 enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
00630         struct __vxge_hw_device *devh)
00631 {
00632         return devh->link_state;
00633 }
00634 
00635 void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
00636 
00637 enum vxge_hw_status vxge_hw_device_initialize(
00638         struct __vxge_hw_device **devh,
00639         void *bar0,
00640         struct pci_device *pdev,
00641         u8 titan1);
00642 
00643 enum vxge_hw_status
00644 vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath);
00645 
00646 enum vxge_hw_status
00647 __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
00648 
00649 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath);
00650 
00651 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath);
00652 
00653 enum vxge_hw_status
00654 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath);
00655 
00656 void
00657 vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath);
00658 
00659 enum vxge_hw_status
00660 vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu);
00661 
00662 void
00663 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath);
00664 
00665 void
00666 __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
00667 
00668 enum vxge_hw_status
00669 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
00670 
00671 enum vxge_hw_status
00672 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
00673 
00674 enum vxge_hw_status
00675 __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
00676         struct vxge_hw_vpath_reg __iomem *vpath_reg);
00677 
00678 enum vxge_hw_status
00679 __vxge_hw_device_register_poll(
00680         void __iomem    *reg,
00681         u64 mask, u32 max_millis);
00682 
00683 #ifndef readq
00684 static inline u64 readq(void __iomem *addr)
00685 {
00686         u64 ret = 0;
00687         ret = readl(addr + 4);
00688         ret <<= 32;
00689         ret |= readl(addr);
00690 
00691         return ret;
00692 }
00693 #endif
00694 
00695 #ifndef writeq
00696 static inline void writeq(u64 val, void __iomem *addr)
00697 {
00698         writel((u32) (val), addr);
00699         writel((u32) (val >> 32), (addr + 4));
00700 }
00701 #endif
00702 
/* Write @val to the upper 32 bits of the 64-bit register at @addr. */
static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
        writel(val, addr + 4);
}

/* Write @val to the lower 32 bits of the 64-bit register at @addr. */
static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
        writel(val, addr);
}
00712 
00713 static inline enum vxge_hw_status
00714 __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
00715                           u64 mask, u32 max_millis)
00716 {
00717         enum vxge_hw_status status = VXGE_HW_OK;
00718 
00719         __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
00720         wmb();
00721         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
00722         wmb();
00723 
00724         status = __vxge_hw_device_register_poll(addr, mask, max_millis);
00725         return status;
00726 }
00727 
00728 void
00729 __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
00730 
00731 enum vxge_hw_status
00732 __vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
00733 
00734 enum vxge_hw_status
00735 __vxge_hw_vpath_pci_read(
00736         struct __vxge_hw_virtualpath    *vpath,
00737         u32                     phy_func_0,
00738         u32                     offset,
00739         u32                     *val);
00740 
00741 enum vxge_hw_status
00742 __vxge_hw_vpath_addr_get(
00743         struct vxge_hw_vpath_reg __iomem *vpath_reg,
00744         u8 (macaddr)[ETH_ALEN],
00745         u8 (macaddr_mask)[ETH_ALEN]);
00746 
00747 u32
00748 __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
00749 
00750 enum vxge_hw_status
00751 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
00752 
00753 enum vxge_hw_status
00754 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
00755 
00756 /**
00757  * vxge_debug
00758  * @mask: mask for the debug
00759  * @fmt: printf like format string
00760  */
00761 static const u16 debug_filter = VXGE_ERR;
00762 #define vxge_debug(mask, fmt...)        do {    \
00763                 if (debug_filter & mask)        \
00764                         DBG(fmt);               \
00765         } while (0);
00766 
00767 #define vxge_trace()    vxge_debug(VXGE_TRACE, "%s:%d\n", __func__, __LINE__);
00768 
00769 enum vxge_hw_status
00770 vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode);
00771 
00772 enum vxge_hw_status
00773 vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
00774                 u64 vp_id, u32 action,
00775                 u32 offset, u64 data0, u64 data1);
00776 void
00777 vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev);
00778 
00779 #endif