iPXE
vxge_config.c
Go to the documentation of this file.
00001 /*
00002  * vxge-config.c: iPXE driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
00003  *              Virtualized Server Adapter.
00004  *
00005  * Copyright(c) 2002-2010 Neterion Inc.
00006  *
00007  * This software may be used and distributed according to the terms of
00008  * the GNU General Public License (GPL), incorporated herein by
00009  * reference.  Drivers based on or derived from this code fall under
00010  * the GPL and must retain the authorship, copyright and license
00011  * notice.
00012  *
00013  */
00014 
00015 FILE_LICENCE(GPL2_ONLY);
00016 
00017 #include <stdlib.h>
00018 #include <stdio.h>
00019 #include <ipxe/malloc.h>
00020 #include <ipxe/pci.h>
00021 #include <ipxe/iobuf.h>
00022 #include <ipxe/ethernet.h>
00023 #include <byteswap.h>
00024 
00025 #include "vxge_traffic.h"
00026 #include "vxge_config.h"
00027 #include "vxge_main.h"
00028 
00029 void
00030 vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev)
00031 {
00032         u64 val64;
00033         struct __vxge_hw_virtualpath *vpath;
00034         struct vxge_hw_vpath_reg __iomem *vp_reg;
00035 
00036         vpath = &hldev->virtual_path;
00037         vp_reg = vpath->vp_reg;
00038 
00039         val64 = readq(&vp_reg->rxmac_vcfg0);
00040         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
00041         writeq(val64, &vp_reg->rxmac_vcfg0);
00042         val64 = readq(&vp_reg->rxmac_vcfg0);
00043         return;
00044 }
00045 
00046 enum vxge_hw_status
00047 vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
00048                 u64 vp_id,
00049                 u32 action,
00050                 u32 offset,
00051                 u64 data0,
00052                 u64 data1)
00053 {
00054         enum vxge_hw_status status = VXGE_HW_OK;
00055         u64 val64;
00056         u32 fw_memo = VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO;
00057 
00058         struct vxge_hw_vpath_reg __iomem *vp_reg;
00059 
00060         vp_reg = (struct vxge_hw_vpath_reg __iomem *)hldev->vpath_reg[vp_id];
00061 
00062         writeq(data0, &vp_reg->rts_access_steer_data0);
00063         writeq(data1, &vp_reg->rts_access_steer_data1);
00064 
00065         wmb();
00066 
00067         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
00068                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
00069                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
00070                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE;
00071 
00072         writeq(val64, &vp_reg->rts_access_steer_ctrl);
00073 
00074         wmb();
00075 
00076         status = __vxge_hw_device_register_poll(
00077                         &vp_reg->rts_access_steer_ctrl,
00078                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
00079                         WAIT_FACTOR *
00080                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
00081 
00082         if (status != VXGE_HW_OK)
00083                 return VXGE_HW_FAIL;
00084 
00085         val64 = readq(&vp_reg->rts_access_steer_ctrl);
00086 
00087         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
00088                 status = VXGE_HW_OK;
00089         else
00090                 status = VXGE_HW_FAIL;
00091 
00092         return status;
00093 }
00094 
00095 /* Get function mode */
00096 enum vxge_hw_status
00097 vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode)
00098 {
00099         enum vxge_hw_status status = VXGE_HW_OK;
00100         struct vxge_hw_vpath_reg __iomem *vp_reg;
00101         u64 val64;
00102         int vp_id;
00103 
00104         /* get the first vpath number assigned to this function */
00105         vp_id = hldev->first_vp_id;
00106 
00107         vp_reg = (struct vxge_hw_vpath_reg __iomem *)hldev->vpath_reg[vp_id];
00108 
00109         status = vxge_hw_set_fw_api(hldev, vp_id,
00110                                 VXGE_HW_FW_API_GET_FUNC_MODE, 0, 0, 0);
00111 
00112         if (status == VXGE_HW_OK) {
00113                 val64 = readq(&vp_reg->rts_access_steer_data0);
00114                 *func_mode = VXGE_HW_GET_FUNC_MODE_VAL(val64);
00115         }
00116 
00117         return status;
00118 }
00119 
00120 /*
00121  * __vxge_hw_device_pci_e_init
00122  * Initialize certain PCI/PCI-X configuration registers
00123  * with recommended values. Save config space for future hw resets.
00124  */
00125 void
00126 __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
00127 {
00128         u16 cmd = 0;
00129         struct pci_device *pdev = hldev->pdev;
00130 
00131         vxge_trace();
00132 
00133         /* Set the PErr Repconse bit and SERR in PCI command register. */
00134         pci_read_config_word(pdev, PCI_COMMAND, &cmd);
00135         cmd |= 0x140;
00136         pci_write_config_word(pdev, PCI_COMMAND, cmd);
00137 
00138         return;
00139 }
00140 
00141 /*
00142  * __vxge_hw_device_register_poll
00143  * Will poll certain register for specified amount of time.
00144  * Will poll until masked bit is not cleared.
00145  */
00146 enum vxge_hw_status
00147 __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
00148 {
00149         u64 val64;
00150         u32 i = 0;
00151         enum vxge_hw_status ret = VXGE_HW_FAIL;
00152 
00153         udelay(10);
00154 
00155         do {
00156                 val64 = readq(reg);
00157                 if (!(val64 & mask))
00158                         return VXGE_HW_OK;
00159                 udelay(100);
00160         } while (++i <= 9);
00161 
00162         i = 0;
00163         do {
00164                 val64 = readq(reg);
00165                 if (!(val64 & mask))
00166                         return VXGE_HW_OK;
00167                 udelay(1000);
00168         } while (++i <= max_millis);
00169 
00170         return ret;
00171 }
00172 
00173  /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
00174  * in progress
00175  * This routine checks the vpath reset in progress register is turned zero
00176  */
00177 enum vxge_hw_status
00178 __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
00179 {
00180         enum vxge_hw_status status;
00181 
00182         vxge_trace();
00183 
00184         status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
00185                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
00186                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
00187         return status;
00188 }
00189 
00190 /*
00191  * __vxge_hw_device_get_legacy_reg
00192  * This routine gets the legacy register section's memory mapped address
00193  * and sets the swapper.
00194  */
00195 static struct vxge_hw_legacy_reg __iomem *
00196 __vxge_hw_device_get_legacy_reg(struct pci_device *pdev, void __iomem *bar0)
00197 {
00198         enum vxge_hw_status status;
00199         struct vxge_hw_legacy_reg __iomem *legacy_reg;
00200         /*
00201          * If the length of Bar0 is 16MB, then assume that we are configured
00202          * in MF8P_VP2 mode and then add 8MB to the legacy_reg offsets
00203          */
00204         if (pci_bar_size(pdev, PCI_BASE_ADDRESS_0) == 0x1000000)
00205                 legacy_reg = (struct vxge_hw_legacy_reg __iomem *)
00206                                 (bar0 + 0x800000);
00207         else
00208                 legacy_reg = (struct vxge_hw_legacy_reg __iomem *)bar0;
00209 
00210         status = __vxge_hw_legacy_swapper_set(legacy_reg);
00211         if (status != VXGE_HW_OK)
00212                 return NULL;
00213 
00214         return legacy_reg;
00215 }
00216 /*
00217  * __vxge_hw_device_toc_get
00218  * This routine sets the swapper and reads the toc pointer and returns the
00219  * memory mapped address of the toc
00220  */
00221 struct vxge_hw_toc_reg __iomem *
00222 __vxge_hw_device_toc_get(void __iomem *bar0,
00223         struct vxge_hw_legacy_reg __iomem *legacy_reg)
00224 {
00225         u64 val64;
00226         struct vxge_hw_toc_reg __iomem *toc = NULL;
00227 
00228         val64 = readq(&legacy_reg->toc_first_pointer);
00229         toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
00230 
00231         return toc;
00232 }
00233 
/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric is
 * completed initializing registers.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	/* Locate the legacy section; this also programs the byte swapper */
	hldev->legacy_reg = __vxge_hw_device_get_legacy_reg(hldev->pdev,
					hldev->bar0);
	if (hldev->legacy_reg  == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	/* The toc holds BAR0-relative offsets of every register space */
	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0,
					hldev->legacy_reg);
	if (hldev->toc_reg  == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	/* Common (device-wide) register space */
	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
		(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	/* MRPCIM register space */
	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	/* Per-SRPCIM register spaces */
	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	/* Per-vpath management register spaces */
	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
		(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
	}

	/* Per-vpath register spaces */
	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	/* KDFC (doorbell FIFO) area; only BIR 0 (within BAR0) is mapped.
	 * NOTE(review): a non-zero BIR leaves hldev->kdfc unset — presumably
	 * never the case for supported configurations; confirm. */
	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	/* Wait for any in-progress vpath resets to finish */
	status = __vxge_hw_device_vpath_reset_in_prog_check(
			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}
00305 
00306 /*
00307  * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
00308  * This routine returns the Access Rights of the driver
00309  */
00310 static u32
00311 __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
00312 {
00313         u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
00314 
00315         switch (host_type) {
00316         case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
00317                 if (func_id == 0) {
00318                         access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
00319                                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
00320                 }
00321                 break;
00322         case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
00323                 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
00324                                 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
00325                 break;
00326         case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
00327                 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
00328                                 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
00329                 break;
00330         case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
00331         case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
00332         case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
00333                 break;
00334         case VXGE_HW_SR_VH_FUNCTION0:
00335         case VXGE_HW_VH_NORMAL_FUNCTION:
00336                 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
00337                 break;
00338         }
00339 
00340         return access_rights;
00341 }
00342 
00343 /*
00344  * __vxge_hw_device_host_info_get
00345  * This routine returns the host type assignments
00346  */
00347 void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
00348 {
00349         u64 val64;
00350         u32 i;
00351 
00352         val64 = readq(&hldev->common_reg->host_type_assignments);
00353 
00354         hldev->host_type =
00355            (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
00356 
00357         hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
00358 
00359         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
00360 
00361                 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
00362                         continue;
00363 
00364                 hldev->func_id =
00365                         __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
00366 
00367                 hldev->access_rights = __vxge_hw_device_access_rights_get(
00368                         hldev->host_type, hldev->func_id);
00369 
00370                 hldev->first_vp_id = i;
00371                 break;
00372         }
00373 
00374         return;
00375 }
00376 
/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information and the first mac addresse for
 * each vpath
 */
enum vxge_hw_status
vxge_hw_device_hw_info_get(struct pci_device *pdev, void __iomem *bar0,
				struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpath_reg __iomem *vpath_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	struct vxge_hw_legacy_reg __iomem *legacy_reg;
	enum vxge_hw_status status;

	vxge_trace();

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	/* Locate the legacy section (also programs the byte swapper) */
	legacy_reg = __vxge_hw_device_get_legacy_reg(pdev, bar0);
	if (legacy_reg == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	/* The toc supplies offsets for all other register spaces */
	toc = __vxge_hw_device_toc_get(bar0, legacy_reg);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	/* Do not touch the device until vpath resets complete */
	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	/* First pass: use the first assigned vpath to read function id,
	 * firmware version and card info (loop breaks after one vpath) */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
		/* Privileged functions clear the firmware memo mask */
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
			hw_info->func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
			wmb();
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status = __vxge_hw_vpath_fw_ver_get(vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	/* Second pass: read the MAC address (and mask) of every
	 * assigned vpath */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status =  __vxge_hw_vpath_addr_get(vpath_reg,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
00484 
00485 /*
00486  * vxge_hw_device_initialize - Initialize Titan device.
00487  * Initialize Titan device. Note that all the arguments of this public API
00488  * are 'IN', including @hldev. Driver cooperates with
00489  * OS to find new Titan device, locate its PCI and memory spaces.
00490  *
00491  * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
00492  * to enable the latter to perform Titan hardware initialization.
00493  */
00494 enum vxge_hw_status
00495 vxge_hw_device_initialize(
00496         struct __vxge_hw_device **devh,
00497         void *bar0,
00498         struct pci_device *pdev,
00499         u8 titan1)
00500 {
00501         struct __vxge_hw_device *hldev = NULL;
00502         enum vxge_hw_status status = VXGE_HW_OK;
00503 
00504         vxge_trace();
00505 
00506         hldev = (struct __vxge_hw_device *)
00507                         zalloc(sizeof(struct __vxge_hw_device));
00508         if (hldev == NULL) {
00509                 vxge_debug(VXGE_ERR, "hldev allocation failed\n");
00510                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
00511                 goto exit;
00512         }
00513 
00514         hldev->magic = VXGE_HW_DEVICE_MAGIC;
00515 
00516         hldev->bar0 = bar0;
00517         hldev->pdev = pdev;
00518         hldev->titan1 = titan1;
00519 
00520         __vxge_hw_device_pci_e_init(hldev);
00521 
00522         status = __vxge_hw_device_reg_addr_get(hldev);
00523         if (status != VXGE_HW_OK) {
00524                 vxge_debug(VXGE_ERR, "%s:%d __vxge_hw_device_reg_addr_get "
00525                         "failed\n", __func__, __LINE__);
00526                 vxge_hw_device_terminate(hldev);
00527                 goto exit;
00528         }
00529 
00530         __vxge_hw_device_host_info_get(hldev);
00531 
00532         *devh = hldev;
00533 exit:
00534         return status;
00535 }
00536 
/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.  Poisons the magic value (so stale pointers are
 * detectable) and frees the device structure.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_trace();

	/* Catch double-termination or corruption */
	assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	free(hldev);
}
00551 
/*
 *vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from reserve array to work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
	struct __vxge_hw_device *hldev;
	struct vxge_hw_ring_rxd_1 *rxd;
	enum vxge_hw_status status = VXGE_HW_OK;
	u8 offset = 0;
	struct __vxge_hw_ring_block *block;
	u8 i, iob_off;

	vxge_trace();

	hldev = ring->vpathh->hldev;
	/*
	 * We allocate all the dma buffers first and then share the
	 * these buffers among the all rx descriptors in the block.
	 */
	for (i = 0; i < ARRAY_SIZE(ring->iobuf); i++) {
		ring->iobuf[i] = alloc_iob(VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
		if (!ring->iobuf[i]) {
			/* Unwind: free the buffers allocated so far */
			while (i) {
				free_iob(ring->iobuf[--i]);
				ring->iobuf[i] = NULL;
			}
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto iobuf_err;
		}
	}

	/* Attach a buffer to each descriptor and post it.  Buffers are
	 * shared round-robin (offset % buf_per_block); the last
	 * descriptor gets the dedicated extra buffer at index
	 * VXGE_HW_RING_BUF_PER_BLOCK — assumes ring->iobuf has
	 * VXGE_HW_RING_BUF_PER_BLOCK + 1 slots; TODO confirm against
	 * the struct definition. */
	for (offset = 0; offset < VXGE_HW_MAX_RXDS_PER_BLOCK_1; offset++) {

		rxd = &ring->rxdl->rxd[offset];
		if (offset == (VXGE_HW_MAX_RXDS_PER_BLOCK_1 - 1))
			iob_off = VXGE_HW_RING_BUF_PER_BLOCK;
		else
			iob_off = offset % ring->buf_per_block;

		rxd->control_0 = rxd->control_1 = 0;
		vxge_hw_ring_rxd_1b_set(rxd, ring->iobuf[iob_off],
				VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));

		vxge_hw_ring_rxd_post(ring, rxd);
	}
	/* linking the block to itself as we use only one rx block*/
	block = ring->rxdl;
	block->reserved_2_pNext_RxD_block = (unsigned long) block;
	block->pNext_RxD_Blk_physical = (u64)virt_to_bus(block);

	ring->rxd_offset = 0;
iobuf_err:
	return status;
}
00608 
00609 /*
00610  * __vxge_hw_ring_create - Create a Ring
00611  * This function creates Ring and initializes it.
00612  *
00613  */
00614 enum vxge_hw_status
00615 __vxge_hw_ring_create(struct __vxge_hw_virtualpath *vpath,
00616                       struct __vxge_hw_ring *ring)
00617 {
00618         enum vxge_hw_status status = VXGE_HW_OK;
00619         struct __vxge_hw_device *hldev;
00620         u32 vp_id;
00621 
00622         vxge_trace();
00623 
00624         hldev = vpath->hldev;
00625         vp_id = vpath->vp_id;
00626 
00627         ring->rxdl = malloc_dma(sizeof(struct __vxge_hw_ring_block),
00628                         sizeof(struct __vxge_hw_ring_block));
00629         if (!ring->rxdl) {
00630                 vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
00631                                 __func__, __LINE__);
00632                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
00633                 goto exit;
00634         }
00635         ring->rxd_offset = 0;
00636         ring->vpathh = vpath;
00637         ring->buf_per_block = VXGE_HW_RING_BUF_PER_BLOCK;
00638         ring->rx_poll_weight = VXGE_HW_RING_RX_POLL_WEIGHT;
00639         ring->vp_id = vp_id;
00640         ring->vp_reg = vpath->vp_reg;
00641         ring->common_reg = hldev->common_reg;
00642 
00643         ring->rxd_qword_limit = VXGE_HW_RING_RXD_QWORD_LIMIT;
00644 
00645         status = vxge_hw_ring_replenish(ring);
00646         if (status != VXGE_HW_OK) {
00647                 __vxge_hw_ring_delete(ring);
00648                 goto exit;
00649         }
00650 exit:
00651         return status;
00652 }
00653 
00654 /*
00655  * __vxge_hw_ring_delete - Removes the ring
00656  * This function freeup the memory pool and removes the ring
00657  */
00658 enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_ring *ring)
00659 {
00660         u8 i;
00661 
00662         vxge_trace();
00663 
00664         for (i = 0; (i < ARRAY_SIZE(ring->iobuf)) && ring->iobuf[i]; i++) {
00665                 free_iob(ring->iobuf[i]);
00666                 ring->iobuf[i] = NULL;
00667         }
00668 
00669         if (ring->rxdl) {
00670                 free_dma(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
00671                 ring->rxdl = NULL;
00672         }
00673         ring->rxd_offset = 0;
00674 
00675         return VXGE_HW_OK;
00676 }
00677 
/*
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
 *
 * Reads the swapper feedback register to discover how the host sees the
 * device's fixed pattern, programs the read/write swap and bit-flip
 * enables to compensate, then re-reads the feedback register to verify.
 */
enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	vxge_trace();

	val64 = readq(&legacy_reg->toc_swapper_fb);

	wmb();

	switch (val64) {

	case VXGE_HW_SWAPPER_INITIAL_VALUE:
		/* Already correct; nothing to program */
		return status;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
		/* Compensate for both byte swapping and bit flipping */
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
		/* Compensate for byte swapping only */
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		break;

	case VXGE_HW_SWAPPER_BIT_FLIPPED:
		/* Compensate for bit flipping only */
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;
	}

	wmb();

	/* Verify: the feedback register must now read the fixed pattern */
	val64 = readq(&legacy_reg->toc_swapper_fb);
	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
		status = VXGE_HW_ERR_SWAPPER_CTRL;

	return status;
}
00733 
00734 /*
00735  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
00736  * Set the swapper bits appropriately for the vpath.
00737  */
00738 enum vxge_hw_status
00739 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
00740 {
00741         vxge_trace();
00742 
00743 #if (__BYTE_ORDER != __BIG_ENDIAN)
00744         u64 val64;
00745 
00746         val64 = readq(&vpath_reg->vpath_general_cfg1);
00747         wmb();
00748         val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
00749         writeq(val64, &vpath_reg->vpath_general_cfg1);
00750         wmb();
00751 #endif
00752         return VXGE_HW_OK;
00753 }
00754 
00755 /*
00756  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
00757  * Set the swapper bits appropriately for the vpath.
00758  */
00759 enum vxge_hw_status
00760 __vxge_hw_kdfc_swapper_set(
00761         struct vxge_hw_legacy_reg __iomem *legacy_reg,
00762         struct vxge_hw_vpath_reg __iomem *vpath_reg)
00763 {
00764         u64 val64;
00765 
00766         vxge_trace();
00767 
00768         val64 = readq(&legacy_reg->pifm_wr_swap_en);
00769 
00770         if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
00771                 val64 = readq(&vpath_reg->kdfcctl_cfg0);
00772                 wmb();
00773 
00774                 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
00775                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1  |
00776                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
00777 
00778                 writeq(val64, &vpath_reg->kdfcctl_cfg0);
00779                 wmb();
00780         }
00781 
00782         return VXGE_HW_OK;
00783 }
00784 
00785 /*
00786  * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
00787  */
00788 enum vxge_hw_status
00789 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
00790 {
00791         struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
00792         enum vxge_hw_status status = VXGE_HW_OK;
00793         int i = 0, j = 0;
00794 
00795         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
00796                 if (!((vpath_mask) & vxge_mBIT(i)))
00797                         continue;
00798                 vpmgmt_reg = hldev->vpmgmt_reg[i];
00799                 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
00800                         if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
00801                         & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
00802                                 return VXGE_HW_FAIL;
00803                 }
00804         }
00805         return status;
00806 }
00807 
00808 /*
00809  * __vxge_hw_fifo_create - Create a FIFO
00810  * This function creates FIFO and initializes it.
00811  */
00812 enum vxge_hw_status
00813 __vxge_hw_fifo_create(struct __vxge_hw_virtualpath *vpath,
00814                         struct __vxge_hw_fifo *fifo)
00815 {
00816         enum vxge_hw_status status = VXGE_HW_OK;
00817 
00818         vxge_trace();
00819 
00820         fifo->vpathh = vpath;
00821         fifo->depth = VXGE_HW_FIFO_TXD_DEPTH;
00822         fifo->hw_offset = fifo->sw_offset = 0;
00823         fifo->nofl_db = vpath->nofl_db;
00824         fifo->vp_id = vpath->vp_id;
00825         fifo->vp_reg = vpath->vp_reg;
00826         fifo->tx_intr_num = (vpath->vp_id * VXGE_HW_MAX_INTR_PER_VP)
00827                                 + VXGE_HW_VPATH_INTR_TX;
00828 
00829         fifo->txdl = malloc_dma(sizeof(struct vxge_hw_fifo_txd)
00830                                 * fifo->depth, fifo->depth);
00831         if (!fifo->txdl) {
00832                 vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
00833                                 __func__, __LINE__);
00834                 return VXGE_HW_ERR_OUT_OF_MEMORY;
00835         }
00836         memset(fifo->txdl, 0, sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
00837         return status;
00838 }
00839 
00840 /*
00841  * __vxge_hw_fifo_delete - Removes the FIFO
00842  * This function freeup the memory pool and removes the FIFO
00843  */
00844 enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_fifo *fifo)
00845 {
00846         vxge_trace();
00847 
00848         if (fifo->txdl)
00849                 free_dma(fifo->txdl,
00850                         sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
00851 
00852         fifo->txdl = NULL;
00853         fifo->hw_offset = fifo->sw_offset = 0;
00854 
00855         return VXGE_HW_OK;
00856 }
00857 
00858 /*
00859  * __vxge_hw_vpath_pci_read - Read the content of given address
00860  *                          in pci config space.
00861  * Read from the vpath pci config space.
00862  */
00863 enum vxge_hw_status
00864 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
00865                          u32 phy_func_0, u32 offset, u32 *val)
00866 {
00867         u64 val64;
00868         enum vxge_hw_status status = VXGE_HW_OK;
00869         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
00870 
00871         val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
00872 
00873         if (phy_func_0)
00874                 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
00875 
00876         writeq(val64, &vp_reg->pci_config_access_cfg1);
00877         wmb();
00878         writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
00879                         &vp_reg->pci_config_access_cfg2);
00880         wmb();
00881 
00882         status = __vxge_hw_device_register_poll(
00883                         &vp_reg->pci_config_access_cfg2,
00884                         VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
00885 
00886         if (status != VXGE_HW_OK)
00887                 goto exit;
00888 
00889         val64 = readq(&vp_reg->pci_config_access_status);
00890 
00891         if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
00892                 status = VXGE_HW_FAIL;
00893                 *val = 0;
00894         } else
00895                 *val = (u32)vxge_bVALn(val64, 32, 32);
00896 exit:
00897         return status;
00898 }
00899 
00900 /*
00901  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
00902  * Returns the function number of the vpath.
00903  */
00904 u32
00905 __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
00906 {
00907         u64 val64;
00908 
00909         val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
00910 
00911         return
00912          (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
00913 }
00914 
00915 /*
00916  * __vxge_hw_read_rts_ds - Program RTS steering critieria
00917  */
00918 static inline void
00919 __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
00920                                 u64 dta_struct_sel)
00921 {
00922         writeq(0, &vpath_reg->rts_access_steer_ctrl);
00923         wmb();
00924         writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
00925         writeq(0, &vpath_reg->rts_access_steer_data1);
00926         wmb();
00927         return;
00928 }
00929 
00930 /*
00931  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
00932  * part number and product description.
00933  */
00934 enum vxge_hw_status
00935 __vxge_hw_vpath_card_info_get(
00936         struct vxge_hw_vpath_reg __iomem *vpath_reg,
00937         struct vxge_hw_device_hw_info *hw_info)
00938 {
00939         u32 i, j;
00940         u64 val64;
00941         u64 data1 = 0ULL;
00942         u64 data2 = 0ULL;
00943         enum vxge_hw_status status = VXGE_HW_OK;
00944         u8 *serial_number = hw_info->serial_number;
00945         u8 *part_number = hw_info->part_number;
00946         u8 *product_desc = hw_info->product_desc;
00947 
00948         __vxge_hw_read_rts_ds(vpath_reg,
00949                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
00950 
00951         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
00952                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
00953                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
00954                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
00955                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
00956                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
00957 
00958         status = __vxge_hw_pio_mem_write64(val64,
00959                                 &vpath_reg->rts_access_steer_ctrl,
00960                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
00961                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
00962 
00963         if (status != VXGE_HW_OK)
00964                 return status;
00965 
00966         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
00967 
00968         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
00969                 data1 = readq(&vpath_reg->rts_access_steer_data0);
00970                 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
00971 
00972                 data2 = readq(&vpath_reg->rts_access_steer_data1);
00973                 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
00974                 status = VXGE_HW_OK;
00975         } else
00976                 *serial_number = 0;
00977 
00978         __vxge_hw_read_rts_ds(vpath_reg,
00979                         VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
00980 
00981         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
00982                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
00983                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
00984                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
00985                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
00986                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
00987 
00988         status = __vxge_hw_pio_mem_write64(val64,
00989                                 &vpath_reg->rts_access_steer_ctrl,
00990                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
00991                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
00992 
00993         if (status != VXGE_HW_OK)
00994                 return status;
00995 
00996         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
00997 
00998         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
00999 
01000                 data1 = readq(&vpath_reg->rts_access_steer_data0);
01001                 ((u64 *)part_number)[0] = be64_to_cpu(data1);
01002 
01003                 data2 = readq(&vpath_reg->rts_access_steer_data1);
01004                 ((u64 *)part_number)[1] = be64_to_cpu(data2);
01005 
01006                 status = VXGE_HW_OK;
01007 
01008         } else
01009                 *part_number = 0;
01010 
01011         j = 0;
01012 
01013         for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
01014              i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
01015 
01016                 __vxge_hw_read_rts_ds(vpath_reg, i);
01017 
01018                 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
01019                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
01020                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
01021                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
01022                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
01023                         VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
01024 
01025                 status = __vxge_hw_pio_mem_write64(val64,
01026                                 &vpath_reg->rts_access_steer_ctrl,
01027                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
01028                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
01029 
01030                 if (status != VXGE_HW_OK)
01031                         return status;
01032 
01033                 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
01034 
01035                 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
01036 
01037                         data1 = readq(&vpath_reg->rts_access_steer_data0);
01038                         ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
01039 
01040                         data2 = readq(&vpath_reg->rts_access_steer_data1);
01041                         ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
01042 
01043                         status = VXGE_HW_OK;
01044                 } else
01045                         *product_desc = 0;
01046         }
01047 
01048         return status;
01049 }
01050 
01051 /*
01052  * __vxge_hw_vpath_fw_ver_get - Get the fw version
01053  * Returns FW Version
01054  */
01055 enum vxge_hw_status
01056 __vxge_hw_vpath_fw_ver_get(
01057         struct vxge_hw_vpath_reg __iomem *vpath_reg,
01058         struct vxge_hw_device_hw_info *hw_info)
01059 {
01060         u64 val64;
01061         u64 data1 = 0ULL;
01062         u64 data2 = 0ULL;
01063         struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
01064         struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
01065         struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
01066         struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
01067         enum vxge_hw_status status = VXGE_HW_OK;
01068 
01069         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
01070                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
01071                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
01072                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
01073                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
01074                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
01075 
01076         status = __vxge_hw_pio_mem_write64(val64,
01077                                 &vpath_reg->rts_access_steer_ctrl,
01078                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
01079                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
01080 
01081         if (status != VXGE_HW_OK)
01082                 goto exit;
01083 
01084         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
01085 
01086         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
01087 
01088                 data1 = readq(&vpath_reg->rts_access_steer_data0);
01089                 data2 = readq(&vpath_reg->rts_access_steer_data1);
01090 
01091                 fw_date->day =
01092                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
01093                                                 data1);
01094                 fw_date->month =
01095                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
01096                                                 data1);
01097                 fw_date->year =
01098                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
01099                                                 data1);
01100 
01101                 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%d/%d/%d",
01102                         fw_date->month, fw_date->day, fw_date->year);
01103 
01104                 fw_version->major =
01105                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
01106                 fw_version->minor =
01107                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
01108                 fw_version->build =
01109                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
01110 
01111                 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
01112                     fw_version->major, fw_version->minor, fw_version->build);
01113 
01114                 flash_date->day =
01115                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
01116                 flash_date->month =
01117                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
01118                 flash_date->year =
01119                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
01120 
01121                 snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%d/%d/%d",
01122                         flash_date->month, flash_date->day, flash_date->year);
01123 
01124                 flash_version->major =
01125                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
01126                 flash_version->minor =
01127                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
01128                 flash_version->build =
01129                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
01130 
01131                 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
01132                         flash_version->major, flash_version->minor,
01133                         flash_version->build);
01134 
01135                 status = VXGE_HW_OK;
01136 
01137         } else
01138                 status = VXGE_HW_FAIL;
01139 exit:
01140         return status;
01141 }
01142 
01143 /*
01144  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
01145  *               from MAC address table.
01146  */
01147 enum vxge_hw_status
01148 __vxge_hw_vpath_addr_get(
01149         struct vxge_hw_vpath_reg *vpath_reg,
01150         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
01151 {
01152         u32 i;
01153         u64 val64;
01154         u64 data1 = 0ULL;
01155         u64 data2 = 0ULL;
01156         u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY;
01157         enum vxge_hw_status status = VXGE_HW_OK;
01158 
01159         while (1) {
01160                 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
01161                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
01162                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
01163                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
01164                         VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
01165 
01166                 status = __vxge_hw_pio_mem_write64(val64,
01167                                         &vpath_reg->rts_access_steer_ctrl,
01168                                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
01169                                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
01170 
01171                 if (status != VXGE_HW_OK)
01172                         break;
01173 
01174                 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
01175 
01176                 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
01177 
01178                         data1 = readq(&vpath_reg->rts_access_steer_data0);
01179                         data2 = readq(&vpath_reg->rts_access_steer_data1);
01180 
01181                         data1 =
01182                          VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
01183                         data2 =
01184                          VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
01185                                                                 data2);
01186 
01187                         for (i = ETH_ALEN; i > 0; i--) {
01188                                 macaddr[i-1] = (u8)(data1 & 0xFF);
01189                                 data1 >>= 8;
01190 
01191                                 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
01192                                 data2 >>= 8;
01193                         }
01194                         if (is_valid_ether_addr(macaddr)) {
01195                                 status = VXGE_HW_OK;
01196                                 break;
01197                         }
01198                         action =
01199                           VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
01200                 } else
01201                         status = VXGE_HW_FAIL;
01202         }
01203 
01204         return status;
01205 }
01206 
01207 /*
01208  * __vxge_hw_vpath_mgmt_read
01209  * This routine reads the vpath_mgmt registers
01210  */
01211 static enum vxge_hw_status
01212 __vxge_hw_vpath_mgmt_read(
01213         struct __vxge_hw_virtualpath *vpath)
01214 {
01215         u32 i, mtu = 0, max_pyld = 0;
01216         u64 val64;
01217         enum vxge_hw_status status = VXGE_HW_OK;
01218 
01219         for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
01220 
01221                 val64 = readq(&vpath->vpmgmt_reg->
01222                                 rxmac_cfg0_port_vpmgmt_clone[i]);
01223                 max_pyld =
01224                         (u32)
01225                         VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
01226                         (val64);
01227                 if (mtu < max_pyld)
01228                         mtu = max_pyld;
01229         }
01230 
01231         vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
01232 
01233         val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
01234 
01235         if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
01236                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
01237         else
01238                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
01239 
01240         return status;
01241 }
01242 
01243 /*
01244  * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
01245  * This routine checks the vpath_rst_in_prog register to see if
01246  * adapter completed the reset process for the vpath
01247  */
01248 enum vxge_hw_status
01249 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
01250 {
01251         enum vxge_hw_status status;
01252 
01253         vxge_trace();
01254 
01255         status = __vxge_hw_device_register_poll(
01256                         &vpath->hldev->common_reg->vpath_rst_in_prog,
01257                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
01258                                 1 << (16 - vpath->vp_id)),
01259                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
01260 
01261         return status;
01262 }
01263 
01264 /*
01265  * __vxge_hw_vpath_reset
01266  * This routine resets the vpath on the device
01267  */
01268 enum vxge_hw_status
01269 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
01270 {
01271         u64 val64;
01272         enum vxge_hw_status status = VXGE_HW_OK;
01273 
01274         vxge_trace();
01275 
01276         val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
01277 
01278         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
01279                                 &hldev->common_reg->cmn_rsthdlr_cfg0);
01280 
01281         return status;
01282 }
01283 
01284 /*
01285  * __vxge_hw_vpath_prc_configure
01286  * This routine configures the prc registers of virtual path using the config
01287  * passed
01288  */
01289 void
01290 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev)
01291 {
01292         u64 val64;
01293         struct __vxge_hw_virtualpath *vpath;
01294         struct vxge_hw_vpath_reg __iomem *vp_reg;
01295 
01296         vxge_trace();
01297 
01298         vpath = &hldev->virtual_path;
01299         vp_reg = vpath->vp_reg;
01300 
01301         val64 = readq(&vp_reg->prc_cfg1);
01302         val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
01303         writeq(val64, &vp_reg->prc_cfg1);
01304 
01305         val64 = readq(&vpath->vp_reg->prc_cfg6);
01306         val64 &= ~VXGE_HW_PRC_CFG6_RXD_CRXDT(0x1ff);
01307         val64 &= ~VXGE_HW_PRC_CFG6_RXD_SPAT(0x1ff);
01308         val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
01309         val64 |= VXGE_HW_PRC_CFG6_RXD_CRXDT(0x3);
01310         val64 |= VXGE_HW_PRC_CFG6_RXD_SPAT(0xf);
01311         writeq(val64, &vpath->vp_reg->prc_cfg6);
01312 
01313         writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
01314                         (u64)virt_to_bus(vpath->ringh.rxdl) >> 3),
01315                         &vp_reg->prc_cfg5);
01316 
01317         val64 = readq(&vp_reg->prc_cfg4);
01318         val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
01319         val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
01320         val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
01321                         VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
01322         val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
01323 
01324         writeq(val64, &vp_reg->prc_cfg4);
01325         return;
01326 }
01327 
01328 /*
01329  * __vxge_hw_vpath_kdfc_configure
01330  * This routine configures the kdfc registers of virtual path using the
01331  * config passed
01332  */
01333 enum vxge_hw_status
01334 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
01335 {
01336         u64 val64;
01337         u64 vpath_stride;
01338         enum vxge_hw_status status = VXGE_HW_OK;
01339         struct __vxge_hw_virtualpath *vpath;
01340         struct vxge_hw_vpath_reg __iomem *vp_reg;
01341 
01342         vxge_trace();
01343 
01344         vpath = &hldev->virtual_path;
01345         vp_reg = vpath->vp_reg;
01346         status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
01347 
01348         if (status != VXGE_HW_OK)
01349                 goto exit;
01350 
01351         val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
01352 
01353         vpath->max_kdfc_db =
01354                 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
01355                         val64+1)/2;
01356 
01357         vpath->max_nofl_db = vpath->max_kdfc_db;
01358 
01359         val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
01360                                 (vpath->max_nofl_db*2)-1);
01361 
01362         writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
01363 
01364         writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
01365                 &vp_reg->kdfc_fifo_trpl_ctrl);
01366 
01367         val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
01368 
01369         val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
01370                    VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
01371 
01372         val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
01373                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
01374 #if (__BYTE_ORDER != __BIG_ENDIAN)
01375                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
01376 #endif
01377                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
01378 
01379         writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
01380         writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
01381         wmb();
01382         vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
01383 
01384         vpath->nofl_db =
01385                 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
01386                 (hldev->kdfc + (vp_id *
01387                 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
01388                                         vpath_stride)));
01389 exit:
01390         return status;
01391 }
01392 
01393 /*
01394  * __vxge_hw_vpath_mac_configure
01395  * This routine configures the mac of virtual path using the config passed
01396  */
01397 enum vxge_hw_status
01398 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev)
01399 {
01400         u64 val64;
01401         enum vxge_hw_status status = VXGE_HW_OK;
01402         struct __vxge_hw_virtualpath *vpath;
01403         struct vxge_hw_vpath_reg __iomem *vp_reg;
01404 
01405         vxge_trace();
01406 
01407         vpath = &hldev->virtual_path;
01408         vp_reg = vpath->vp_reg;
01409 
01410         writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
01411                         vpath->vsport_number), &vp_reg->xmac_vsport_choice);
01412 
01413         val64 = readq(&vp_reg->rxmac_vcfg1);
01414 
01415         val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
01416                 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
01417 
01418         writeq(val64, &vp_reg->rxmac_vcfg1);
01419         return status;
01420 }
01421 
01422 /*
01423  * __vxge_hw_vpath_tim_configure
01424  * This routine configures the tim registers of virtual path using the config
01425  * passed
01426  */
01427 enum vxge_hw_status
01428 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
01429 {
01430         u64 val64;
01431         enum vxge_hw_status status = VXGE_HW_OK;
01432         struct __vxge_hw_virtualpath *vpath;
01433         struct vxge_hw_vpath_reg __iomem *vp_reg;
01434 
01435         vxge_trace();
01436 
01437         vpath = &hldev->virtual_path;
01438         vp_reg = vpath->vp_reg;
01439 
01440         writeq((u64)0, &vp_reg->tim_dest_addr);
01441         writeq((u64)0, &vp_reg->tim_vpath_map);
01442         writeq((u64)0, &vp_reg->tim_bitmap);
01443         writeq((u64)0, &vp_reg->tim_remap);
01444 
01445         writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
01446                 (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
01447                 VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
01448 
01449         val64 = readq(&vp_reg->tim_pci_cfg);
01450         val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
01451         writeq(val64, &vp_reg->tim_pci_cfg);
01452 
01453         /* TX configuration */
01454         val64 = VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
01455                         (VXGE_TTI_BTIMER_VAL * 1000) / 272);
01456         val64 |= (VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC |
01457                         VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI |
01458                         VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN);
01459         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(TTI_TX_URANGE_A) |
01460                         VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(TTI_TX_URANGE_B) |
01461                         VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(TTI_TX_URANGE_C);
01462         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
01463 
01464         val64 = VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(TTI_TX_UFC_A) |
01465                         VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(TTI_TX_UFC_B) |
01466                         VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(TTI_TX_UFC_C) |
01467                         VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(TTI_TX_UFC_D);
01468         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
01469 
01470         val64 = VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
01471                         VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL);
01472         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
01473                         (VXGE_TTI_LTIMER_VAL * 1000) / 272);
01474         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
01475 
01476         /* RX configuration */
01477         val64 = VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
01478                         (VXGE_RTI_BTIMER_VAL * 1000) / 272);
01479         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
01480         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(RTI_RX_URANGE_A) |
01481                         VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(RTI_RX_URANGE_B) |
01482                         VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(RTI_RX_URANGE_C);
01483         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
01484 
01485         val64 = VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(RTI_RX_UFC_A) |
01486                         VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(RTI_RX_UFC_B) |
01487                         VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(RTI_RX_UFC_C) |
01488                         VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(RTI_RX_UFC_D);
01489         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
01490 
01491         val64 = VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
01492                         VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL);
01493         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
01494                         (VXGE_RTI_LTIMER_VAL * 1000) / 272);
01495         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
01496 
01497         val64 = 0;
01498         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
01499         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
01500         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
01501         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
01502         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
01503         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
01504 
01505         return status;
01506 }
01507 
01508 /*
01509  * __vxge_hw_vpath_initialize
01510  * This routine is the final phase of init which initializes the
01511  * registers of the vpath using the configuration passed.
01512  */
01513 enum vxge_hw_status
01514 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
01515 {
01516         u64 val64;
01517         u32 val32;
01518         int i;
01519         enum vxge_hw_status status = VXGE_HW_OK;
01520         struct __vxge_hw_virtualpath *vpath;
01521         struct vxge_hw_vpath_reg *vp_reg;
01522 
01523         vxge_trace();
01524 
01525         vpath = &hldev->virtual_path;
01526 
01527         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
01528                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
01529                 goto exit;
01530         }
01531         vp_reg = vpath->vp_reg;
01532         status = __vxge_hw_legacy_swapper_set(hldev->legacy_reg);
01533         if (status != VXGE_HW_OK)
01534                 goto exit;
01535 
01536         status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
01537 
01538         if (status != VXGE_HW_OK)
01539                 goto exit;
01540         val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
01541 
01542         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
01543                 if (val64 & vxge_mBIT(i))
01544                         vpath->vsport_number = i;
01545         }
01546 
01547         status = __vxge_hw_vpath_mac_configure(hldev);
01548 
01549         if (status != VXGE_HW_OK)
01550                 goto exit;
01551 
01552         status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
01553 
01554         if (status != VXGE_HW_OK)
01555                 goto exit;
01556 
01557         status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
01558 
01559         if (status != VXGE_HW_OK)
01560                 goto exit;
01561 
01562         val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
01563 
01564         /* Get MRRS value from device control */
01565         status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
01566 
01567         if (status == VXGE_HW_OK) {
01568                 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
01569                 val64 &=
01570                     ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
01571                 val64 |=
01572                     VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
01573 
01574                 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
01575         }
01576 
01577         val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
01578         val64 |=
01579             VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
01580                     VXGE_HW_MAX_PAYLOAD_SIZE_512);
01581 
01582         val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
01583         writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
01584 
01585 exit:
01586         return status;
01587 }
01588 
01589 /*
01590  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
01591  * This routine is the initial phase of init which resets the vpath and
01592  * initializes the software support structures.
01593  */
01594 enum vxge_hw_status
01595 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
01596                         struct __vxge_hw_virtualpath *vpath)
01597 {
01598         enum vxge_hw_status status = VXGE_HW_OK;
01599 
01600         vxge_trace();
01601 
01602         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
01603                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
01604                 goto exit;
01605         }
01606 
01607         vpath->vp_id = vp_id;
01608         vpath->vp_open = VXGE_HW_VP_OPEN;
01609         vpath->hldev = hldev;
01610         vpath->vp_reg = hldev->vpath_reg[vp_id];
01611         vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
01612 
01613         __vxge_hw_vpath_reset(hldev, vp_id);
01614 
01615         status = __vxge_hw_vpath_reset_check(vpath);
01616         if (status != VXGE_HW_OK) {
01617                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
01618                 goto exit;
01619         }
01620 
01621         VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
01622                 hldev->tim_int_mask1, vp_id);
01623 
01624         status = __vxge_hw_vpath_initialize(hldev, vp_id);
01625 
01626         if (status != VXGE_HW_OK) {
01627                 __vxge_hw_vp_terminate(hldev, vpath);
01628                 goto exit;
01629         }
01630 
01631         status = __vxge_hw_vpath_mgmt_read(vpath);
01632 exit:
01633         return status;
01634 }
01635 
01636 /*
01637  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
01638  * This routine closes all channels it opened and freeup memory
01639  */
01640 void
01641 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev,
01642                         struct __vxge_hw_virtualpath *vpath)
01643 {
01644         vxge_trace();
01645 
01646         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
01647                 return;
01648 
01649         VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
01650                 hldev->tim_int_mask1, vpath->vp_id);
01651 
01652         memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
01653 }
01654 
01655 /*
01656  * vxge_hw_vpath_mtu_set - Set MTU.
01657  * Set new MTU value. Example, to use jumbo frames:
01658  * vxge_hw_vpath_mtu_set(my_device, 9600);
01659  */
01660 enum vxge_hw_status
01661 vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu)
01662 {
01663         u64 val64;
01664         enum vxge_hw_status status = VXGE_HW_OK;
01665 
01666         vxge_trace();
01667 
01668         new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
01669 
01670         if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
01671                 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
01672 
01673         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
01674 
01675         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
01676         val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
01677 
01678         writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
01679 
01680         return status;
01681 }
01682 
01683 /*
01684  * vxge_hw_vpath_open - Open a virtual path on a given adapter
01685  * This function is used to open access to virtual path of an
01686  * adapter for offload, GRO operations. This function returns
01687  * synchronously.
01688  */
01689 enum vxge_hw_status
01690 vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath)
01691 {
01692         struct __vxge_hw_virtualpath *vpathh;
01693         enum vxge_hw_status status;
01694 
01695         vxge_trace();
01696 
01697         vpathh = &hldev->virtual_path;
01698 
01699         if (vpath->vp_open == VXGE_HW_VP_OPEN) {
01700                 status = VXGE_HW_ERR_INVALID_STATE;
01701                 goto vpath_open_exit1;
01702         }
01703 
01704         status = __vxge_hw_vp_initialize(hldev, hldev->first_vp_id, vpathh);
01705         if (status != VXGE_HW_OK)
01706                 goto vpath_open_exit1;
01707 
01708         status = __vxge_hw_fifo_create(vpathh, &vpathh->fifoh);
01709         if (status != VXGE_HW_OK)
01710                 goto vpath_open_exit2;
01711 
01712         status = __vxge_hw_ring_create(vpathh, &vpathh->ringh);
01713         if (status != VXGE_HW_OK)
01714                 goto vpath_open_exit3;
01715 
01716         __vxge_hw_vpath_prc_configure(hldev);
01717 
01718         return VXGE_HW_OK;
01719 
01720 vpath_open_exit3:
01721         __vxge_hw_fifo_delete(&vpathh->fifoh);
01722 vpath_open_exit2:
01723         __vxge_hw_vp_terminate(hldev, vpathh);
01724 vpath_open_exit1:
01725         return status;
01726 }
01727 
01728 /*
01729  * vxge_hw_vpath_rx_doorbell_init -  Post the count of the refreshed region
01730  * of RxD list
01731  * @vp: vpath handle
01732  *
01733  * This function decides on the Rxd replenish count depending on the
01734  * descriptor memory that has been allocated to this VPath.
01735  */
01736 void
01737 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath)
01738 {
01739         u64 new_count, val64;
01740 
01741         vxge_trace();
01742 
01743         if (vpath->hldev->titan1) {
01744                 new_count = readq(&vpath->vp_reg->rxdmem_size);
01745                 new_count &= 0x1fff;
01746         } else
01747                 new_count = VXGE_HW_RING_RXD_QWORDS_MODE_1 * 4;
01748 
01749         val64 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
01750 
01751         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val64),
01752                 &vpath->vp_reg->prc_rxd_doorbell);
01753 }
01754 
01755 /*
01756  * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
01757  * This function is used to close access to virtual path opened
01758  * earlier.
01759  */
01760 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath)
01761 {
01762         struct __vxge_hw_device *devh = NULL;
01763         u32 vp_id = vpath->vp_id;
01764         enum vxge_hw_status status = VXGE_HW_OK;
01765 
01766         vxge_trace();
01767 
01768         devh = vpath->hldev;
01769 
01770         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
01771                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
01772                 goto vpath_close_exit;
01773         }
01774 
01775         devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
01776 
01777         __vxge_hw_ring_delete(&vpath->ringh);
01778 
01779         __vxge_hw_fifo_delete(&vpath->fifoh);
01780 
01781         __vxge_hw_vp_terminate(devh, vpath);
01782 
01783         vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
01784 
01785 vpath_close_exit:
01786         return status;
01787 }
01788 
01789 /*
01790  * vxge_hw_vpath_reset - Resets vpath
01791  * This function is used to request a reset of vpath
01792  */
01793 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath)
01794 {
01795         enum vxge_hw_status status;
01796         u32 vp_id;
01797 
01798         vxge_trace();
01799 
01800         vp_id = vpath->vp_id;
01801 
01802         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
01803                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
01804                 goto exit;
01805         }
01806 
01807         status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
01808 exit:
01809         return status;
01810 }
01811 
01812 /*
01813  * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
01814  * This function poll's for the vpath reset completion and re initializes
01815  * the vpath.
01816  */
01817 enum vxge_hw_status
01818 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath)
01819 {
01820         enum vxge_hw_status status;
01821         struct __vxge_hw_device *hldev;
01822         u32 vp_id;
01823 
01824         vxge_trace();
01825 
01826         vp_id = vpath->vp_id;
01827         hldev = vpath->hldev;
01828 
01829         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
01830                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
01831                 goto exit;
01832         }
01833 
01834         status = __vxge_hw_vpath_reset_check(vpath);
01835         if (status != VXGE_HW_OK)
01836                 goto exit;
01837 
01838         status = __vxge_hw_vpath_initialize(hldev, vp_id);
01839         if (status != VXGE_HW_OK)
01840                 goto exit;
01841 
01842         __vxge_hw_vpath_prc_configure(hldev);
01843 
01844 exit:
01845         return status;
01846 }
01847 
01848 /*
01849  * vxge_hw_vpath_enable - Enable vpath.
01850  * This routine clears the vpath reset thereby enabling a vpath
01851  * to start forwarding frames and generating interrupts.
01852  */
01853 void
01854 vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath)
01855 {
01856         struct __vxge_hw_device *hldev;
01857         u64 val64;
01858 
01859         vxge_trace();
01860 
01861         hldev = vpath->hldev;
01862 
01863         val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
01864                 1 << (16 - vpath->vp_id));
01865 
01866         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
01867                 &hldev->common_reg->cmn_rsthdlr_cfg1);
01868 }