iPXE
vxge_traffic.c
/*
 * vxge-traffic.c: iPXE driver for Neterion Inc's X3100 Series 10GbE
 *              PCIe I/O Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference.  Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 *
 */

FILE_LICENCE(GPL2_ONLY);

#include <ipxe/netdevice.h>
#include <errno.h>

#include "vxge_traffic.h"
#include "vxge_config.h"
#include "vxge_main.h"

/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vpath: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status
vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath)
{
        struct vxge_hw_vpath_reg *vp_reg;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->general_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->pci_config_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->mrpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->srpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->vpath_ppif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->srpcim_msg_to_vpath_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->vpath_pcipif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->prc_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->wrdma_alarm_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->asic_ntwk_vp_err_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->xgmac_vp_int_status);

        /* Read back to flush the posted status writes */
        readq(&vp_reg->vpath_general_int_status);

        /* Mask unwanted interrupts */
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->pci_config_errors_mask);

        /* Unmask the individual interrupts */
        writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
                &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
                &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

        if (vpath->hldev->first_vp_id != vpath->vp_id)
                __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->asic_ntwk_vp_err_mask);
        else
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT|
                        VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK),
                        0, 32), &vp_reg->asic_ntwk_vp_err_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_general_int_mask);
exit:
        return status;
}

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vpath: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed last in the
 * vpath teardown sequence.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status
vxge_hw_vpath_intr_disable(struct __vxge_hw_virtualpath *vpath)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }
        vp_reg = vpath->vp_reg;

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_general_int_mask);

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);

exit:
        return status;
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
                        VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
        u64 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);
}

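/* The Titan register file is 64-bit wide, but the masks written above only
 * decode the upper 32 bits of each register.  The driver therefore extracts
 * bit field [0,32) of the 64-bit value with vxge_bVALn() and stores it with
 * a single 32-bit write to the upper half of the register.  A minimal sketch
 * of what these helpers typically look like, shown for illustration only
 * (the authoritative definitions live in the vxge headers):
 *
 *      #define vxge_bVALn(bits, loc, n) \
 *              ((((u64)(bits)) >> (64 - ((loc) + (n)))) & \
 *                      ((0x1ULL << (n)) - 1))
 *
 *      static inline void
 *      __vxge_hw_pio_mem_write32_upper(u32 val, void *addr)
 *      {
 *              writel(val, (char *)addr + 4);
 *      }
 *
 * so vxge_bVALn(val64, 0, 32) yields the most-significant 32 bits of val64,
 * matching the big-endian register layout.
 */
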
/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 val32;

        vxge_hw_device_mask_all(hldev);

        vxge_hw_vpath_intr_enable(&hldev->virtual_path);

        val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

        if (val64 != 0) {
                writeq(val64, &hldev->common_reg->tim_int_status0);

                writeq(~val64, &hldev->common_reg->tim_int_mask0);
        }

        val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

        if (val32 != 0) {
                __vxge_hw_pio_mem_write32_upper(val32,
                                &hldev->common_reg->tim_int_status1);

                __vxge_hw_pio_mem_write32_upper(~val32,
                                &hldev->common_reg->tim_int_mask1);
        }

        /* Read back to flush the posted mask writes */
        val64 = readq(&hldev->common_reg->titan_general_int_status);

        /* We have not enabled the top level interrupt yet.
         * This will be controlled from the vxge_irq() entry API.
         */
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
        vxge_hw_device_mask_all(hldev);

        /* mask all the tim interrupts */
        writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
        __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
                                &hldev->common_reg->tim_int_mask1);

        vxge_hw_vpath_intr_disable(&hldev->virtual_path);
}

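/* A minimal sketch of how the interrupt enable/disable pair above is meant
 * to bracket the life of the device: enable as the last step of bring-up,
 * disable as the first step of teardown.  The entry-point names and the
 * vxgedev layout are illustrative assumptions, not the driver's actual
 * open/close routines.
 */
#if 0	/* illustration only */
static int vxge_open_sketch(struct net_device *dev)
{
	struct vxgedev *vdev = netdev_priv(dev);

	/* ... allocate ring/fifo, program the vpath ... */
	vxge_hw_device_intr_enable(vdev->devh);	/* last step of init */
	vxge_hw_device_unmask_all(vdev->devh);	/* open the traffic gate */
	return 0;
}

static void vxge_close_sketch(struct net_device *dev)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_hw_device_mask_all(vdev->devh);	/* first step of teardown */
	vxge_hw_device_intr_disable(vdev->devh);
	/* ... quiesce and free ring/fifo ... */
}
#endif
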
/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdp: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring __unused,
                                struct vxge_hw_ring_rxd_1 *rxdp)
{
        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
}

/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 *
 * This function posts a non-offload doorbell to the doorbell FIFO.
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
        u64 txdl_ptr, u32 num_txds)
{
        writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds),
                &fifo->nofl_db->control_0);

        /* Ensure the control word is visible to the adapter before the
         * TxDL pointer write that triggers the fetch
         */
        wmb();

        writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

        wmb();
}

/**
 * vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
 *
 * @fifo: tx channel handle
 */
struct vxge_hw_fifo_txd *
        vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo)
{
        struct vxge_hw_fifo_txd *txdp;

        txdp = fifo->txdl + fifo->sw_offset;
        if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
                vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
                                __func__, __LINE__, fifo->sw_offset);
                return NULL;
        }

        return txdp;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdp: Descriptor handle.
 * @iob: data buffer.
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
                        struct vxge_hw_fifo_txd *txdp,
                        struct io_buffer *iob)
{
        txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
                        VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST);
        txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));

        txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
        txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

        txdp->host_control = (intptr_t)iob;
        txdp->buffer_pointer = virt_to_bus(iob->data);
}

/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdp: Tx Descriptor
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo,
                        struct vxge_hw_fifo_txd *txdp)
{
        txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

        __vxge_hw_non_offload_db_post(fifo, (u64) virt_to_bus(txdp), 0);

        vxge_hw_fifo_txd_offset_up(&fifo->sw_offset);
}

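/* Taken together, the three fifo calls above form the whole transmit path:
 * reserve a free TxD, fill it, then hand it to the adapter.  A minimal
 * sketch of a transmit routine built on them; the function name and the
 * fifo lookup through vxgedev are illustrative assumptions.
 */
#if 0	/* illustration only */
static int vxge_xmit_sketch(struct net_device *dev, struct io_buffer *iob)
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct __vxge_hw_fifo *fifo = &vdev->vpath.fifoh;
	struct vxge_hw_fifo_txd *txdp;

	txdp = vxge_hw_fifo_free_txdl_get(fifo);
	if (!txdp)
		return -ENOBUFS;	/* next TxD still owned by hardware */

	vxge_hw_fifo_txdl_buffer_set(fifo, txdp, iob);	/* fill descriptor */
	vxge_hw_fifo_txdl_post(fifo, txdp);	/* ownership + doorbell */
	return 0;
}
#endif
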
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 *
 * Process vpath alarms.
 *
 */
static enum vxge_hw_status __vxge_hw_vpath_alarm_process(
                        struct __vxge_hw_virtualpath *vpath)
{
        u64 val64;
        u64 alarm_status;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_device *hldev = NULL;
        struct vxge_hw_vpath_reg *vp_reg;

        hldev = vpath->hldev;
        vp_reg = vpath->vp_reg;
        alarm_status = readq(&vp_reg->vpath_general_int_status);

        if (alarm_status == VXGE_HW_ALL_FOXES) {

                vxge_debug(VXGE_ERR, "%s: %s:%d, slot freeze error\n",
                        hldev->ndev->name, __func__, __LINE__);
                status = VXGE_HW_ERR_SLOT_FREEZE;
                goto out;
        }

        if (alarm_status & ~(
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {

                vxge_debug(VXGE_ERR, "%s: %s:%d, Unknown vpath alarm\n",
                        hldev->ndev->name, __func__, __LINE__);
                status = VXGE_HW_FAIL;
                goto out;
        }

        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

                val64 = readq(&vp_reg->xgmac_vp_int_status);

                if (val64 &
                VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

                        val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

                        if (((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
                            (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
                            ((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                                && (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                        ))) {
                                writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                netdev_link_down(hldev->ndev);
                                vxge_debug(VXGE_INTR, "%s: %s:%d link down\n",
                                        hldev->ndev->name, __func__, __LINE__);
                        }

                        if (((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
                            (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
                            ((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                                && (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                        ))) {
                                writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                netdev_link_up(hldev->ndev);
                                vxge_debug(VXGE_INTR, "%s: %s:%d link up\n",
                                        hldev->ndev->name, __func__, __LINE__);
                        }

                        writeq(VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->asic_ntwk_vp_err_reg);
                }
        } else {
                vxge_debug(VXGE_INFO, "%s: %s:%d unhandled alarm %llx\n",
                                hldev->ndev->name, __func__, __LINE__,
                                alarm_status);
        }
out:
        return status;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
        if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
                        (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
                        &hldev->common_reg->tim_int_status0);
        }

        if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
                        (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                        (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                        &hldev->common_reg->tim_int_status1);
        }
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 *
 * The function performs two actions: it first checks whether (shared IRQ) the
 * interrupt was raised by the device, and then masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns a 64-bit general adapter
 * status.
 */
enum vxge_hw_status
vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u64 adapter_status;
        u64 vpath_mask;
        enum vxge_hw_status ret = VXGE_HW_OK;

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        if (!val64) {
                ret = VXGE_HW_ERR_WRONG_IRQ;
                goto exit;
        }

        if (val64 == VXGE_HW_ALL_FOXES) {

                adapter_status = readq(&hldev->common_reg->adapter_status);

                if (adapter_status == VXGE_HW_ALL_FOXES) {

                        vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
                                "occurred\n", hldev->ndev->name,
                                __func__, __LINE__);
                        ret = VXGE_HW_ERR_SLOT_FREEZE;
                        goto exit;
                }
        }

        vpath_mask = hldev->vpaths_deployed >>
                                (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
        if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
                                vpath_mask))
                vxge_hw_device_clear_tx_rx(hldev);

        if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)
                ret = __vxge_hw_vpath_alarm_process(&hldev->virtual_path);

exit:
        return ret;
}

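/* A minimal sketch of an interrupt path built on the helper above: mask,
 * check that the interrupt is ours, service traffic, then unmask.  The
 * function name and the ring/fifo handle locations are illustrative
 * assumptions about the caller, not the driver's actual handler.
 */
#if 0	/* illustration only */
static void vxge_irq_sketch(struct __vxge_hw_device *hldev)
{
	vxge_hw_device_mask_all(hldev);

	if (vxge_hw_device_begin_irq(hldev) == VXGE_HW_OK) {
		vxge_hw_vpath_poll_rx(&hldev->virtual_path.ringh);
		vxge_hw_vpath_poll_tx(&hldev->virtual_path.fifoh);
	}

	vxge_hw_device_unmask_all(hldev);
}
#endif
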
/**
 * vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive
 * descriptors posted.
 * @ring: Handle to the ring object used for receive
 *
 * The function writes the number of qwords of rxds posted during
 * replenishment. Since the function is called frequently, a flush is not
 * required to post the write transaction. At the very least, the previous
 * write will be flushed once the subsequent write is made.
 *
 * Returns: None.
 */
void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ring)
{
        u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
                VXGE_HW_RING_RXD_QWORDS_MODE_1;

        ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

        ring->total_db_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

        if (ring->total_db_cnt >= rxds_qw_per_block) {
                /* For each block add 4 more qwords */
                ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

                /* Reset total count */
                ring->total_db_cnt -= rxds_qw_per_block;
        }

        if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
                wmb();
                writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(
                        ring->doorbell_cnt),
                        &ring->vp_reg->prc_rxd_doorbell);
                ring->doorbell_cnt = 0;
        }
}

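/* Worked example of the accounting above, assuming the usual mode-1 values
 * of VXGE_HW_RING_RXD_QWORDS_MODE_1 == 4 qwords per RxD and
 * VXGE_HW_MAX_RXDS_PER_BLOCK_1 == 127 RxDs per block (illustrative
 * assumptions; the authoritative values are in the vxge headers):
 *
 *      rxds_qw_per_block = 127 * 4 = 508 qwords
 *
 * Each repost credits 4 qwords to both counters.  After 127 reposts,
 * total_db_cnt reaches 508, so doorbell_cnt is credited 4 extra qwords for
 * the block and total_db_cnt is rewound by 508.  Once doorbell_cnt crosses
 * rxd_qword_limit, the accumulated count is posted to prc_rxd_doorbell in a
 * single write and reset to zero.
 */
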
/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors.
 */
#define ETH_FCS_LEN     4
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
        struct __vxge_hw_device *hldev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_ring_rxd_1 *rxd;
        unsigned int len;
        enum vxge_hw_ring_tcode tcode;
        struct io_buffer *rx_iob, *iobuf = NULL;
        u16 poll_count = 0;

        hldev = ring->vpathh->hldev;

        do {
                rxd = &ring->rxdl->rxd[ring->rxd_offset];
                tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);

                /* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
                 * possible the ownership bit is still set to the adapter
                 */
                if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
                        && (tcode == VXGE_HW_RING_T_CODE_OK)) {

                        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                        goto err0;
                }

                vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
                        hldev->ndev->name, ring->rxd_offset);

                iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;

                if (tcode != VXGE_HW_RING_T_CODE_OK) {
                        netdev_rx_err(hldev->ndev, NULL, -EINVAL);
                        vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
                                __func__, __LINE__, tcode);
                        status = VXGE_HW_FAIL;
                        goto err1;
                }

                len = VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxd->control_1);
                len -= ETH_FCS_LEN;

                rx_iob = alloc_iob(len);
                if (!rx_iob) {
                        netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
                        vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
                                __func__, __LINE__);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto err1;
                }

                memcpy(iob_put(rx_iob, len), iobuf->data, len);
                /* Add this packet to the receive queue. */
                netdev_rx(hldev->ndev, rx_iob);

err1:
                /* repost the rxd */
                rxd->control_0 = rxd->control_1 = 0;
                vxge_hw_ring_rxd_1b_set(rxd, iobuf,
                                VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
                vxge_hw_ring_rxd_post(ring, rxd);

                /* repost the qword count for doorbell */
                vxge_hw_vpath_doorbell_rx(ring);

                /* increment the descriptor offset */
                vxge_hw_ring_rxd_offset_up(&ring->rxd_offset);

        } while (++poll_count < ring->rx_poll_weight);
err0:
        return status;
}

/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via the supplied completion callback.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_fifo_txd *txdp;

        txdp = fifo->txdl + fifo->hw_offset;
        if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
                && (txdp->host_control)) {

                vxge_xmit_compl(fifo, txdp,
                        VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0));

                vxge_hw_fifo_txd_offset_up(&fifo->hw_offset);
        }

        return status;
}
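
/* When the driver runs without interrupts, the two pollers above can also
 * be driven from a periodic poll routine.  A minimal sketch; the function
 * name and the handle layout are illustrative assumptions.
 */
#if 0	/* illustration only */
static void vxge_poll_sketch(struct net_device *dev)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_hw_vpath_poll_tx(&vdev->vpath.fifoh);	/* reap finished TxDs */
	vxge_hw_vpath_poll_rx(&vdev->vpath.ringh);	/* deliver received frames */
}
#endif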