/* iPXE — tg3.c: Broadcom Tigon3 (tg3) ethernet driver */
00001 
00002 FILE_LICENCE ( GPL2_ONLY );
00003 
00004 #include <mii.h>
00005 #include <stdio.h>
00006 #include <errno.h>
00007 #include <unistd.h>
00008 #include <byteswap.h>
00009 #include <ipxe/pci.h>
00010 #include <ipxe/iobuf.h>
00011 #include <ipxe/timer.h>
00012 #include <ipxe/malloc.h>
00013 #include <ipxe/if_ether.h>
00014 #include <ipxe/ethernet.h>
00015 #include <ipxe/netdevice.h>
00016 
00017 #include "tg3.h"
00018 
00019 #define TG3_DEF_RX_MODE         0
00020 #define TG3_DEF_TX_MODE         0
00021 
00022 static void tg3_refill_prod_ring(struct tg3 *tp);
00023 
00024 /* Do not place this n-ring entries value into the tp struct itself,
00025  * we really want to expose these constants to GCC so that modulo et
00026  * al.  operations are done with shifts and masks instead of with
00027  * hw multiply/modulo instructions.  Another solution would be to
00028  * replace things like '% foo' with '& (foo - 1)'.
00029  */
00030 
00031 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
00032                                  TG3_TX_RING_SIZE)
00033 
00034 /* FIXME: does TG3_RX_RET_MAX_SIZE_5705 work for all cards? */
00035 #define TG3_RX_RCB_RING_BYTES(tp) \
00036         (sizeof(struct tg3_rx_buffer_desc) * (TG3_RX_RET_MAX_SIZE_5705))
00037 
00038 #define TG3_RX_STD_RING_BYTES(tp) \
00039         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_MAX_SIZE_5700)
00040 
/**
 * Free the standard RX producer ring, if allocated.
 *
 * Note: TG3_RX_STD_RING_BYTES() discards its argument and expands to a
 * compile-time constant, so the bare token "tp" below compiles even
 * though no variable of that name is in scope here.
 */
void tg3_rx_prodring_fini(struct tg3_rx_prodring_set *tpr)
{       DBGP("%s\n", __func__);

        if (tpr->rx_std) {
                free_dma(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
                tpr->rx_std = NULL;
        }
}
00049 
00050 /*
00051  * Must not be invoked with interrupt sources disabled and
00052  * the hardware shutdown down.
00053  */
/**
 * Release all DMA-coherent rings and associated bookkeeping.
 *
 * Safe to call on a partially allocated state (used as the error path
 * of tg3_alloc_consistent()): every free is guarded or NULL-tolerant,
 * and each pointer is reset so a repeated call is harmless.
 */
static void tg3_free_consistent(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        /* TX descriptor ring */
        if (tp->tx_ring) {
                free_dma(tp->tx_ring, TG3_TX_RING_BYTES);
                tp->tx_ring = NULL;
        }

        /* free(NULL) is a no-op, so no guard is needed here */
        free(tp->tx_buffers);
        tp->tx_buffers = NULL;

        /* RX return (completion) ring */
        if (tp->rx_rcb) {
                free_dma(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
                tp->rx_rcb_mapping = 0;
                tp->rx_rcb = NULL;
        }

        tg3_rx_prodring_fini(&tp->prodring);

        /* Hardware status block is freed last */
        if (tp->hw_status) {
                free_dma(tp->hw_status, TG3_HW_STATUS_SIZE);
                tp->status_mapping = 0;
                tp->hw_status = NULL;
        }
}
00079 
00080 /*
00081  * Must not be invoked with interrupt sources disabled and
00082  * the hardware shutdown down.  Can sleep.
00083  */
/**
 * Allocate all DMA-coherent rings and the hardware status block.
 *
 * Allocates, in order: the hardware status block, the standard RX
 * producer ring, the TX buffer bookkeeping array, the TX descriptor
 * ring and the RX return (completion) ring.  On any failure, all
 * partially allocated resources are released via tg3_free_consistent().
 *
 * @ret rc      0 on success, -ENOMEM on allocation failure
 */
int tg3_alloc_consistent(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        struct tg3_hw_status *sblk;
        struct tg3_rx_prodring_set *tpr = &tp->prodring;

        tp->hw_status = malloc_dma(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
        if (!tp->hw_status) {
                DBGC(tp->dev, "hw_status alloc failed\n");
                goto err_out;
        }
        /* Bus address the NIC will DMA status updates to */
        tp->status_mapping = virt_to_bus(tp->hw_status);

        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        sblk = tp->hw_status;

        tpr->rx_std = malloc_dma(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
        if (!tpr->rx_std) {
                DBGC(tp->dev, "rx prodring alloc failed\n");
                goto err_out;
        }
        tpr->rx_std_mapping = virt_to_bus(tpr->rx_std);
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

        /* Per-descriptor iobuf bookkeeping for the TX ring */
        tp->tx_buffers = zalloc(sizeof(struct ring_info) * TG3_TX_RING_SIZE);
        if (!tp->tx_buffers)
                goto err_out;

        tp->tx_ring = malloc_dma(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
        if (!tp->tx_ring)
                goto err_out;
        tp->tx_desc_mapping = virt_to_bus(tp->tx_ring);

        /*
         * When RSS is enabled, the status block format changes
         * slightly.  The "rx_jumbo_consumer", "reserved",
         * and "rx_mini_consumer" members get mapped to the
         * other three rx return ring producer indexes.
         */

        tp->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;

        tp->rx_rcb = malloc_dma(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
        if (!tp->rx_rcb)
                goto err_out;
        tp->rx_rcb_mapping = virt_to_bus(tp->rx_rcb);

        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
00139 
00140 #define TG3_RX_STD_BUFF_RING_BYTES(tp) \
00141         (sizeof(struct ring_info) * TG3_RX_STD_MAX_SIZE_5700)
00142 #define TG3_RX_STD_RING_BYTES(tp) \
00143         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_MAX_SIZE_5700)
00144 
00145 /* Initialize rx rings for packet processing.
00146  *
00147  * The chip has been shut down and the driver detached from
00148  * the networking, so no interrupts or new tx packets will
00149  * end up in the driver.
00150  */
00151 static int tg3_rx_prodring_alloc(struct tg3 __unused *tp,
00152                                  struct tg3_rx_prodring_set *tpr)
00153 {       DBGP("%s\n", __func__);
00154 
00155         u32 i;
00156 
00157         tpr->rx_std_cons_idx = 0;
00158         tpr->rx_std_prod_idx = 0;
00159 
00160         /* Initialize invariants of the rings, we only set this
00161          * stuff once.  This works because the card does not
00162          * write into the rx buffer posting rings.
00163          */
00164         /* FIXME: does TG3_RX_STD_MAX_SIZE_5700 work on all cards? */
00165         for (i = 0; i < TG3_RX_STD_MAX_SIZE_5700; i++) {
00166                 struct tg3_rx_buffer_desc *rxd;
00167 
00168                 rxd = &tpr->rx_std[i];
00169                 rxd->idx_len = (TG3_RX_STD_DMA_SZ - 64 - 2) << RXD_LEN_SHIFT;
00170                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
00171                 rxd->opaque = (RXD_OPAQUE_RING_STD |
00172                                (i << RXD_OPAQUE_INDEX_SHIFT));
00173         }
00174 
00175         return 0;
00176 }
00177 
00178 static void tg3_rx_iob_free(struct io_buffer *iobs[], int i)
00179 {       DBGP("%s\n", __func__);
00180 
00181         if (iobs[i] == NULL)
00182                 return;
00183 
00184         free_iob(iobs[i]);
00185         iobs[i] = NULL;
00186 }
00187 
00188 static void tg3_rx_prodring_free(struct tg3_rx_prodring_set *tpr)
00189 {       DBGP("%s\n", __func__);
00190 
00191         unsigned int i;
00192 
00193         for (i = 0; i < TG3_DEF_RX_RING_PENDING; i++)
00194                 tg3_rx_iob_free(tpr->rx_iobufs, i);
00195 }
00196 
00197 /* Initialize tx/rx rings for packet processing.
00198  *
00199  * The chip has been shut down and the driver detached from
00200  * the networking, so no interrupts or new tx packets will
00201  * end up in the driver.
00202  */
00203 int tg3_init_rings(struct tg3 *tp)
00204 {       DBGP("%s\n", __func__);
00205 
00206         /* Free up all the SKBs. */
00207 ///     tg3_free_rings(tp);
00208 
00209         tp->last_tag = 0;
00210         tp->last_irq_tag = 0;
00211         tp->hw_status->status = 0;
00212         tp->hw_status->status_tag = 0;
00213         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
00214 
00215         tp->tx_prod = 0;
00216         tp->tx_cons = 0;
00217         if (tp->tx_ring)
00218                 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
00219 
00220         tp->rx_rcb_ptr = 0;
00221         if (tp->rx_rcb)
00222                 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
00223 
00224         if (tg3_rx_prodring_alloc(tp, &tp->prodring)) {
00225                 DBGC(tp->dev, "tg3_rx_prodring_alloc() failed\n");
00226                 tg3_rx_prodring_free(&tp->prodring);
00227                 return -ENOMEM;
00228         }
00229 
00230         return 0;
00231 }
00232 
/**
 * Open network device (net_device_operations::open).
 *
 * Powers up the chip, programs the MAC address, allocates the DMA
 * rings, brings up the hardware and posts the initial RX buffers.
 *
 * @ret rc      0 on success, negative error code on failure
 */
static int tg3_open(struct net_device *dev)
{       DBGP("%s\n", __func__);

        struct tg3 *tp = netdev_priv(dev);
        struct tg3_rx_prodring_set *tpr = &tp->prodring;
        int err = 0;

        tg3_set_power_state_0(tp);

        /* Initialize MAC address and backoff seed. */
        __tg3_set_mac_addr(tp, 0);

        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        /* No RX buffers are posted yet */
        tpr->rx_std_iob_cnt = 0;

        err = tg3_init_hw(tp, 1);
        if (err != 0)
                /* NOTE(review): err is negative here; assumes iPXE's
                 * strerror() copes with negative codes — confirm. */
                DBGC(tp->dev, "tg3_init_hw failed: %s\n", strerror(err));
        else
                tg3_refill_prod_ring(tp);

        return err;
}
00259 
00260 static inline u32 tg3_tx_avail(struct tg3 *tp)
00261 {       DBGP("%s\n", __func__);
00262 
00263         /* Tell compiler to fetch tx indices from memory. */
00264         barrier();
00265         return TG3_DEF_TX_RING_PENDING -
00266                ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1));
00267 }
00268 
#if 0
/* Debug-only helpers, deliberately compiled out.  Enable this block
 * when diagnosing a set ERR bit in hw_status->status. */
/**
 *
 * Prints all registers that could cause a set ERR bit in hw_status->status
 */
static void tg3_dump_err_reg(struct tg3 *tp)
{       DBGP("%s\n", __func__);

                printf("FLOW_ATTN: %#08x\n", tr32(HOSTCC_FLOW_ATTN));
                printf("MAC ATTN: %#08x\n", tr32(MAC_STATUS));
                printf("MSI STATUS: %#08x\n", tr32(MSGINT_STATUS));
                printf("DMA RD: %#08x\n", tr32(RDMAC_STATUS));
                printf("DMA WR: %#08x\n", tr32(WDMAC_STATUS));
                printf("TX CPU STATE: %#08x\n", tr32(TX_CPU_STATE));
                printf("RX CPU STATE: %#08x\n", tr32(RX_CPU_STATE));
}

/* Write a mailbox register, then flush by reading it back. */
static void __unused tw32_mailbox2(struct tg3 *tp, uint32_t reg, uint32_t val)
{       DBGP("%s\n", __func__);

        tw32_mailbox(reg, val);
        tr32(reg);
}
#endif
00293 
00294 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
00295 
00296 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
00297  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
00298  */
/**
 * Transmit a packet (net_device_operations::transmit).
 *
 * @ret rc      0 on success, -ENOBUFS if the TX ring is full
 */
static int tg3_transmit(struct net_device *dev, struct io_buffer *iob)
{       DBGP("%s\n", __func__);

        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry;
        dma_addr_t mapping;

        /* Refuse to queue when no descriptor is free; the stack will
         * retry the transmission later. */
        if (tg3_tx_avail(tp) < 1) {
                DBGC(dev, "Transmit ring full\n");
                return -ENOBUFS;
        }

        entry = tp->tx_prod;

        /* Pad to minimum Ethernet frame size before measuring length */
        iob_pad(iob, ETH_ZLEN);
        mapping = virt_to_bus(iob->data);
        len = iob_len(iob);

        /* Remember the buffer so tg3_tx_complete() can hand it back */
        tp->tx_buffers[entry].iob = iob;

        tg3_set_txd(tp, entry, mapping, len, TXD_FLAG_END);

        entry = NEXT_TX(entry);

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tp->prodmbox, entry);

        tp->tx_prod = entry;

        mb();

        return 0;
}
00332 
00333 static void tg3_tx_complete(struct net_device *dev)
00334 {       DBGP("%s\n", __func__);
00335 
00336         struct tg3 *tp = netdev_priv(dev);
00337         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
00338         u32 sw_idx = tp->tx_cons;
00339 
00340         while (sw_idx != hw_idx) {
00341                 struct io_buffer *iob = tp->tx_buffers[sw_idx].iob;
00342 
00343                 DBGC2(dev, "Transmitted packet: %zd bytes\n", iob_len(iob));
00344 
00345                 netdev_tx_complete(dev, iob);
00346                 sw_idx = NEXT_TX(sw_idx);
00347         }
00348 
00349         tp->tx_cons = sw_idx;
00350 }
00351 
00352 #define TG3_RX_STD_BUFF_RING_BYTES(tp) \
00353         (sizeof(struct ring_info) * TG3_RX_STD_MAX_SIZE_5700)
00354 #define TG3_RX_STD_RING_BYTES(tp) \
00355         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_MAX_SIZE_5700)
00356 
00357 /* Returns 0 or < 0 on error.
00358  *
00359  * We only need to fill in the address because the other members
00360  * of the RX descriptor are invariant, see tg3_init_rings.
00361  *
00362  * Note the purposeful assymetry of cpu vs. chip accesses.  For
00363  * posting buffers we only dirty the first cache line of the RX
00364  * descriptor (containing the address).  Whereas for the RX status
00365  * buffers the cpu only reads the last cacheline of the RX descriptor
00366  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
00367  */
00368 static int tg3_alloc_rx_iob(struct tg3_rx_prodring_set *tpr, u32 dest_idx_unmasked)
00369 {       DBGP("%s\n", __func__);
00370 
00371         struct tg3_rx_buffer_desc *desc;
00372         struct io_buffer *iob;
00373         dma_addr_t mapping;
00374         int dest_idx, iob_idx;
00375 
00376         dest_idx = dest_idx_unmasked & (TG3_RX_STD_MAX_SIZE_5700 - 1);
00377         desc = &tpr->rx_std[dest_idx];
00378 
00379         /* Do not overwrite any of the map or rp information
00380          * until we are sure we can commit to a new buffer.
00381          *
00382          * Callers depend upon this behavior and assume that
00383          * we leave everything unchanged if we fail.
00384          */
00385         iob = alloc_iob(TG3_RX_STD_DMA_SZ);
00386         if (iob == NULL)
00387                 return -ENOMEM;
00388 
00389         iob_idx = dest_idx % TG3_DEF_RX_RING_PENDING;
00390         tpr->rx_iobufs[iob_idx] = iob;
00391 
00392         mapping = virt_to_bus(iob->data);
00393 
00394         desc->addr_hi = ((u64)mapping >> 32);
00395         desc->addr_lo = ((u64)mapping & 0xffffffff);
00396 
00397         return 0;
00398 }
00399 
00400 static void tg3_refill_prod_ring(struct tg3 *tp)
00401 {       DBGP("%s\n", __func__);
00402 
00403         struct tg3_rx_prodring_set *tpr = &tp->prodring;
00404         int idx = tpr->rx_std_prod_idx;
00405 
00406         DBGCP(tp->dev, "%s\n", __func__);
00407 
00408         while (tpr->rx_std_iob_cnt < TG3_DEF_RX_RING_PENDING) {
00409                 if (tpr->rx_iobufs[idx % TG3_DEF_RX_RING_PENDING] == NULL) {
00410                         if (tg3_alloc_rx_iob(tpr, idx) < 0) {
00411                                 DBGC(tp->dev, "alloc_iob() failed for descriptor %d\n", idx);
00412                                 break;
00413                         }
00414                         DBGC2(tp->dev, "allocated iob_buffer for descriptor %d\n", idx);
00415                 }
00416 
00417                 idx = (idx + 1) % TG3_RX_STD_MAX_SIZE_5700;
00418                 tpr->rx_std_iob_cnt++;
00419         }
00420 
00421         if ((u32)idx != tpr->rx_std_prod_idx) {
00422                 tpr->rx_std_prod_idx = idx;
00423                 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
00424         }
00425 }
00426 
/**
 * Process received packets (net_device_operations::poll helper).
 *
 * Walks the RX return ring from the software consumer index up to the
 * hardware producer index taken from the status block, hands good
 * frames to the network stack (reporting corrupted ones), then
 * acknowledges consumption via the consumer mailbox and refills the
 * producer ring.
 */
static void tg3_rx_complete(struct net_device *dev)
{       DBGP("%s\n", __func__);

        struct tg3 *tp = netdev_priv(dev);

        u32 sw_idx = tp->rx_rcb_ptr;
        u16 hw_idx;
        struct tg3_rx_prodring_set *tpr = &tp->prodring;

        hw_idx = *(tp->rx_rcb_prod_idx);

        while (sw_idx != hw_idx) {
                struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
                /* The opaque cookie carries the producer ring index
                 * this buffer was posted at (see tg3_rx_prodring_alloc) */
                u32 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                int iob_idx = desc_idx % TG3_DEF_RX_RING_PENDING;
                struct io_buffer *iob = tpr->rx_iobufs[iob_idx];
                unsigned int len;

                DBGC2(dev, "RX - desc_idx: %d sw_idx: %d hw_idx: %d\n", desc_idx, sw_idx, hw_idx);

                assert(iob != NULL);

                /* Odd-nibble MII errors are tolerated; any other error
                 * flag drops the frame */
                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                        /* drop packet */
                        DBGC(dev, "Corrupted packet received\n");
                        netdev_rx_err(dev, iob, -EINVAL);
                } else {
                        /* Strip the trailing frame check sequence */
                        len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
                                ETH_FCS_LEN;
                        iob_put(iob, len);
                        netdev_rx(dev, iob);

                        DBGC2(dev, "Received packet: %d bytes %d %d\n", len, sw_idx, hw_idx);
                }

                sw_idx++;
                sw_idx &= TG3_RX_RET_MAX_SIZE_5705 - 1;

                /* Slot is now free for tg3_refill_prod_ring() to repost */
                tpr->rx_iobufs[iob_idx] = NULL;
                tpr->rx_std_iob_cnt--;
        }

        if (tp->rx_rcb_ptr != sw_idx) {
                tw32_rx_mbox(tp->consmbox, sw_idx);
                tp->rx_rcb_ptr = sw_idx;
        }

        tg3_refill_prod_ring(tp);
}
00477 
/**
 * Poll for link, TX and RX events (net_device_operations::poll).
 */
static void tg3_poll(struct net_device *dev)
{       DBGP("%s\n", __func__);

        struct tg3 *tp = netdev_priv(dev);

        /* ACK interrupts */
        /*
         *tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00);
         */
        tp->hw_status->status &= ~SD_STATUS_UPDATED;

        /* Ensure the status-block update is visible before processing */
        mb();

        tg3_poll_link(tp);
        tg3_tx_complete(dev);
        tg3_rx_complete(dev);
}
00495 
00496 static void tg3_close(struct net_device *dev)
00497 {       DBGP("%s\n", __func__);
00498 
00499         struct tg3 *tp = netdev_priv(dev);
00500 
00501         DBGP("%s\n", __func__);
00502 
00503         tg3_halt(tp);
00504         tg3_rx_prodring_free(&tp->prodring);
00505         tg3_flag_clear(tp, INIT_COMPLETE);
00506 
00507         tg3_free_consistent(tp);
00508 
00509 }
00510 
/**
 * Enable or disable interrupts (net_device_operations::irq).
 *
 * The two separate entry traces (a plain one and one with the enable
 * flag) were merged into a single DBGP call.
 */
static void tg3_irq(struct net_device *dev, int enable)
{       DBGP("%s: %d\n", __func__, enable);

        struct tg3 *tp = netdev_priv(dev);

        if (enable)
                tg3_enable_ints(tp);
        else
                tg3_disable_ints(tp);
}
00523 
/** tg3 network device operations */
static struct net_device_operations tg3_netdev_ops = {
        .open = tg3_open,
        .close = tg3_close,
        .poll = tg3_poll,
        .transmit = tg3_transmit,
        .irq = tg3_irq,
};
00531 
00532 #define TEST_BUFFER_SIZE        0x2000
00533 
00534 int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma, int size, int to_device);
00535 void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val);
00536 
/**
 * Configure DMA read/write control and exercise the DMA engine.
 *
 * Builds a chip-revision-specific DMA_RW_CTRL value, then (on 5700 and
 * 5701 only) performs a write-then-read loopback through card memory
 * at offset 0x2100 to detect the known write DMA boundary bug,
 * tightening the write boundary to 16 bytes if corruption is seen.
 *
 * @ret rc      0 on success, negative error code on failure
 */
static int tg3_test_dma(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        dma_addr_t buf_dma;
        u32 *buf;
        int ret = 0;

        buf = malloc_dma(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }
        buf_dma = virt_to_bus(buf);
        DBGC2(tp->dev, "dma test buffer, virt: %p phys: %#016lx\n", buf, buf_dma);

        /* 57765-class chips just disable cache alignment; no test */
        if (tg3_flag(tp, 57765_PLUS)) {
                tp->dma_rwctrl = DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                goto out;
        }

        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                         (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* Only 5700/5701 need the loopback test below */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        while (1) {
                u32 *p = buf, i;

                /* Fill the buffer with a known ascending pattern */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        DBGC(&tp->pdev->dev,
                                "%s: Buffer write failed. err = %d\n",
                                __func__, ret);
                        break;
                }

                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                DBGC(&tp->pdev->dev,
                                        "%s: Buffer corrupted on device! "
                                        "(%d != %d)\n", __func__, val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }

                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        DBGC(&tp->pdev->dev, "%s: Buffer read failed. "
                                "err = %d\n", __func__, ret);
                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        /* First mismatch: tighten the write boundary to
                         * 16 bytes and retry the whole test once */
                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                DBGC(&tp->pdev->dev,
                                        "%s: Buffer corrupted on read back! "
                                        "(%d != %d)\n", __func__, p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }

        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        free_dma(buf, TEST_BUFFER_SIZE);
out_nofree:
        return ret;
}
00715 
00716 static int tg3_init_one(struct pci_device *pdev)
00717 {       DBGP("%s\n", __func__);
00718 
00719         struct net_device *dev;
00720         struct tg3 *tp;
00721         int err = 0;
00722         unsigned long reg_base, reg_size;
00723 
00724         adjust_pci_device(pdev);
00725 
00726         dev = alloc_etherdev(sizeof(*tp));
00727         if (!dev) {
00728                 DBGC(&pdev->dev, "Failed to allocate etherdev\n");
00729                 err = -ENOMEM;
00730                 goto err_out_disable_pdev;
00731         }
00732 
00733         netdev_init(dev, &tg3_netdev_ops);
00734         pci_set_drvdata(pdev, dev);
00735 
00736         dev->dev = &pdev->dev;
00737 
00738         tp = netdev_priv(dev);
00739         tp->pdev = pdev;
00740         tp->dev = dev;
00741         tp->rx_mode = TG3_DEF_RX_MODE;
00742         tp->tx_mode = TG3_DEF_TX_MODE;
00743 
00744         /* Subsystem IDs are required later */
00745         pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_VENDOR_ID, &tp->subsystem_vendor);
00746         pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_ID, &tp->subsystem_device);
00747 
00748         /* The word/byte swap controls here control register access byte
00749          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
00750          * setting below.
00751          */
00752         tp->misc_host_ctrl =
00753                 MISC_HOST_CTRL_MASK_PCI_INT |
00754                 MISC_HOST_CTRL_WORD_SWAP |
00755                 MISC_HOST_CTRL_INDIR_ACCESS |
00756                 MISC_HOST_CTRL_PCISTATE_RW;
00757 
00758         /* The NONFRM (non-frame) byte/word swap controls take effect
00759          * on descriptor entries, anything which isn't packet data.
00760          *
00761          * The StrongARM chips on the board (one for tx, one for rx)
00762          * are running in big-endian mode.
00763          */
00764         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
00765                         GRC_MODE_WSWAP_NONFRM_DATA);
00766 #if __BYTE_ORDER == __BIG_ENDIAN
00767         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
00768 #endif
00769 
00770         /* FIXME: how can we detect errors here? */
00771         reg_base = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
00772         reg_size = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);
00773 
00774         tp->regs = ioremap(reg_base, reg_size);
00775         if (!tp->regs) {
00776                 DBGC(&pdev->dev, "Failed to remap device registers\n");
00777                 errno = -ENOENT;
00778                 goto err_out_disable_pdev;
00779         }
00780 
00781         err = tg3_get_invariants(tp);
00782         if (err) {
00783                 DBGC(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
00784                 goto err_out_iounmap;
00785         }
00786 
00787         tg3_init_bufmgr_config(tp);
00788 
00789         err = tg3_get_device_address(tp);
00790         if (err) {
00791                 DBGC(&pdev->dev, "Could not obtain valid ethernet address, aborting\n");
00792                 goto err_out_iounmap;
00793         }
00794 
00795         /*
00796          * Reset chip in case UNDI or EFI driver did not shutdown
00797          * DMA self test will enable WDMAC and we'll see (spurious)
00798          * pending DMA on the PCI bus at that point.
00799          */
00800         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
00801             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
00802                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
00803                 tg3_halt(tp);
00804         }
00805 
00806         err = tg3_test_dma(tp);
00807         if (err) {
00808                 DBGC(&pdev->dev, "DMA engine test failed, aborting\n");
00809                 goto err_out_iounmap;
00810         }
00811 
00812         tp->int_mbox = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
00813         tp->consmbox = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
00814         tp->prodmbox = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
00815 
00816         tp->coal_now = HOSTCC_MODE_NOW;
00817 
00818         err = register_netdev(dev);
00819         if (err) {
00820                 DBGC(&pdev->dev, "Cannot register net device, aborting\n");
00821                 goto err_out_iounmap;
00822         }
00823 
00824         /* Call tg3_setup_phy() to start autoneg process, which saves time
00825          * over starting autoneg in tg3_open();
00826          */
00827         err = tg3_setup_phy(tp, 0);
00828         if (err) {
00829                 DBGC(tp->dev, "tg3_setup_phy() call failed in %s\n", __func__);
00830                 goto err_out_iounmap;
00831         }
00832 
00833         return 0;
00834 
00835 err_out_iounmap:
00836         if (tp->regs) {
00837                 iounmap(tp->regs);
00838                 tp->regs = NULL;
00839         }
00840 
00841         netdev_put(dev);
00842 
00843 err_out_disable_pdev:
00844         pci_set_drvdata(pdev, NULL);
00845         return err;
00846 }
00847 
00848 static void tg3_remove_one(struct pci_device *pci)
00849 {       DBGP("%s\n", __func__);
00850 
00851         struct net_device *netdev = pci_get_drvdata(pci);
00852 
00853         unregister_netdev(netdev);
00854         netdev_nullify(netdev);
00855         netdev_put(netdev);
00856 }
00857 
/*
 * PCI device ID table for all supported Tigon3 variants.
 *
 * Each PCI_ROM() entry is (vendor, device, ROM name, description, flags);
 * the table is matched against probed PCI devices via tg3_pci_driver
 * below.  Vendor 0x14e4 is Broadcom; the trailing entries cover OEM
 * rebrands (0x1148 SysKonnect, 0x173b Altima, 0x106b Apple).
 */
static struct pci_device_id tg3_nics[] = {
        /* Broadcom (0x14e4) Tigon3 family */
        PCI_ROM(0x14e4, 0x1644, "14e4-1644", "14e4-1644", 0),
        PCI_ROM(0x14e4, 0x1645, "14e4-1645", "14e4-1645", 0),
        PCI_ROM(0x14e4, 0x1646, "14e4-1646", "14e4-1646", 0),
        PCI_ROM(0x14e4, 0x1647, "14e4-1647", "14e4-1647", 0),
        PCI_ROM(0x14e4, 0x1648, "14e4-1648", "14e4-1648", 0),
        PCI_ROM(0x14e4, 0x164d, "14e4-164d", "14e4-164d", 0),
        PCI_ROM(0x14e4, 0x1653, "14e4-1653", "14e4-1653", 0),
        PCI_ROM(0x14e4, 0x1654, "14e4-1654", "14e4-1654", 0),
        PCI_ROM(0x14e4, 0x165d, "14e4-165d", "14e4-165d", 0),
        PCI_ROM(0x14e4, 0x165e, "14e4-165e", "14e4-165e", 0),
        PCI_ROM(0x14e4, 0x16a6, "14e4-16a6", "14e4-16a6", 0),
        PCI_ROM(0x14e4, 0x16a7, "14e4-16a7", "14e4-16a7", 0),
        PCI_ROM(0x14e4, 0x16a8, "14e4-16a8", "14e4-16a8", 0),
        PCI_ROM(0x14e4, 0x16c6, "14e4-16c6", "14e4-16c6", 0),
        PCI_ROM(0x14e4, 0x16c7, "14e4-16c7", "14e4-16c7", 0),
        PCI_ROM(0x14e4, 0x1696, "14e4-1696", "14e4-1696", 0),
        PCI_ROM(0x14e4, 0x169c, "14e4-169c", "14e4-169c", 0),
        PCI_ROM(0x14e4, 0x169d, "14e4-169d", "14e4-169d", 0),
        PCI_ROM(0x14e4, 0x170d, "14e4-170d", "14e4-170d", 0),
        PCI_ROM(0x14e4, 0x170e, "14e4-170e", "14e4-170e", 0),
        PCI_ROM(0x14e4, 0x1649, "14e4-1649", "14e4-1649", 0),
        PCI_ROM(0x14e4, 0x166e, "14e4-166e", "14e4-166e", 0),
        PCI_ROM(0x14e4, 0x1659, "14e4-1659", "14e4-1659", 0),
        PCI_ROM(0x14e4, 0x165a, "14e4-165a", "14e4-165a", 0),
        PCI_ROM(0x14e4, 0x1677, "14e4-1677", "14e4-1677", 0),
        PCI_ROM(0x14e4, 0x167d, "14e4-167d", "14e4-167d", 0),
        PCI_ROM(0x14e4, 0x167e, "14e4-167e", "14e4-167e", 0),
        PCI_ROM(0x14e4, 0x1600, "14e4-1600", "14e4-1600", 0),
        PCI_ROM(0x14e4, 0x1601, "14e4-1601", "14e4-1601", 0),
        PCI_ROM(0x14e4, 0x16f7, "14e4-16f7", "14e4-16f7", 0),
        PCI_ROM(0x14e4, 0x16fd, "14e4-16fd", "14e4-16fd", 0),
        PCI_ROM(0x14e4, 0x16fe, "14e4-16fe", "14e4-16fe", 0),
        PCI_ROM(0x14e4, 0x167a, "14e4-167a", "14e4-167a", 0),
        PCI_ROM(0x14e4, 0x1672, "14e4-1672", "14e4-1672", 0),
        PCI_ROM(0x14e4, 0x167b, "14e4-167b", "14e4-167b", 0),
        PCI_ROM(0x14e4, 0x1673, "14e4-1673", "14e4-1673", 0),
        PCI_ROM(0x14e4, 0x1674, "14e4-1674", "14e4-1674", 0),
        PCI_ROM(0x14e4, 0x169a, "14e4-169a", "14e4-169a", 0),
        PCI_ROM(0x14e4, 0x169b, "14e4-169b", "14e4-169b", 0),
        PCI_ROM(0x14e4, 0x1693, "14e4-1693", "14e4-1693", 0),
        PCI_ROM(0x14e4, 0x167f, "14e4-167f", "14e4-167f", 0),
        PCI_ROM(0x14e4, 0x1668, "14e4-1668", "14e4-1668", 0),
        PCI_ROM(0x14e4, 0x1669, "14e4-1669", "14e4-1669", 0),
        PCI_ROM(0x14e4, 0x1678, "14e4-1678", "14e4-1678", 0),
        PCI_ROM(0x14e4, 0x1679, "14e4-1679", "14e4-1679", 0),
        PCI_ROM(0x14e4, 0x166a, "14e4-166a", "14e4-166a", 0),
        PCI_ROM(0x14e4, 0x166b, "14e4-166b", "14e4-166b", 0),
        PCI_ROM(0x14e4, 0x16dd, "14e4-16dd", "14e4-16dd", 0),
        PCI_ROM(0x14e4, 0x1712, "14e4-1712", "14e4-1712", 0),
        PCI_ROM(0x14e4, 0x1713, "14e4-1713", "14e4-1713", 0),
        PCI_ROM(0x14e4, 0x1698, "14e4-1698", "14e4-1698", 0),
        PCI_ROM(0x14e4, 0x1684, "14e4-1684", "14e4-1684", 0),
        PCI_ROM(0x14e4, 0x165b, "14e4-165b", "14e4-165b", 0),
        PCI_ROM(0x14e4, 0x1681, "14e4-1681", "14e4-1681", 0),
        PCI_ROM(0x14e4, 0x1682, "14e4-1682", "14e4-1682", 0),
        PCI_ROM(0x14e4, 0x1680, "14e4-1680", "14e4-1680", 0),
        PCI_ROM(0x14e4, 0x1688, "14e4-1688", "14e4-1688", 0),
        PCI_ROM(0x14e4, 0x1689, "14e4-1689", "14e4-1689", 0),
        PCI_ROM(0x14e4, 0x1699, "14e4-1699", "14e4-1699", 0),
        PCI_ROM(0x14e4, 0x16a0, "14e4-16a0", "14e4-16a0", 0),
        PCI_ROM(0x14e4, 0x1692, "14e4-1692", "14e4-1692", 0),
        PCI_ROM(0x14e4, 0x1690, "14e4-1690", "14e4-1690", 0),
        PCI_ROM(0x14e4, 0x1694, "14e4-1694", "14e4-1694", 0),
        PCI_ROM(0x14e4, 0x1691, "14e4-1691", "14e4-1691", 0),
        PCI_ROM(0x14e4, 0x1655, "14e4-1655", "14e4-1655", 0),
        PCI_ROM(0x14e4, 0x1656, "14e4-1656", "14e4-1656", 0),
        PCI_ROM(0x14e4, 0x16b1, "14e4-16b1", "14e4-16b1", 0),
        PCI_ROM(0x14e4, 0x16b5, "14e4-16b5", "14e4-16b5", 0),
        PCI_ROM(0x14e4, 0x16b0, "14e4-16b0", "14e4-16b0", 0),
        PCI_ROM(0x14e4, 0x16b4, "14e4-16b4", "14e4-16b4", 0),
        PCI_ROM(0x14e4, 0x16b2, "14e4-16b2", "14e4-16b2", 0),
        PCI_ROM(0x14e4, 0x16b6, "14e4-16b6", "14e4-16b6", 0),
        PCI_ROM(0x14e4, 0x1657, "14e4-1657", "14e4-1657", 0),
        PCI_ROM(0x14e4, 0x165f, "14e4-165f", "14e4-165f", 0),
        PCI_ROM(0x14e4, 0x1686, "14e4-1686", "14e4-1686", 0),
        /* SysKonnect (0x1148) rebrands */
        PCI_ROM(0x1148, 0x4400, "1148-4400", "1148-4400", 0),
        PCI_ROM(0x1148, 0x4500, "1148-4500", "1148-4500", 0),
        /* Altima (0x173b) rebrands */
        PCI_ROM(0x173b, 0x03e8, "173b-03e8", "173b-03e8", 0),
        PCI_ROM(0x173b, 0x03e9, "173b-03e9", "173b-03e9", 0),
        PCI_ROM(0x173b, 0x03eb, "173b-03eb", "173b-03eb", 0),
        PCI_ROM(0x173b, 0x03ea, "173b-03ea", "173b-03ea", 0),
        /* Apple (0x106b) onboard variant */
        PCI_ROM(0x106b, 0x1645, "106b-1645", "106b-1645", 0),
};
00942 
/*
 * tg3 PCI driver descriptor.
 *
 * Binds the supported-device table to the probe/remove entry points;
 * the __pci_driver attribute places this structure in iPXE's PCI
 * driver table so the core enumeration code can find it.
 */
struct pci_driver tg3_pci_driver __pci_driver = {
        .ids = tg3_nics,
        .id_count = ARRAY_SIZE(tg3_nics),
        .probe = tg3_init_one,
        .remove = tg3_remove_one,
};