b44.c
/*
 * Copyright (c) 2008 Stefan Hajnoczi <stefanha@gmail.com>
 * Copyright (c) 2008 Pantelis Koukousoulas <pktoss@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * This driver is a port of the b44 linux driver version 1.01
 *
 * Copyright (c) 2002 David S. Miller <davem@redhat.com>
 * Copyright (c) Pekka Pietikainen <pp@ee.oulu.fi>
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Some ssb bits copied from version 2.0 of the b44 driver
 * Copyright (c) Michael Buesch
 *
 * Copyright (c) a lot of people too. Please respect their work.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <byteswap.h>
#include <ipxe/io.h>
#include <mii.h>
#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include "b44.h"


static inline int ring_next(int index)
{
        /* B44_RING_SIZE is a power of 2 :) */
        return (index + 1) & (B44_RING_SIZE - 1);
}


/* Memory-mapped I/O wrappers */

static inline u32 br32(const struct b44_private *bp, u32 reg)
{
        return readl(bp->regs + reg);
}


static inline void bw32(const struct b44_private *bp, u32 reg, u32 val)
{
        writel(val, bp->regs + reg);
}


static inline void bflush(const struct b44_private *bp, u32 reg, u32 timeout)
{
        readl(bp->regs + reg);
        udelay(timeout);
}

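/*
 * DMA addresses handed to the card are offset by SB_PCI_DMA so that
 * they fall within the window through which the SSB PCI core accesses
 * host memory.
 */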
#define VIRT_TO_B44(addr)       ( virt_to_bus(addr) + SB_PCI_DMA )


/**
 * Check if card can access address
 *
 * @v address           Virtual address
 * @ret address_ok      Card can access address
 */
static inline __attribute__ (( always_inline )) int
b44_address_ok ( void *address ) {

        /* Card can address anything with a 30-bit address */
        if ( ( virt_to_bus ( address ) & ~B44_30BIT_DMA_MASK ) == 0 )
                return 1;

        return 0;
}

/**
 * Ring cells waiting to be processed are between 'tx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_tx_index(struct b44_private *bp)
{
        u32 pending = br32(bp, B44_DMATX_STAT);
        pending &= DMATX_STAT_CDMASK;

        pending /= sizeof(struct dma_desc);
        return pending & (B44_RING_SIZE - 1);
}


/**
 * Ring cells waiting to be processed are between 'rx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_rx_index(struct b44_private *bp)
{
        u32 pending = br32(bp, B44_DMARX_STAT);
        pending &= DMARX_STAT_CDMASK;

        pending /= sizeof(struct dma_desc);
        return pending & (B44_RING_SIZE - 1);
}


/**
 * Wait until the given bit is set/cleared.
 */
static int b44_wait_bit(struct b44_private *bp, unsigned long reg, u32 bit,
                                    unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;

                if (!clear && (val & bit))
                        break;

                udelay(10);
        }
        if (i == timeout) {
                return -ENODEV;
        }
        return 0;
}


/*
 * Sonics Silicon Backplane support. SSB is a mini-bus interconnecting
 * so-called IP Cores. One of those cores implements the Fast Ethernet
 * functionality and another one the PCI engine.
 *
 * You need to switch to the core you want to talk to before actually
 * sending commands.
 *
 * See: http://bcm-v4.sipsolutions.net/Backplane for (reverse-engineered)
 * specs.
 */

static inline u32 ssb_get_core_rev(struct b44_private *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}


static inline int ssb_is_core_up(struct b44_private *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SSB_CORE_DOWN | SBTMSLOW_CLOCK))
                                                        == SBTMSLOW_CLOCK);
}

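/**
 * Route interrupts from the given cores and enable PCI prefetch/burst.
 * Temporarily points the BAR0 window at the PCI core to do so, then
 * restores the original window. Returns the SSB core revision.
 */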
static u32 ssb_pci_setup(struct b44_private *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pci, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pci, SSB_BAR0_WIN,
                               BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pci, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

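/**
 * Quiesce and disable the Ethernet core: reject new backplane
 * transactions, wait for the core to go idle, then hold it in reset.
 */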
static void ssb_core_disable(struct b44_private *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                                                SSB_CORE_DOWN));
        bflush(bp, B44_SBTMSLOW, 1);

        bw32(bp, B44_SBTMSLOW, SSB_CORE_DOWN);
        bflush(bp, B44_SBTMSLOW, 1);
}

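/**
 * Reset the Ethernet core and bring it back out of reset with its
 * clock enabled.
 */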
static void ssb_core_reset(struct b44_private *bp)
{
        u32 val;
        const u32 mask = (SBTMSLOW_CLOCK | SBTMSLOW_FGC | SBTMSLOW_RESET);

        ssb_core_disable(bp);

        bw32(bp, B44_SBTMSLOW, mask);
        bflush(bp, B44_SBTMSLOW, 1);

        /* Clear SERR if set, this is a hw bug workaround.  */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_BAD)) {
                bw32(bp, B44_SBIMSTATE, val & ~SBIMSTATE_BAD);
        }

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        bflush(bp, B44_SBTMSLOW, 1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        bflush(bp, B44_SBTMSLOW, 1);
}


/*
 * Driver helper functions
 */

/*
 * Chip reset provides power to the b44 MAC & PCI cores, which
 * is necessary for MAC register access. We only do a partial
 * reset in case of transmit/receive errors (ISTAT_ERRORS) to
 * avoid the chip being hung for an unnecessarily long time in
 * this case.
 *
 * Called-by: b44_close, b44_halt, b44_inithw(b44_open), b44_probe
 */
static void b44_chip_reset(struct b44_private *bp, int reset_kind)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);

                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);

                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);

                bw32(bp, B44_DMATX_CTRL, 0);

                bp->tx_dirty = bp->tx_cur = 0;

                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK)
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                                                  100, 0);

                bw32(bp, B44_DMARX_CTRL, 0);

                bp->rx_cur = 0;
        } else {
                ssb_pci_setup(bp, SBINTVEC_ENET0);
        }

        ssb_core_reset(bp);

        /* Don't enable PHY if we are only doing a partial reset. */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL,
             (MDIO_CTRL_PREAMBLE | (0x0d & MDIO_CTRL_MAXF_MASK)));
        bflush(bp, B44_MDIO_CTRL, 1);

        /* Enable internal or external PHY */
        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                bflush(bp, B44_ENET_CTRL, 1);
        } else {
                u32 val = br32(bp, B44_DEVCTRL);
                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        bflush(bp, B44_DEVCTRL, 100);
                }
        }
}


/**
 * called by b44_poll in the error path
 */
static void b44_halt(struct b44_private *bp)
{
        /* disable ints */
        bw32(bp, B44_IMASK, 0);
        bflush(bp, B44_IMASK, 1);

        DBG("b44: powering down PHY\n");
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);

        /*
         * Now reset the chip, but without enabling
         * the MAC&PHY part of it.
         * This has to be done _after_ we shut down the PHY
         */
        b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}



/*
 * Called at device open time to get the chip ready for
 * packet processing.
 *
 * Called-by: b44_open
 */
static void b44_init_hw(struct b44_private *bp, int reset_kind)
{
        u32 val;
#define CTRL_MASK (DMARX_CTRL_ENABLE | (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        b44_set_rx_mode(bp->netdev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_HIWMARK, TX_HIWMARK_DEFLT);
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, VIRT_TO_B44(bp->tx));

                bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
                bw32(bp, B44_DMARX_ADDR, VIRT_TO_B44(bp->rx));
                bw32(bp, B44_DMARX_PTR, B44_RX_RING_LEN_BYTES);

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
#undef CTRL_MASK
}


/***  Management of ring descriptors  ***/

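/**
 * Fill in the RX descriptor at the given ring index and publish it to
 * the card by advancing the B44_DMARX_PTR register.
 */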
static void b44_populate_rx_descriptor(struct b44_private *bp, u32 idx)
{
        struct rx_header *rh;
        u32 ctrl, addr;

        rh = bp->rx_iobuf[idx]->data;
        rh->len = 0;
        rh->flags = 0;
        ctrl = DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET);
        if (idx == B44_RING_LAST) {
                ctrl |= DESC_CTRL_EOT;
        }
        addr = VIRT_TO_B44(bp->rx_iobuf[idx]->data);

        bp->rx[idx].ctrl = cpu_to_le32(ctrl);
        bp->rx[idx].addr = cpu_to_le32(addr);
        bw32(bp, B44_DMARX_PTR, idx * sizeof(struct dma_desc));
}


/*
 * Refill RX ring descriptors with buffers. This is needed
 * because during rx we are passing ownership of descriptor
 * buffers to the network stack.
 */
static void b44_rx_refill(struct b44_private *bp, u32 pending)
{
        struct io_buffer *iobuf;
        u32 i;

        /* skip pending */
        for (i = pending + 1; i != bp->rx_cur; i = ring_next(i)) {
                if (bp->rx_iobuf[i] != NULL)
                        continue;

                iobuf = alloc_iob(RX_PKT_BUF_SZ);
                if (!iobuf) {
                        DBG("Refill rx ring failed!!\n");
                        break;
                }
                if (!b44_address_ok(iobuf->data)) {
                        DBG("Refill rx ring bad address!!\n");
                        free_iob(iobuf);
                        break;
                }
                bp->rx_iobuf[i] = iobuf;

                b44_populate_rx_descriptor(bp, i);
        }
}


static void b44_free_rx_ring(struct b44_private *bp)
{
        u32 i;

        if (bp->rx) {
                for (i = 0; i < B44_RING_SIZE; i++) {
                        free_iob(bp->rx_iobuf[i]);
                        bp->rx_iobuf[i] = NULL;
                }
                free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
                bp->rx = NULL;
        }
}

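/**
 * Allocate the RX descriptor ring in DMA-capable memory and seed it
 * with receive buffers via b44_rx_refill().
 */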
static int b44_init_rx_ring(struct b44_private *bp)
{
        b44_free_rx_ring(bp);

        bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
        if (!bp->rx)
                return -ENOMEM;
        if (!b44_address_ok(bp->rx)) {
                free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
                return -ENOTSUP;
        }

        memset(bp->rx_iobuf, 0, sizeof(bp->rx_iobuf));

        bp->rx_iobuf[0] = alloc_iob(RX_PKT_BUF_SZ);
        b44_populate_rx_descriptor(bp, 0);
        b44_rx_refill(bp, 0);

        DBG("Init RX rings: rx=0x%08lx\n", VIRT_TO_B44(bp->rx));
        return 0;
}


static void b44_free_tx_ring(struct b44_private *bp)
{
        if (bp->tx) {
                free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
                bp->tx = NULL;
        }
}

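/**
 * Allocate and zero the TX descriptor ring in DMA-capable memory.
 */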
static int b44_init_tx_ring(struct b44_private *bp)
{
        b44_free_tx_ring(bp);

        bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
        if (!bp->tx)
                return -ENOMEM;
        if (!b44_address_ok(bp->tx)) {
                free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
                return -ENOTSUP;
        }

        memset(bp->tx, 0, B44_TX_RING_LEN_BYTES);
        memset(bp->tx_iobuf, 0, sizeof(bp->tx_iobuf));

        DBG("Init TX rings: tx=0x%08lx\n", VIRT_TO_B44(bp->tx));
        return 0;
}


/*** Interaction with the PHY ***/

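/**
 * Read a PHY register over the MII management (MDIO) interface.
 */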
static int b44_phy_read(struct b44_private *bp, int reg, u32 * val)
{
        int err;

        u32 arg1 = (MDIO_OP_READ << MDIO_DATA_OP_SHIFT);
        u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
        u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
        u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
        u32 argv = arg1 | arg2 | arg3 | arg4;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

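/**
 * Write a PHY register over the MII management (MDIO) interface.
 */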
static int b44_phy_write(struct b44_private *bp, int reg, u32 val)
{
        u32 arg1 = (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT);
        u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
        u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
        u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
        u32 arg5 = (val & MDIO_DATA_DATA);
        u32 argv = arg1 | arg2 | arg3 | arg4 | arg5;


        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}


static int b44_phy_reset(struct b44_private *bp)
{
        u32 val;
        int err;

        err = b44_phy_write(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;

        udelay(100);
        err = b44_phy_read(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        return -ENODEV;
                }
        }

        return 0;
}


/*
 * The BCM44xx CAM (Content Addressable Memory) stores the MAC
 * and PHY address.
 */
static void b44_cam_write(struct b44_private *bp, unsigned char *data,
                                                            int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) << 8;
        val |= ((u32) data[5]) << 0;
        bw32(bp, B44_CAM_DATA_LO, val);


        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) | (((u32) data[1]) << 0));

        bw32(bp, B44_CAM_DATA_HI, val);

        val = CAM_CTRL_WRITE | (index << CAM_CTRL_INDEX_SHIFT);
        bw32(bp, B44_CAM_CTRL, val);

        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

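/** Write the interface MAC address into CAM slot 0 and enable the CAM */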
static void b44_set_mac_addr(struct b44_private *bp)
{
        u32 val;
        bw32(bp, B44_CAM_CTRL, 0);
        b44_cam_write(bp, bp->netdev->ll_addr, 0);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}

/* Read 128 bytes of EEPROM. */
static void b44_read_eeprom(struct b44_private *bp, u8 * data)
{
        long i;
        u16 *ptr = (u16 *) data;

        for (i = 0; i < 128; i += 2)
                ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
}


static void b44_load_mac_and_phy_addr(struct b44_private *bp)
{
        u8 eeprom[128];

        /* Load MAC address, note byteswapping */
        b44_read_eeprom(bp, &eeprom[0]);
        bp->netdev->hw_addr[0] = eeprom[79];
        bp->netdev->hw_addr[1] = eeprom[78];
        bp->netdev->hw_addr[2] = eeprom[81];
        bp->netdev->hw_addr[3] = eeprom[80];
        bp->netdev->hw_addr[4] = eeprom[83];
        bp->netdev->hw_addr[5] = eeprom[82];

        /* Load PHY address */
        bp->phy_addr = eeprom[90] & 0x1f;
}

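/**
 * Program the receive filter: our MAC address in CAM slot 0, the
 * remaining CAM slots cleared, all multicast accepted and promiscuous
 * mode disabled.
 */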
static void b44_set_rx_mode(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };
        u32 val;
        int i;

        val = br32(bp, B44_RXCONFIG);
        val &= ~RXCONFIG_PROMISC;
        val |= RXCONFIG_ALLMULTI;

        b44_set_mac_addr(bp);

        for (i = 1; i < 64; i++)
                b44_cam_write(bp, zero, i);

        bw32(bp, B44_RXCONFIG, val);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}


/*** Implementation of iPXE driver callbacks ***/

/**
 * Probe device
 *
 * @v pci       PCI device
 * @ret rc      Return status code
 */
static int b44_probe(struct pci_device *pci)
{
        struct net_device *netdev;
        struct b44_private *bp;
        int rc;

        /* Set up netdev */
        netdev = alloc_etherdev(sizeof(*bp));
        if (!netdev)
                return -ENOMEM;

        netdev_init(netdev, &b44_operations);
        pci_set_drvdata(pci, netdev);
        netdev->dev = &pci->dev;

        /* Set up private data */
        bp = netdev_priv(netdev);
        memset(bp, 0, sizeof(*bp));
        bp->netdev = netdev;
        bp->pci = pci;

        /* Map device registers */
        bp->regs = ioremap(pci->membase, B44_REGS_SIZE);
        if (!bp->regs) {
                netdev_put(netdev);
                return -ENOMEM;
        }

        /* Enable PCI bus mastering */
        adjust_pci_device(pci);

        b44_load_mac_and_phy_addr(bp);

        rc = register_netdev(netdev);
        if (rc != 0) {
                iounmap(bp->regs);
                netdev_put(netdev);
                return rc;
        }

        /* Link management currently not implemented */
        netdev_link_up(netdev);

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);

        DBG("b44 %s (%04x:%04x) regs=%p MAC=%s\n", pci->id->name,
            pci->id->vendor, pci->id->device, bp->regs,
            eth_ntoa(netdev->ll_addr));

        return 0;
}


/**
 * Remove device
 *
 * @v pci       PCI device
 */
static void b44_remove(struct pci_device *pci)
{
        struct net_device *netdev = pci_get_drvdata(pci);
        struct b44_private *bp = netdev_priv(netdev);

        ssb_core_disable(bp);
        unregister_netdev(netdev);
        iounmap(bp->regs);
        netdev_nullify(netdev);
        netdev_put(netdev);
}


/** Enable or disable interrupts
 *
 * @v netdev    Network device
 * @v enable    Interrupts should be enabled
 */
static void b44_irq(struct net_device *netdev, int enable)
{
        struct b44_private *bp = netdev_priv(netdev);

        /* Interrupt mask specifies which events generate interrupts */
        bw32(bp, B44_IMASK, enable ? IMASK_DEF : IMASK_DISABLE);
}


/** Open network device
 *
 * @v netdev    Network device
 * @ret rc      Return status code
 */
static int b44_open(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        int rc;

        rc = b44_init_tx_ring(bp);
        if (rc != 0)
                return rc;

        rc = b44_init_rx_ring(bp);
        if (rc != 0)
                return rc;

        b44_init_hw(bp, B44_FULL_RESET);

        /* Disable interrupts */
        b44_irq(netdev, 0);

        return 0;
}


/** Close network device
 *
 * @v netdev    Network device
 */
static void b44_close(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);

        b44_chip_reset(bp, B44_FULL_RESET);
        b44_free_tx_ring(bp);
        b44_free_rx_ring(bp);
}


/** Transmit packet
 *
 * @v netdev    Network device
 * @v iobuf     I/O buffer
 * @ret rc      Return status code
 */
static int b44_transmit(struct net_device *netdev, struct io_buffer *iobuf)
{
        struct b44_private *bp = netdev_priv(netdev);
        u32 cur = bp->tx_cur;
        u32 ctrl;

        /* Check for TX ring overflow */
        if (bp->tx[cur].ctrl) {
                DBG("tx overflow\n");
                return -ENOBUFS;
        }

        /* Check for addressability */
        if (!b44_address_ok(iobuf->data))
                return -ENOTSUP;

        /* Will call netdev_tx_complete() on the iobuf later */
        bp->tx_iobuf[cur] = iobuf;

        /* Set up TX descriptor */
        ctrl = (iob_len(iobuf) & DESC_CTRL_LEN) |
            DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;

        if (cur == B44_RING_LAST)
                ctrl |= DESC_CTRL_EOT;

        bp->tx[cur].ctrl = cpu_to_le32(ctrl);
        bp->tx[cur].addr = cpu_to_le32(VIRT_TO_B44(iobuf->data));

        /* Update next available descriptor index */
        cur = ring_next(cur);
        bp->tx_cur = cur;
        wmb();

        /* Tell card that a new TX descriptor is ready */
        bw32(bp, B44_DMATX_PTR, cur * sizeof(struct dma_desc));
        return 0;
}


/** Recycles sent TX descriptors and notifies network stack
 *
 * @v bp Driver state
 */
static void b44_tx_complete(struct b44_private *bp)
{
        u32 cur, i;

        cur = pending_tx_index(bp);

        for (i = bp->tx_dirty; i != cur; i = ring_next(i)) {
                /* Free finished frame */
                netdev_tx_complete(bp->netdev, bp->tx_iobuf[i]);
                bp->tx_iobuf[i] = NULL;

                /* Clear TX descriptor */
                bp->tx[i].ctrl = 0;
                bp->tx[i].addr = 0;
        }
        bp->tx_dirty = cur;
}

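/**
 * Pass completed receive buffers up to the network stack and refill
 * the RX ring.
 */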
static void b44_process_rx_packets(struct b44_private *bp)
{
        struct io_buffer *iob;  /* received data */
        struct rx_header *rh;
        u32 pending, i;
        u16 len;

        pending = pending_rx_index(bp);

        for (i = bp->rx_cur; i != pending; i = ring_next(i)) {
                iob = bp->rx_iobuf[i];
                if (iob == NULL)
                        break;

                rh = iob->data;
                len = le16_to_cpu(rh->len);

                /*
                 * Guard against incompletely written RX descriptors.
                 * Without this, things can get really slow!
                 */
                if (len == 0)
                        break;

                /* Discard CRC that is generated by the card */
                len -= 4;

                /* Check for invalid packets and errors */
                if (len > RX_PKT_BUF_SZ - RX_PKT_OFFSET ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                        DBG("rx error len=%d flags=%04x\n", len,
                                         cpu_to_le16(rh->flags));
                        rh->len = 0;
                        rh->flags = 0;
                        netdev_rx_err(bp->netdev, iob, -EINVAL);
                        continue;
                }

                /* Clear RX descriptor */
                rh->len = 0;
                rh->flags = 0;
                bp->rx_iobuf[i] = NULL;

                /* Hand off the IO buffer to the network stack */
                iob_reserve(iob, RX_PKT_OFFSET);
                iob_put(iob, len);
                netdev_rx(bp->netdev, iob);
        }
        bp->rx_cur = i;
        b44_rx_refill(bp, pending_rx_index(bp));
}


/** Poll for completed and received packets
 *
 * @v netdev    Network device
 */
static void b44_poll(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        u32 istat;

        /* Interrupt status */
        istat = br32(bp, B44_ISTAT);
        istat &= IMASK_DEF;     /* only the events we care about */

        if (!istat)
                return;
        if (istat & ISTAT_TX)
                b44_tx_complete(bp);
        if (istat & ISTAT_RX)
                b44_process_rx_packets(bp);
        if (istat & ISTAT_ERRORS) {
                DBG("b44 error istat=0x%08x\n", istat);

                /* Reset B44 core partially to avoid long waits */
                b44_irq(bp->netdev, 0);
                b44_halt(bp);
                b44_init_tx_ring(bp);
                b44_init_rx_ring(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
        }

        /* Acknowledge interrupt */
        bw32(bp, B44_ISTAT, 0);
        bflush(bp, B44_ISTAT, 1);
}


static struct net_device_operations b44_operations = {
        .open = b44_open,
        .close = b44_close,
        .transmit = b44_transmit,
        .poll = b44_poll,
        .irq = b44_irq,
};


static struct pci_device_id b44_nics[] = {
        PCI_ROM(0x14e4, 0x4401, "BCM4401", "BCM4401", 0),
        PCI_ROM(0x14e4, 0x170c, "BCM4401-B0", "BCM4401-B0", 0),
        PCI_ROM(0x14e4, 0x4402, "BCM4401-B1", "BCM4401-B1", 0),
};


struct pci_driver b44_driver __pci_driver = {
        .ids = b44_nics,
        .id_count = sizeof b44_nics / sizeof b44_nics[0],
        .probe = b44_probe,
        .remove = b44_remove,
};