skge.c
1 /*
2  * iPXE driver for Marvell Yukon chipset and SysKonnect Gigabit
3  * Ethernet adapters. Derived from Linux skge driver (v1.13), which was
4  * based on earlier sk98lin, e100 and FreeBSD if_sk drivers.
5  *
6  * This driver intentionally does not support all the features of the
7  * original driver such as link fail-over and link management because
8  * those should be done at higher levels.
9  *
10  * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
11  *
12  * Modified for iPXE, July 2008 by Michael Decker <mrd999@gmail.com>
13  * Tested and Modified in December 2009 by
14  * Thomas Miletich <thomas.miletich@gmail.com>
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License.
19  *
20  * This program is distributed in the hope that it will be useful,
21  * but WITHOUT ANY WARRANTY; without even the implied warranty of
22  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23  * GNU General Public License for more details.
24  *
25  * You should have received a copy of the GNU General Public License
26  * along with this program; if not, write to the Free Software
27  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
28  * 02110-1301, USA.
29  */
30 
31 FILE_LICENCE ( GPL2_ONLY );
32 
33 #include <stdint.h>
34 #include <string.h>
35 #include <errno.h>
36 #include <stdio.h>
37 #include <unistd.h>
38 #include <ipxe/netdevice.h>
39 #include <ipxe/ethernet.h>
40 #include <ipxe/if_ether.h>
41 #include <ipxe/iobuf.h>
42 #include <ipxe/malloc.h>
43 #include <ipxe/pci.h>
44 
45 #include "skge.h"
46 
47 static struct pci_device_id skge_id_table[] = {
48  PCI_ROM(0x10b7, 0x1700, "3C940", "3COM 3C940", 0),
49  PCI_ROM(0x10b7, 0x80eb, "3C940B", "3COM 3C940", 0),
50  PCI_ROM(0x1148, 0x4300, "GE", "Syskonnect GE", 0),
51  PCI_ROM(0x1148, 0x4320, "YU", "Syskonnect YU", 0),
52  PCI_ROM(0x1186, 0x4C00, "DGE510T", "DLink DGE-510T", 0),
53  PCI_ROM(0x1186, 0x4b01, "DGE530T", "DLink DGE-530T", 0),
54  PCI_ROM(0x11ab, 0x4320, "id4320", "Marvell id4320", 0),
55  PCI_ROM(0x11ab, 0x5005, "id5005", "Marvell id5005", 0), /* Belkin */
56  PCI_ROM(0x1371, 0x434e, "Gigacard", "CNET Gigacard", 0),
57  PCI_ROM(0x1737, 0x1064, "EG1064", "Linksys EG1064", 0),
58  PCI_ROM(0x1737, 0xffff, "id_any", "Linksys [any]", 0)
59 };
60 
61 static int skge_up(struct net_device *dev);
62 static void skge_down(struct net_device *dev);
63 static void skge_tx_clean(struct net_device *dev);
64 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
65 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
66 static void yukon_init(struct skge_hw *hw, int port);
67 static void genesis_mac_init(struct skge_hw *hw, int port);
68 static void genesis_link_up(struct skge_port *skge);
69 
70 static void skge_phyirq(struct skge_hw *hw);
71 static void skge_poll(struct net_device *dev);
72 static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob);
73 static void skge_net_irq ( struct net_device *dev, int enable );
74 
75 static void skge_rx_refill(struct net_device *dev);
76 
 77 static struct net_device_operations skge_operations = {
 78  .open = skge_up,
79  .close = skge_down,
80  .transmit = skge_xmit_frame,
81  .poll = skge_poll,
82  .irq = skge_net_irq
83 };
84 
85 /* Avoid conditionals by using array */
86 static const int txqaddr[] = { Q_XA1, Q_XA2 };
87 static const int rxqaddr[] = { Q_R1, Q_R2 };
88 static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
89 
90 /* Determine supported/advertised modes based on hardware.
91  * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
92  */
93 static u32 skge_supported_modes(const struct skge_hw *hw)
94 {
95  u32 supported;
96 
97  if (hw->copper) {
105 
106  if (hw->chip_id == CHIP_ID_GENESIS)
111 
112  else if (hw->chip_id == CHIP_ID_YUKON)
114  } else
117 
118  return supported;
119 }
120 
121 /* Chip internal frequency for clock calculations */
122 static inline u32 hwkhz(const struct skge_hw *hw)
123 {
124  return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
125 }
126 
127 /* Microseconds to chip HZ */
128 static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
129 {
130  return hwkhz(hw) * usec / 1000;
131 }
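/*
 * Worked example (illustrative, not part of the original driver): on a
 * Yukon chip hwkhz() returns 78125 kHz, so a 100 microsecond interval is
 * skge_usecs2clk(hw, 100) = 78125 * 100 / 1000 = 7812 chip clock ticks;
 * on Genesis the same interval is 53125 * 100 / 1000 = 5312 ticks.
 * skge_reset() uses this conversion when programming the interrupt
 * moderation timer.
 */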
132 
134 static void skge_led(struct skge_port *skge, enum led_mode mode)
135 {
136  struct skge_hw *hw = skge->hw;
137  int port = skge->port;
138 
139  if (hw->chip_id == CHIP_ID_GENESIS) {
140  switch (mode) {
141  case LED_MODE_OFF:
142  if (hw->phy_type == SK_PHY_BCOM)
144  else {
147  }
151  break;
152 
153  case LED_MODE_ON:
156 
159 
160  break;
161 
162  case LED_MODE_TST:
166 
167  if (hw->phy_type == SK_PHY_BCOM)
169  else {
173  }
174 
175  }
176  } else {
177  switch (mode) {
178  case LED_MODE_OFF:
186  break;
187  case LED_MODE_ON:
193 
196  (skge->speed == SPEED_100 ?
198  break;
199  case LED_MODE_TST:
207  }
208  }
209 }
210 
211 /*
212  * I've left in these EEPROM and VPD functions, as someone may desire to
213  * integrate them in the future. -mdeck
214  *
215  * static int skge_get_eeprom_len(struct net_device *dev)
216  * {
217  * struct skge_port *skge = dev->priv;
218  * u32 reg2;
219  *
220  * pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
221  * return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
222  * }
223  *
224  * static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
225  * {
226  * u32 val;
227  *
228  * pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);
229  *
230  * do {
231  * pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
232  * } while (!(offset & PCI_VPD_ADDR_F));
233  *
234  * pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
235  * return val;
236  * }
237  *
238  * static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
239  * {
240  * pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
241  * pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
242  * offset | PCI_VPD_ADDR_F);
243  *
244  * do {
245  * pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
246  * } while (offset & PCI_VPD_ADDR_F);
247  * }
248  *
249  * static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
250  * u8 *data)
251  * {
252  * struct skge_port *skge = dev->priv;
253  * struct pci_dev *pdev = skge->hw->pdev;
254  * int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
255  * int length = eeprom->len;
256  * u16 offset = eeprom->offset;
257  *
258  * if (!cap)
259  * return -EINVAL;
260  *
261  * eeprom->magic = SKGE_EEPROM_MAGIC;
262  *
263  * while (length > 0) {
264  * u32 val = skge_vpd_read(pdev, cap, offset);
265  * int n = min_t(int, length, sizeof(val));
266  *
267  * memcpy(data, &val, n);
268  * length -= n;
269  * data += n;
270  * offset += n;
271  * }
272  * return 0;
273  * }
274  *
275  * static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
276  * u8 *data)
277  * {
278  * struct skge_port *skge = dev->priv;
279  * struct pci_dev *pdev = skge->hw->pdev;
280  * int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
281  * int length = eeprom->len;
282  * u16 offset = eeprom->offset;
283  *
284  * if (!cap)
285  * return -EINVAL;
286  *
287  * if (eeprom->magic != SKGE_EEPROM_MAGIC)
288  * return -EINVAL;
289  *
290  * while (length > 0) {
291  * u32 val;
292  * int n = min_t(int, length, sizeof(val));
293  *
294  * if (n < sizeof(val))
295  * val = skge_vpd_read(pdev, cap, offset);
296  * memcpy(&val, data, n);
297  *
298  * skge_vpd_write(pdev, cap, offset, val);
299  *
300  * length -= n;
301  * data += n;
302  * offset += n;
303  * }
304  * return 0;
305  * }
306  */
307 
308 /*
309  * Allocate ring elements and chain them together
310  * One-to-one association of board descriptors with ring elements
311  */
312 static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base,
313  size_t num)
314 {
315  struct skge_tx_desc *d;
316  struct skge_element *e;
317  unsigned int i;
318 
319  ring->start = zalloc(num*sizeof(*e));
320  if (!ring->start)
321  return -ENOMEM;
322 
323  for (i = 0, e = ring->start, d = vaddr; i < num; i++, e++, d++) {
324  e->desc = d;
325  if (i == num - 1) {
326  e->next = ring->start;
327  d->next_offset = base;
328  } else {
329  e->next = e + 1;
330  d->next_offset = base + (i+1) * sizeof(*d);
331  }
332  }
333  ring->to_use = ring->to_clean = ring->start;
334 
335  return 0;
336 }
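/*
 * Illustrative sketch (assuming num == 4) of the ring that
 * skge_ring_alloc() builds.  The software elements and the hardware
 * descriptors form two matching circular lists:
 *
 *   element[0].desc = vaddr + 0*sizeof(*d), d->next_offset = base + 1*sizeof(*d)
 *   element[1].desc = vaddr + 1*sizeof(*d), d->next_offset = base + 2*sizeof(*d)
 *   element[2].desc = vaddr + 2*sizeof(*d), d->next_offset = base + 3*sizeof(*d)
 *   element[3].desc = vaddr + 3*sizeof(*d), d->next_offset = base (wraps around)
 *
 * to_use and to_clean both start at element[0]; the transmit/receive
 * paths advance to_use and the completion path advances to_clean around
 * the same circle.
 */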
337 
338 /* Allocate and setup a new buffer for receiving */
339 static void skge_rx_setup(struct skge_port *skge __unused,
340  struct skge_element *e,
341  struct io_buffer *iob, unsigned int bufsize)
342 {
343  struct skge_rx_desc *rd = e->desc;
344  u64 map;
345 
346  map = ( iob != NULL ) ? virt_to_bus(iob->data) : 0;
347 
348  rd->dma_lo = map;
349  rd->dma_hi = map >> 32;
350  e->iob = iob;
351  rd->csum1_start = ETH_HLEN;
352  rd->csum2_start = ETH_HLEN;
353  rd->csum1 = 0;
354  rd->csum2 = 0;
355 
356  wmb();
357 
359 }
360 
 361 /* Resume receiving using the existing io_buffer.
362  * Note: DMA address is not changed by chip.
363  * MTU not changed while receiver active.
364  */
365 static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
366 {
367  struct skge_rx_desc *rd = e->desc;
368 
369  rd->csum2 = 0;
370  rd->csum2_start = ETH_HLEN;
371 
372  wmb();
373 
375 }
376 
377 
378 /* Free all buffers in receive ring, assumes receiver stopped */
379 static void skge_rx_clean(struct skge_port *skge)
380 {
381  struct skge_ring *ring = &skge->rx_ring;
382  struct skge_element *e;
383 
384  e = ring->start;
385  do {
386  struct skge_rx_desc *rd = e->desc;
387  rd->control = 0;
388  if (e->iob) {
389  free_iob(e->iob);
390  e->iob = NULL;
391  }
392  } while ((e = e->next) != ring->start);
393 }
394 
395 static void skge_link_up(struct skge_port *skge)
396 {
397  skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
399 
400  netdev_link_up(skge->netdev);
401 
402  DBG2(PFX "%s: Link is up at %d Mbps, %s duplex\n",
403  skge->netdev->name, skge->speed,
404  skge->duplex == DUPLEX_FULL ? "full" : "half");
405 }
406 
407 static void skge_link_down(struct skge_port *skge)
408 {
409  skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
410  netdev_link_down(skge->netdev);
411 
412  DBG2(PFX "%s: Link is down.\n", skge->netdev->name);
413 }
414 
415 
416 static void xm_link_down(struct skge_hw *hw, int port)
417 {
418  struct net_device *dev = hw->dev[port];
419  struct skge_port *skge = dev->priv;
420 
422 
423  if (netdev_link_ok(dev))
424  skge_link_down(skge);
425 }
426 
427 static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
428 {
429  int i;
430 
431  xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
433 
434  if (hw->phy_type == SK_PHY_XMAC)
435  goto ready;
436 
437  for (i = 0; i < PHY_RETRIES; i++) {
439  goto ready;
440  udelay(1);
441  }
442 
443  return -ETIMEDOUT;
444  ready:
446 
447  return 0;
448 }
449 
450 static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
451 {
452  u16 v = 0;
453  if (__xm_phy_read(hw, port, reg, &v))
454  DBG(PFX "%s: phy read timed out\n",
455  hw->dev[port]->name);
456  return v;
457 }
458 
459 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
460 {
461  int i;
462 
463  xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
464  for (i = 0; i < PHY_RETRIES; i++) {
466  goto ready;
467  udelay(1);
468  }
469  return -EIO;
470 
471  ready:
473  for (i = 0; i < PHY_RETRIES; i++) {
475  return 0;
476  udelay(1);
477  }
478  return -ETIMEDOUT;
479 }
480 
481 static void genesis_init(struct skge_hw *hw)
482 {
483  /* set blink source counter */
486 
487  /* configure mac arbiter */
489 
490  /* configure mac arbiter timeout values */
495 
500 
501  /* configure packet arbiter timeout */
507 }
508 
509 static void genesis_reset(struct skge_hw *hw, int port)
510 {
511  const u8 zero[8] = { 0 };
512  u32 reg;
513 
515 
516  /* reset the statistics module */
519  xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
520  xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
521  xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
522 
523  /* disable Broadcom PHY IRQ */
524  if (hw->phy_type == SK_PHY_BCOM)
525  xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
526 
527  xm_outhash(hw, port, XM_HSM, zero);
528 
529  /* Flush TX and RX fifo */
530  reg = xm_read32(hw, port, XM_MODE);
533 }
534 
535 
536 /* Convert mode to MII values */
537 static const u16 phy_pause_map[] = {
538  [FLOW_MODE_NONE] = 0,
542 };
543 
544 /* special defines for FIBER (88E1011S only) */
545 static const u16 fiber_pause_map[] = {
550 };
551 
552 
553 /* Check status of Broadcom phy link */
554 static void bcom_check_link(struct skge_hw *hw, int port)
555 {
556  struct net_device *dev = hw->dev[port];
557  struct skge_port *skge = dev->priv;
558  u16 status;
559 
560  /* read twice because of latch */
563 
564  if ((status & PHY_ST_LSYNC) == 0) {
565  xm_link_down(hw, port);
566  return;
567  }
568 
569  if (skge->autoneg == AUTONEG_ENABLE) {
570  u16 lpa, aux;
571 
572  if (!(status & PHY_ST_AN_OVER))
573  return;
574 
576  if (lpa & PHY_B_AN_RF) {
577  DBG(PFX "%s: remote fault\n",
578  dev->name);
579  return;
580  }
581 
583 
584  /* Check Duplex mismatch */
585  switch (aux & PHY_B_AS_AN_RES_MSK) {
586  case PHY_B_RES_1000FD:
587  skge->duplex = DUPLEX_FULL;
588  break;
589  case PHY_B_RES_1000HD:
590  skge->duplex = DUPLEX_HALF;
591  break;
592  default:
593  DBG(PFX "%s: duplex mismatch\n",
594  dev->name);
595  return;
596  }
597 
598  /* We are using IEEE 802.3z/D5.0 Table 37-4 */
599  switch (aux & PHY_B_AS_PAUSE_MSK) {
600  case PHY_B_AS_PAUSE_MSK:
602  break;
603  case PHY_B_AS_PRR:
605  break;
606  case PHY_B_AS_PRT:
608  break;
609  default:
610  skge->flow_status = FLOW_STAT_NONE;
611  }
612  skge->speed = SPEED_1000;
613  }
614 
615  if (!netdev_link_ok(dev))
616  genesis_link_up(skge);
617 }
618 
 619 /* Broadcom 5400 only supports gigabit! SysKonnect did not fit an additional
 620  * PHY for 100 or 10 Mbit operation
621  */
622 static void bcom_phy_init(struct skge_port *skge)
623 {
624  struct skge_hw *hw = skge->hw;
625  int port = skge->port;
626  unsigned int i;
627  u16 id1, r, ext, ctl;
628 
629  /* magic workaround patterns for Broadcom */
630  static const struct {
631  u16 reg;
632  u16 val;
633  } A1hack[] = {
634  { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
635  { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
636  { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
637  { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
638  }, C0hack[] = {
639  { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
640  { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
641  };
642 
643  /* read Id from external PHY (all have the same address) */
644  id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
645 
646  /* Optimize MDIO transfer by suppressing preamble. */
648  r |= XM_MMU_NO_PRE;
650 
651  switch (id1) {
652  case PHY_BCOM_ID1_C0:
653  /*
654  * Workaround BCOM Errata for the C0 type.
655  * Write magic patterns to reserved registers.
656  */
657  for (i = 0; i < ARRAY_SIZE(C0hack); i++)
659  C0hack[i].reg, C0hack[i].val);
660 
661  break;
662  case PHY_BCOM_ID1_A1:
663  /*
664  * Workaround BCOM Errata for the A1 type.
665  * Write magic patterns to reserved registers.
666  */
667  for (i = 0; i < ARRAY_SIZE(A1hack); i++)
669  A1hack[i].reg, A1hack[i].val);
670  break;
671  }
672 
673  /*
674  * Workaround BCOM Errata (#10523) for all BCom PHYs.
675  * Disable Power Management after reset.
676  */
678  r |= PHY_B_AC_DIS_PM;
680 
681  /* Dummy read */
683 
684  ext = PHY_B_PEC_EN_LTR; /* enable tx led */
685  ctl = PHY_CT_SP1000; /* always 1000mbit */
686 
687  if (skge->autoneg == AUTONEG_ENABLE) {
688  /*
689  * Workaround BCOM Errata #1 for the C5 type.
690  * 1000Base-T Link Acquisition Failure in Slave Mode
691  * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
692  */
693  u16 adv = PHY_B_1000C_RD;
695  adv |= PHY_B_1000C_AHD;
697  adv |= PHY_B_1000C_AFD;
699 
700  ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
701  } else {
702  if (skge->duplex == DUPLEX_FULL)
703  ctl |= PHY_CT_DUP_MD;
704  /* Force to slave */
706  }
707 
708  /* Set autonegotiation pause parameters */
711 
714 
715  /* Use link status change interrupt */
717 }
718 
719 static void xm_phy_init(struct skge_port *skge)
720 {
721  struct skge_hw *hw = skge->hw;
722  int port = skge->port;
723  u16 ctrl = 0;
724 
725  if (skge->autoneg == AUTONEG_ENABLE) {
727  ctrl |= PHY_X_AN_HD;
729  ctrl |= PHY_X_AN_FD;
730 
732 
734 
735  /* Restart Auto-negotiation */
737  } else {
738  /* Set DuplexMode in Config register */
739  if (skge->duplex == DUPLEX_FULL)
740  ctrl |= PHY_CT_DUP_MD;
741  /*
742  * Do NOT enable Auto-negotiation here. This would hold
743  * the link down because no IDLEs are transmitted
744  */
745  }
746 
748 
749  /* Poll PHY for status changes */
750  skge->use_xm_link_timer = 1;
751 }
752 
753 static int xm_check_link(struct net_device *dev)
754 {
755  struct skge_port *skge = dev->priv;
756  struct skge_hw *hw = skge->hw;
757  int port = skge->port;
758  u16 status;
759 
760  /* read twice because of latch */
763 
764  if ((status & PHY_ST_LSYNC) == 0) {
765  xm_link_down(hw, port);
766  return 0;
767  }
768 
769  if (skge->autoneg == AUTONEG_ENABLE) {
770  u16 lpa, res;
771 
772  if (!(status & PHY_ST_AN_OVER))
773  return 0;
774 
776  if (lpa & PHY_B_AN_RF) {
777  DBG(PFX "%s: remote fault\n",
778  dev->name);
779  return 0;
780  }
781 
783 
784  /* Check Duplex mismatch */
785  switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
786  case PHY_X_RS_FD:
787  skge->duplex = DUPLEX_FULL;
788  break;
789  case PHY_X_RS_HD:
790  skge->duplex = DUPLEX_HALF;
791  break;
792  default:
793  DBG(PFX "%s: duplex mismatch\n",
794  dev->name);
795  return 0;
796  }
797 
798  /* We are using IEEE 802.3z/D5.0 Table 37-4 */
799  if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
801  (lpa & PHY_X_P_SYM_MD))
803  else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
804  (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
805  /* Enable PAUSE receive, disable PAUSE transmit */
807  else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
808  (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
809  /* Disable PAUSE receive, enable PAUSE transmit */
811  else
812  skge->flow_status = FLOW_STAT_NONE;
813 
814  skge->speed = SPEED_1000;
815  }
816 
817  if (!netdev_link_ok(dev))
818  genesis_link_up(skge);
819  return 1;
820 }
821 
822 /* Poll to check for link coming up.
823  *
 824  * Since the internal PHY is wired to a level-triggered pin, we can't
 825  * get an interrupt when carrier is detected, so we need to poll for
 826  * the link coming up.
827  */
828 static void xm_link_timer(struct skge_port *skge)
829 {
830  struct net_device *dev = skge->netdev;
831  struct skge_hw *hw = skge->hw;
832  int port = skge->port;
833  int i;
834 
835  /*
 836  * Verify the link by checking the GPIO register three times.
837  * This pin has the signal from the link_sync pin connected to it.
838  */
839  for (i = 0; i < 3; i++) {
841  return;
842  }
843 
844  /* Re-enable interrupt to detect link down */
845  if (xm_check_link(dev)) {
846  u16 msk = xm_read16(hw, port, XM_IMSK);
847  msk &= ~XM_IS_INP_ASS;
848  xm_write16(hw, port, XM_IMSK, msk);
850  }
851 }
852 
853 static void genesis_mac_init(struct skge_hw *hw, int port)
854 {
855  struct net_device *dev = hw->dev[port];
856  struct skge_port *skge = dev->priv;
857  int i;
858  u32 r;
859  const u8 zero[6] = { 0 };
860 
861  for (i = 0; i < 10; i++) {
865  goto reset_ok;
866  udelay(1);
867  }
868 
869  DBG(PFX "%s: genesis reset failed\n", dev->name);
870 
871  reset_ok:
872  /* Unreset the XMAC. */
874 
875  /*
876  * Perform additional initialization for external PHYs,
877  * namely for the 1000baseTX cards that use the XMAC's
878  * GMII mode.
879  */
880  if (hw->phy_type != SK_PHY_XMAC) {
881  /* Take external Phy out of reset */
882  r = skge_read32(hw, B2_GP_IO);
883  if (port == 0)
884  r |= GP_DIR_0|GP_IO_0;
885  else
886  r |= GP_DIR_2|GP_IO_2;
887 
889 
890  /* Enable GMII interface */
892  }
893 
894 
895  switch(hw->phy_type) {
896  case SK_PHY_XMAC:
897  xm_phy_init(skge);
898  break;
899  case SK_PHY_BCOM:
900  bcom_phy_init(skge);
902  }
903 
904  /* Set Station Address */
905  xm_outaddr(hw, port, XM_SA, dev->ll_addr);
906 
907  /* We don't use match addresses so clear */
908  for (i = 1; i < 16; i++)
909  xm_outaddr(hw, port, XM_EXM(i), zero);
910 
911  /* Clear MIB counters */
914  /* Clear two times according to Errata #3 */
917 
918  /* configure Rx High Water Mark (XM_RX_HI_WM) */
919  xm_write16(hw, port, XM_RX_HI_WM, 1450);
920 
921  /* We don't need the FCS appended to the packet. */
923 
924  if (skge->duplex == DUPLEX_HALF) {
925  /*
926  * If in manual half duplex mode the other side might be in
927  * full duplex mode, so ignore if a carrier extension is not seen
928  * on frames received
929  */
930  r |= XM_RX_DIS_CEXT;
931  }
933 
934  /* We want short frames padded to 60 bytes. */
936 
937  xm_write16(hw, port, XM_TX_THR, 512);
938 
939  /*
 940  * Enable the reception of all error frames. This is
941  * a necessary evil due to the design of the XMAC. The
942  * XMAC's receive FIFO is only 8K in size, however jumbo
943  * frames can be up to 9000 bytes in length. When bad
944  * frame filtering is enabled, the XMAC's RX FIFO operates
945  * in 'store and forward' mode. For this to work, the
946  * entire frame has to fit into the FIFO, but that means
947  * that jumbo frames larger than 8192 bytes will be
948  * truncated. Disabling all bad frame filtering causes
949  * the RX FIFO to operate in streaming mode, in which
950  * case the XMAC will start transferring frames out of the
951  * RX FIFO as soon as the FIFO threshold is reached.
952  */
954 
955 
956  /*
957  * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
958  * - Enable all bits excepting 'Octets Rx OK Low CntOv'
959  * and 'Octets Rx OK Hi Cnt Ov'.
960  */
962 
963  /*
964  * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
965  * - Enable all bits excepting 'Octets Tx OK Low CntOv'
966  * and 'Octets Tx OK Hi Cnt Ov'.
967  */
969 
970  /* Configure MAC arbiter */
972 
973  /* configure timeout values */
978 
983 
984  /* Configure Rx MAC FIFO */
988 
989  /* Configure Tx MAC FIFO */
993 
994  /* enable timeout timers */
996  (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
997 }
998 
999 static void genesis_stop(struct skge_port *skge)
1000 {
1001  struct skge_hw *hw = skge->hw;
1002  int port = skge->port;
1003  unsigned retries = 1000;
1004  u16 cmd;
1005 
1006  /* Disable Tx and Rx */
1010 
1011  genesis_reset(hw, port);
1012 
1013  /* Clear Tx packet arbiter timeout IRQ */
1015  port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
1016 
1017  /* Reset the MAC */
1019  do {
1022  break;
1023  } while (--retries > 0);
1024 
1025  /* For external PHYs there must be special handling */
1026  if (hw->phy_type != SK_PHY_XMAC) {
1028  if (port == 0) {
1029  reg |= GP_DIR_0;
1030  reg &= ~GP_IO_0;
1031  } else {
1032  reg |= GP_DIR_2;
1033  reg &= ~GP_IO_2;
1034  }
1037  }
1038 
1041  & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1042 
1044 }
1045 
1046 static void genesis_link_up(struct skge_port *skge)
1047 {
1048  struct skge_hw *hw = skge->hw;
1049  int port = skge->port;
1050  u16 cmd, msk;
1051  u32 mode;
1052 
1054 
1055  /*
1056  * enabling pause frame reception is required for 1000BT
1057  * because the XMAC is not reset if the link is going down
1058  */
1059  if (skge->flow_status == FLOW_STAT_NONE ||
1061  /* Disable Pause Frame Reception */
1062  cmd |= XM_MMU_IGN_PF;
1063  else
1064  /* Enable Pause Frame Reception */
1065  cmd &= ~XM_MMU_IGN_PF;
1066 
1068 
1069  mode = xm_read32(hw, port, XM_MODE);
1070  if (skge->flow_status== FLOW_STAT_SYMMETRIC ||
1071  skge->flow_status == FLOW_STAT_LOC_SEND) {
1072  /*
1073  * Configure Pause Frame Generation
1074  * Use internal and external Pause Frame Generation.
1075  * Sending pause frames is edge triggered.
1076  * Send a Pause frame with the maximum pause time if
 1077  * internal or external FIFO full condition occurs.
1078  * Send a zero pause time frame to re-start transmission.
1079  */
1080  /* XM_PAUSE_DA = '010000C28001' (default) */
1081  /* XM_MAC_PTIME = 0xffff (maximum) */
1082  /* remember this value is defined in big endian (!) */
1083  xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
1084 
1085  mode |= XM_PAUSE_MODE;
1087  } else {
1088  /*
 1089  * disabling pause frame generation is required for 1000BT
1090  * because the XMAC is not reset if the link is going down
1091  */
1092  /* Disable Pause Mode in Mode Register */
1093  mode &= ~XM_PAUSE_MODE;
1094 
1096  }
1097 
1099 
1100  /* Turn on detection of Tx underrun */
1101  msk = xm_read16(hw, port, XM_IMSK);
1102  msk &= ~XM_IS_TXF_UR;
1103  xm_write16(hw, port, XM_IMSK, msk);
1104 
1105  xm_read16(hw, port, XM_ISRC);
1106 
1107  /* get MMU Command Reg. */
1109  if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
1110  cmd |= XM_MMU_GMII_FD;
1111 
1112  /*
1113  * Workaround BCOM Errata (#10523) for all BCom Phys
1114  * Enable Power Management after link up
1115  */
1116  if (hw->phy_type == SK_PHY_BCOM) {
1119  & ~PHY_B_AC_DIS_PM);
1121  }
1122 
1123  /* enable Rx/Tx */
1126  skge_link_up(skge);
1127 }
1128 
1129 
1130 static inline void bcom_phy_intr(struct skge_port *skge)
1131 {
1132  struct skge_hw *hw = skge->hw;
1133  int port = skge->port;
1134  u16 isrc;
1135 
1137  DBGIO(PFX "%s: phy interrupt status 0x%x\n",
1138  skge->netdev->name, isrc);
1139 
1140  if (isrc & PHY_B_IS_PSE)
1141  DBG(PFX "%s: uncorrectable pair swap error\n",
1142  hw->dev[port]->name);
1143 
1144  /* Workaround BCom Errata:
1145  * enable and disable loopback mode if "NO HCD" occurs.
1146  */
1147  if (isrc & PHY_B_IS_NO_HDCL) {
1150  ctrl | PHY_CT_LOOP);
1152  ctrl & ~PHY_CT_LOOP);
1153  }
1154 
1155  if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
1157 
1158 }
1159 
1160 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1161 {
1162  int i;
1163 
1166  GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1167  for (i = 0; i < PHY_RETRIES; i++) {
1168  udelay(1);
1169 
1171  return 0;
1172  }
1173 
1174  DBG(PFX "%s: phy write timeout port %x reg %x val %x\n",
1175  hw->dev[port]->name,
1176  port, reg, val);
1177  return -EIO;
1178 }
1179 
1180 static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
1181 {
1182  int i;
1183 
1185  GM_SMI_CT_PHY_AD(hw->phy_addr)
1187 
1188  for (i = 0; i < PHY_RETRIES; i++) {
1189  udelay(1);
1191  goto ready;
1192  }
1193 
1194  return -ETIMEDOUT;
1195  ready:
1197  return 0;
1198 }
1199 
1200 static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1201 {
1202  u16 v = 0;
1203  if (__gm_phy_read(hw, port, reg, &v))
1204  DBG(PFX "%s: phy read timeout port %x reg %x val %x\n",
1205  hw->dev[port]->name,
1206  port, reg, v);
1207  return v;
1208 }
1209 
1210 /* Marvell Phy Initialization */
1211 static void yukon_init(struct skge_hw *hw, int port)
1212 {
1213  struct skge_port *skge = hw->dev[port]->priv;
1214  u16 ctrl, ct1000, adv;
1215 
1216  if (skge->autoneg == AUTONEG_ENABLE) {
1218 
1219  ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
1222 
1223  ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1224 
1226  }
1227 
1229  if (skge->autoneg == AUTONEG_DISABLE)
1230  ctrl &= ~PHY_CT_ANE;
1231 
1232  ctrl |= PHY_CT_RESET;
1234 
1235  ctrl = 0;
1236  ct1000 = 0;
1237  adv = PHY_AN_CSMA;
1238 
1239  if (skge->autoneg == AUTONEG_ENABLE) {
1240  if (hw->copper) {
1242  ct1000 |= PHY_M_1000C_AFD;
1244  ct1000 |= PHY_M_1000C_AHD;
1246  adv |= PHY_M_AN_100_FD;
1248  adv |= PHY_M_AN_100_HD;
1250  adv |= PHY_M_AN_10_FD;
1252  adv |= PHY_M_AN_10_HD;
1253 
1254  /* Set Flow-control capabilities */
1255  adv |= phy_pause_map[skge->flow_control];
1256  } else {
1258  adv |= PHY_M_AN_1000X_AFD;
1260  adv |= PHY_M_AN_1000X_AHD;
1261 
1262  adv |= fiber_pause_map[skge->flow_control];
1263  }
1264 
1265  /* Restart Auto-negotiation */
1267  } else {
1268  /* forced speed/duplex settings */
1269  ct1000 = PHY_M_1000C_MSE;
1270 
1271  if (skge->duplex == DUPLEX_FULL)
1272  ctrl |= PHY_CT_DUP_MD;
1273 
1274  switch (skge->speed) {
1275  case SPEED_1000:
1276  ctrl |= PHY_CT_SP1000;
1277  break;
1278  case SPEED_100:
1279  ctrl |= PHY_CT_SP100;
1280  break;
1281  }
1282 
1283  ctrl |= PHY_CT_RESET;
1284  }
1285 
1287 
1290 
1291  /* Enable phy interrupt on autonegotiation complete (or link up) */
1292  if (skge->autoneg == AUTONEG_ENABLE)
1294  else
1296 }
1297 
1298 static void yukon_reset(struct skge_hw *hw, int port)
1299 {
1300  gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
1301  gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
1305 
1309 }
1310 
1311 /* Apparently, early versions of Yukon-Lite had wrong chip_id? */
1312 static int is_yukon_lite_a0(struct skge_hw *hw)
1313 {
1314  u32 reg;
1315  int ret;
1316 
1317  if (hw->chip_id != CHIP_ID_YUKON)
1318  return 0;
1319 
1320  reg = skge_read32(hw, B2_FAR);
1321  skge_write8(hw, B2_FAR + 3, 0xff);
1322  ret = (skge_read8(hw, B2_FAR + 3) != 0);
1324  return ret;
1325 }
1326 
1327 static void yukon_mac_init(struct skge_hw *hw, int port)
1328 {
1329  struct skge_port *skge = hw->dev[port]->priv;
1330  int i;
1331  u32 reg;
1332  const u8 *addr = hw->dev[port]->ll_addr;
1333 
1334  /* WA code for COMA mode -- set PHY reset */
1335  if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1336  hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1337  reg = skge_read32(hw, B2_GP_IO);
1338  reg |= GP_DIR_9 | GP_IO_9;
1340  }
1341 
1342  /* hard reset */
1345 
1346  /* WA code for COMA mode -- clear PHY reset */
1347  if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1348  hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1349  reg = skge_read32(hw, B2_GP_IO);
1350  reg |= GP_DIR_9;
1351  reg &= ~GP_IO_9;
1353  }
1354 
1355  /* Set hardware config mode */
1359 
1360  /* Clear GMC reset */
1364 
1365  if (skge->autoneg == AUTONEG_DISABLE) {
1369 
1370  switch (skge->speed) {
1371  case SPEED_1000:
1372  reg &= ~GM_GPCR_SPEED_100;
1374  break;
1375  case SPEED_100:
1376  reg &= ~GM_GPCR_SPEED_1000;
1378  break;
1379  case SPEED_10:
1381  break;
1382  }
1383 
1384  if (skge->duplex == DUPLEX_FULL)
1385  reg |= GM_GPCR_DUP_FULL;
1386  } else
1388 
1389  switch (skge->flow_control) {
1390  case FLOW_MODE_NONE:
1393  break;
1394  case FLOW_MODE_LOC_SEND:
1395  /* disable Rx flow-control */
1397  break;
1398  case FLOW_MODE_SYMMETRIC:
1399  case FLOW_MODE_SYM_OR_REM:
1400  /* enable Tx & Rx flow-control */
1401  break;
1402  }
1403 
1406 
1407  yukon_init(hw, port);
1408 
1409  /* MIB clear */
1412 
1413  for (i = 0; i < GM_MIB_CNT_SIZE; i++)
1414  gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
1416 
1417  /* transmit control */
1419 
1420  /* receive control reg: unicast + multicast + no FCS */
1423 
1424  /* transmit flow control */
1425  gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
1426 
1427  /* transmit parameter */
1432 
1433  /* configure the Serial Mode Register */
1437 
1439 
1440  /* physical address: used for pause frames */
1442  /* virtual address for data */
1444 
1445  /* enable interrupt mask for counter overflows */
1449 
1450  /* Initialize Mac Fifo */
1451 
1452  /* Configure Rx MAC FIFO */
1455 
1456  /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
1457  if (is_yukon_lite_a0(hw))
1458  reg &= ~GMF_RX_F_FL_ON;
1459 
1462  /*
1463  * because Pause Packet Truncation in GMAC is not working
1464  * we have to increase the Flush Threshold to 64 bytes
1465  * in order to flush pause packets in Rx FIFO on Yukon-1
1466  */
1468 
1469  /* Configure Tx MAC FIFO */
1472 }
1473 
1474 /* Go into power down mode */
1475 static void yukon_suspend(struct skge_hw *hw, int port)
1476 {
1477  u16 ctrl;
1478 
1482 
1484  ctrl |= PHY_CT_RESET;
1486 
1487  /* switch IEEE compatible power down mode on */
1489  ctrl |= PHY_CT_PDOWN;
1491 }
1492 
1493 static void yukon_stop(struct skge_port *skge)
1494 {
1495  struct skge_hw *hw = skge->hw;
1496  int port = skge->port;
1497 
1499  yukon_reset(hw, port);
1500 
1505 
1506  yukon_suspend(hw, port);
1507 
1508  /* set GPHY Control reset */
1511 }
1512 
1513 static u16 yukon_speed(const struct skge_hw *hw __unused, u16 aux)
1514 {
1515  switch (aux & PHY_M_PS_SPEED_MSK) {
1516  case PHY_M_PS_SPEED_1000:
1517  return SPEED_1000;
1518  case PHY_M_PS_SPEED_100:
1519  return SPEED_100;
1520  default:
1521  return SPEED_10;
1522  }
1523 }
1524 
1525 static void yukon_link_up(struct skge_port *skge)
1526 {
1527  struct skge_hw *hw = skge->hw;
1528  int port = skge->port;
1529  u16 reg;
1530 
1531  /* Enable Transmit FIFO Underrun */
1533 
1535  if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
1536  reg |= GM_GPCR_DUP_FULL;
1537 
1538  /* enable Rx/Tx */
1541 
1543  skge_link_up(skge);
1544 }
1545 
1546 static void yukon_link_down(struct skge_port *skge)
1547 {
1548  struct skge_hw *hw = skge->hw;
1549  int port = skge->port;
1550  u16 ctrl;
1551 
1555 
1556  if (skge->flow_status == FLOW_STAT_REM_SEND) {
1558  ctrl |= PHY_M_AN_ASP;
1559  /* restore Asymmetric Pause bit */
1561  }
1562 
1563  skge_link_down(skge);
1564 
1565  yukon_init(hw, port);
1566 }
1567 
1568 static void yukon_phy_intr(struct skge_port *skge)
1569 {
1570  struct skge_hw *hw = skge->hw;
1571  int port = skge->port;
1572  const char *reason = NULL;
1573  u16 istatus, phystat;
1574 
1575  istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
1576  phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
1577 
1578  DBGIO(PFX "%s: phy interrupt status 0x%x 0x%x\n",
1579  skge->netdev->name, istatus, phystat);
1580 
1581  if (istatus & PHY_M_IS_AN_COMPL) {
1583  & PHY_M_AN_RF) {
1584  reason = "remote fault";
1585  goto failed;
1586  }
1587 
1589  reason = "master/slave fault";
1590  goto failed;
1591  }
1592 
1593  if (!(phystat & PHY_M_PS_SPDUP_RES)) {
1594  reason = "speed/duplex";
1595  goto failed;
1596  }
1597 
1598  skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
1600  skge->speed = yukon_speed(hw, phystat);
1601 
1602  /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1603  switch (phystat & PHY_M_PS_PAUSE_MSK) {
1604  case PHY_M_PS_PAUSE_MSK:
1606  break;
1607  case PHY_M_PS_RX_P_EN:
1609  break;
1610  case PHY_M_PS_TX_P_EN:
1612  break;
1613  default:
1614  skge->flow_status = FLOW_STAT_NONE;
1615  }
1616 
1617  if (skge->flow_status == FLOW_STAT_NONE ||
1618  (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
1620  else
1622  yukon_link_up(skge);
1623  return;
1624  }
1625 
1626  if (istatus & PHY_M_IS_LSP_CHANGE)
1627  skge->speed = yukon_speed(hw, phystat);
1628 
1629  if (istatus & PHY_M_IS_DUP_CHANGE)
1630  skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1631  if (istatus & PHY_M_IS_LST_CHANGE) {
1632  if (phystat & PHY_M_PS_LINK_UP)
1633  yukon_link_up(skge);
1634  else
1635  yukon_link_down(skge);
1636  }
1637  return;
1638  failed:
1639  DBG(PFX "%s: autonegotiation failed (%s)\n",
1640  skge->netdev->name, reason);
1641 
1642  /* XXX restart autonegotiation? */
1643 }
1644 
1645 static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
1646 {
1647  u32 end;
1648 
1649  start /= 8;
1650  len /= 8;
1651  end = start + len - 1;
1652 
1657  skge_write32(hw, RB_ADDR(q, RB_END), end);
1658 
1659  if (q == Q_R1 || q == Q_R2) {
 1660  /* Set thresholds on receive queues */
1662  start + (2*len)/3);
1664  start + (len/3));
1665  } else {
 1666  /* Enable store & forward on Tx queues because
1667  * Tx FIFO is only 4K on Genesis and 1K on Yukon
1668  */
1670  }
1671 
1673 }
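/*
 * Worked example (illustrative, assuming a single-port Yukon with the
 * default 128KB of packet RAM): skge_up() computes
 * chunk = (0x20000 - 0) / (1 * 2) = 0x10000 bytes per queue, so the
 * receive queue calls skge_ramset() with start = 0 and len = 0x10000.
 * In the chip's 8-byte units that becomes start = 0, len = 0x2000,
 * end = 0x1fff, with the upper/lower pause thresholds placed at 2/3 and
 * 1/3 of the window respectively.
 */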
1674 
1675 /* Setup Bus Memory Interface */
1676 static void skge_qset(struct skge_port *skge, u16 q,
1677  const struct skge_element *e)
1678 {
1679  struct skge_hw *hw = skge->hw;
1680  u32 watermark = 0x600;
1681  u64 base = skge->dma + (e->desc - skge->mem);
1682 
1683  /* optimization to reduce window on 32bit/33mhz */
1684  if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
1685  watermark /= 2;
1686 
1688  skge_write32(hw, Q_ADDR(q, Q_F), watermark);
1689  skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
1690  skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
1691 }
1692 
1693 void skge_free(struct net_device *dev)
1694 {
1695  struct skge_port *skge = dev->priv;
1696 
1697  free(skge->rx_ring.start);
1698  skge->rx_ring.start = NULL;
1699 
1700  free(skge->tx_ring.start);
1701  skge->tx_ring.start = NULL;
1702 
1703  free_phys(skge->mem, RING_SIZE);
1704  skge->mem = NULL;
1705  skge->dma = 0;
1706 }
1707 
1708 static int skge_up(struct net_device *dev)
1709 {
1710  struct skge_port *skge = dev->priv;
1711  struct skge_hw *hw = skge->hw;
1712  int port = skge->port;
1713  u32 chunk, ram_addr;
1714  int err;
1715 
1716  DBG2(PFX "%s: enabling interface\n", dev->name);
1717 
 1718  skge->mem = malloc_phys(RING_SIZE, SKGE_RING_ALIGN);
 1719  skge->dma = virt_to_bus(skge->mem);
1720  if (!skge->mem)
1721  return -ENOMEM;
1722  memset(skge->mem, 0, RING_SIZE);
1723 
1724  assert(!(skge->dma & 7));
1725 
1726  /* FIXME: find out whether 64 bit iPXE will be loaded > 4GB */
1727  if ((u64)skge->dma >> 32 != ((u64) skge->dma + RING_SIZE) >> 32) {
1728  DBG(PFX "pci_alloc_consistent region crosses 4G boundary\n");
1729  err = -EINVAL;
1730  goto err;
1731  }
1732 
1733  err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma, NUM_RX_DESC);
1734  if (err)
1735  goto err;
1736 
 1737  /* This call relies on e->iob and d->control being 0.
1738  * This is assured by calling memset() on skge->mem and using zalloc()
1739  * for the skge_element structures.
1740  */
1742 
1743  err = skge_ring_alloc(&skge->tx_ring, skge->mem + RX_RING_SIZE,
1744  skge->dma + RX_RING_SIZE, NUM_TX_DESC);
1745  if (err)
1746  goto err;
1747 
1748  /* Initialize MAC */
1749  if (hw->chip_id == CHIP_ID_GENESIS)
1751  else
1753 
1754  /* Configure RAMbuffers - equally between ports and tx/rx */
1755  chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
1756  ram_addr = hw->ram_offset + 2 * chunk * port;
1757 
1758  skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
1759  skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
1760 
1761  assert(!(skge->tx_ring.to_use != skge->tx_ring.to_clean));
1762  skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
1763  skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
1764 
1765  /* Start receiver BMU */
1766  wmb();
1768  skge_led(skge, LED_MODE_ON);
1769 
1770  hw->intr_mask |= portmask[port];
1771  skge_write32(hw, B0_IMSK, hw->intr_mask);
1772 
1773  return 0;
1774 
1775  err:
1776  skge_rx_clean(skge);
1777  skge_free(dev);
1778 
1779  return err;
1780 }
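/*
 * Note (descriptive, derived from the code above): skge->mem holds both
 * rings back to back -- Rx descriptors at offset 0, Tx descriptors at
 * offset RX_RING_SIZE -- and the block must not straddle a 4GB boundary,
 * because the high half of the descriptor base address is programmed only
 * once per queue (Q_DA_H in skge_qset()) while next_offset carries just
 * the low 32 bits.  A buffer that crossed the 4GB mark would therefore
 * fail the check above and abort skge_up() with -EINVAL.
 */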
1781 
1782 /* stop receiver */
1783 static void skge_rx_stop(struct skge_hw *hw, int port)
1784 {
1789 }
1790 
1791 static void skge_down(struct net_device *dev)
1792 {
1793  struct skge_port *skge = dev->priv;
1794  struct skge_hw *hw = skge->hw;
1795  int port = skge->port;
1796 
1797  if (skge->mem == NULL)
1798  return;
1799 
1800  DBG2(PFX "%s: disabling interface\n", dev->name);
1801 
1802  if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
1803  skge->use_xm_link_timer = 0;
1804 
1806 
1807  hw->intr_mask &= ~portmask[port];
1808  skge_write32(hw, B0_IMSK, hw->intr_mask);
1809 
1810  skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
1811  if (hw->chip_id == CHIP_ID_GENESIS)
1812  genesis_stop(skge);
1813  else
1814  yukon_stop(skge);
1815 
1816  /* Stop transmitter */
1820 
1821 
1822  /* Disable Force Sync bit and Enable Alloc bit */
1825 
1826  /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1829 
1830  /* Reset PCI FIFO */
1833 
1834  /* Reset the RAM Buffer async Tx queue */
1836 
1837  skge_rx_stop(hw, port);
1838 
1839  if (hw->chip_id == CHIP_ID_GENESIS) {
1842  } else {
1845  }
1846 
1847  skge_led(skge, LED_MODE_OFF);
1848 
1849  skge_tx_clean(dev);
1850 
1851  skge_rx_clean(skge);
1852 
1853  skge_free(dev);
1854  return;
1855 }
1856 
1857 static inline int skge_tx_avail(const struct skge_ring *ring)
1858 {
1859  mb();
1860  return ((ring->to_clean > ring->to_use) ? 0 : NUM_TX_DESC)
1861  + (ring->to_clean - ring->to_use) - 1;
1862 }
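/*
 * Worked example (illustrative): with to_clean == to_use the ring is
 * empty and skge_tx_avail() returns NUM_TX_DESC + 0 - 1, i.e. one slot
 * is always held back so a full ring can be distinguished from an empty
 * one.  Once to_use has advanced to the element just before to_clean,
 * the function returns 0 and skge_xmit_frame() rejects further frames
 * with -EBUSY.
 */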
1863 
1864 static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob)
1865 {
1866  struct skge_port *skge = dev->priv;
1867  struct skge_hw *hw = skge->hw;
1868  struct skge_element *e;
1869  struct skge_tx_desc *td;
1870  u32 control, len;
1871  u64 map;
1872 
1873  if (skge_tx_avail(&skge->tx_ring) < 1)
1874  return -EBUSY;
1875 
1876  e = skge->tx_ring.to_use;
1877  td = e->desc;
1878  assert(!(td->control & BMU_OWN));
1879  e->iob = iob;
1880  len = iob_len(iob);
1881  map = virt_to_bus(iob->data);
1882 
1883  td->dma_lo = map;
1884  td->dma_hi = map >> 32;
1885 
1886  control = BMU_CHECK;
1887 
 1889  /* Make sure all the descriptors are written */
1890  wmb();
1891  td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
1892  wmb();
1893 
1895 
1896  DBGIO(PFX "%s: tx queued, slot %td, len %d\n",
1897  dev->name, e - skge->tx_ring.start, (unsigned int)len);
1898 
1899  skge->tx_ring.to_use = e->next;
1900  wmb();
1901 
1902  if (skge_tx_avail(&skge->tx_ring) <= 1) {
1903  DBG(PFX "%s: transmit queue full\n", dev->name);
1904  }
1905 
1906  return 0;
1907 }
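/*
 * Note on descriptor hand-off (descriptive, derived from the code above):
 * the DMA address and length/checksum control fields are filled in first,
 * a wmb() guarantees they are visible before the control word sets
 * BMU_OWN and passes the descriptor to the NIC, and a second wmb() orders
 * that write ahead of the doorbell that starts the transmitter.  The
 * receive path honours the same convention: skge_rx_done() only consumes
 * descriptors whose BMU_OWN bit the hardware has already cleared.
 */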
1908 
1909 /* Free all buffers in transmit ring */
1910 static void skge_tx_clean(struct net_device *dev)
1911 {
1912  struct skge_port *skge = dev->priv;
1913  struct skge_element *e;
1914 
1915  for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
1916  struct skge_tx_desc *td = e->desc;
1917  td->control = 0;
1918  }
1919 
1920  skge->tx_ring.to_clean = e;
1921 }
1922 
1923 static inline u16 phy_length(const struct skge_hw *hw, u32 status)
1924 {
1925  if (hw->chip_id == CHIP_ID_GENESIS)
1926  return status >> XMR_FS_LEN_SHIFT;
1927  else
1928  return status >> GMR_FS_LEN_SHIFT;
1929 }
1930 
1931 static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
1932 {
1933  if (hw->chip_id == CHIP_ID_GENESIS)
1934  return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
1935  else
1936  return (status & GMR_FS_ANY_ERR) ||
1937  (status & GMR_FS_RX_OK) == 0;
1938 }
1939 
1940 /* Free all buffers in Tx ring which are no longer owned by device */
1941 static void skge_tx_done(struct net_device *dev)
1942 {
1943  struct skge_port *skge = dev->priv;
1944  struct skge_ring *ring = &skge->tx_ring;
1945  struct skge_element *e;
1946 
1947  skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
1948 
1949  for (e = ring->to_clean; e != ring->to_use; e = e->next) {
1950  u32 control = ((const struct skge_tx_desc *) e->desc)->control;
1951 
1952  if (control & BMU_OWN)
1953  break;
1954 
1955  netdev_tx_complete(dev, e->iob);
1956  }
1957  skge->tx_ring.to_clean = e;
1958 
1959  /* Can run lockless until we need to synchronize to restart queue. */
1960  mb();
1961 }
1962 
1963 static void skge_rx_refill(struct net_device *dev)
1964 {
1965  struct skge_port *skge = dev->priv;
1966  struct skge_ring *ring = &skge->rx_ring;
1967  struct skge_element *e;
1968  struct io_buffer *iob;
1969  struct skge_rx_desc *rd;
1970  u32 control;
1971  int i;
1972 
1973  for (i = 0; i < NUM_RX_DESC; i++) {
1974  e = ring->to_clean;
1975  rd = e->desc;
1976  iob = e->iob;
1977  control = rd->control;
1978 
1979  /* nothing to do here */
1980  if (iob || (control & BMU_OWN))
1981  continue;
1982 
1983  DBG2("refilling rx desc %zd: ", (ring->to_clean - ring->start));
1984 
1985  iob = alloc_iob(RX_BUF_SIZE);
1986  if (iob) {
1987  skge_rx_setup(skge, e, iob, RX_BUF_SIZE);
1988  } else {
1989  DBG("descr %zd: alloc_iob() failed\n",
1990  (ring->to_clean - ring->start));
1991  /* We pass the descriptor to the NIC even if the
1992  * allocation failed. The card will stop as soon as it
1993  * encounters a descriptor with the OWN bit set to 0,
1994  * thus never getting to the next descriptor that might
1995  * contain a valid io_buffer. This would effectively
1996  * stall the receive.
1997  */
1998  skge_rx_setup(skge, e, NULL, 0);
1999  }
2000 
2001  ring->to_clean = e->next;
2002  }
2003 }
2004 
2005 static void skge_rx_done(struct net_device *dev)
2006 {
2007  struct skge_port *skge = dev->priv;
2008  struct skge_ring *ring = &skge->rx_ring;
2009  struct skge_rx_desc *rd;
2010  struct skge_element *e;
2011  struct io_buffer *iob;
2012  u32 control;
2013  u16 len;
2014  int i;
2015 
2016  e = ring->to_clean;
2017  for (i = 0; i < NUM_RX_DESC; i++) {
2018  iob = e->iob;
2019  rd = e->desc;
2020 
2021  rmb();
2022  control = rd->control;
2023 
2024  if ((control & BMU_OWN))
2025  break;
2026 
2027  if (!iob)
2028  continue;
2029 
2030  len = control & BMU_BBC;
2031 
2032  /* catch RX errors */
2033  if ((bad_phy_status(skge->hw, rd->status)) ||
2034  (phy_length(skge->hw, rd->status) != len)) {
2035  /* report receive errors */
2036  DBG("rx error\n");
2037  netdev_rx_err(dev, iob, -EIO);
2038  } else {
2039  DBG2("received packet, len %d\n", len);
2040  iob_put(iob, len);
2041  netdev_rx(dev, iob);
2042  }
2043 
2044  /* io_buffer passed to core, make sure we don't reuse it */
2045  e->iob = NULL;
2046 
2047  e = e->next;
2048  }
2049  skge_rx_refill(dev);
2050 }
2051 
2052 static void skge_poll(struct net_device *dev)
2053 {
2054  struct skge_port *skge = dev->priv;
2055  struct skge_hw *hw = skge->hw;
2056  u32 status;
2057 
2058  /* reading this register ACKs interrupts */
2060 
2061  /* Link event? */
2062  if (status & IS_EXT_REG) {
2063  skge_phyirq(hw);
2064  if (skge->use_xm_link_timer)
2065  xm_link_timer(skge);
2066  }
2067 
2068  skge_tx_done(dev);
2069 
2071 
2072  skge_rx_done(dev);
2073 
2074  /* restart receiver */
2075  wmb();
2077 
2079 
2080  return;
2081 }
2082 
2083 static void skge_phyirq(struct skge_hw *hw)
2084 {
2085  int port;
2086 
2087  for (port = 0; port < hw->ports; port++) {
2088  struct net_device *dev = hw->dev[port];
2089  struct skge_port *skge = dev->priv;
2090 
2091  if (hw->chip_id != CHIP_ID_GENESIS)
2092  yukon_phy_intr(skge);
2093  else if (hw->phy_type == SK_PHY_BCOM)
2094  bcom_phy_intr(skge);
2095  }
2096 
2097  hw->intr_mask |= IS_EXT_REG;
2098  skge_write32(hw, B0_IMSK, hw->intr_mask);
2100 }
2101 
2102 static const struct {
 2103  u8 id;
 2104  const char *name;
2105 } skge_chips[] = {
2106  { CHIP_ID_GENESIS, "Genesis" },
2107  { CHIP_ID_YUKON, "Yukon" },
2108  { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
2109  { CHIP_ID_YUKON_LP, "Yukon-LP"},
2110 };
2111 
2112 static const char *skge_board_name(const struct skge_hw *hw)
2113 {
2114  unsigned int i;
2115  static char buf[16];
2116 
2117  for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
2118  if (skge_chips[i].id == hw->chip_id)
2119  return skge_chips[i].name;
2120 
2121  snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
2122  return buf;
2123 }
2124 
2125 
2126 /*
2127  * Setup the board data structure, but don't bring up
2128  * the port(s)
2129  */
2130 static int skge_reset(struct skge_hw *hw)
2131 {
2132  u32 reg;
2133  u16 ctst, pci_status;
2134  u8 t8, mac_cfg, pmd_type;
2135  int i;
2136 
2137  ctst = skge_read16(hw, B0_CTST);
2138 
2139  /* do a SW reset */
2142 
2143  /* clear PCI errors, if any */
2146 
2147  pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
2149  pci_status | PCI_STATUS_ERROR_BITS);
2152 
2153  /* restore CLK_RUN bits (for Yukon-Lite) */
2156 
2157  hw->chip_id = skge_read8(hw, B2_CHIP_ID);
2158  hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
2159  pmd_type = skge_read8(hw, B2_PMD_TYP);
2160  hw->copper = (pmd_type == 'T' || pmd_type == '1');
2161 
2162  switch (hw->chip_id) {
2163  case CHIP_ID_GENESIS:
2164  switch (hw->phy_type) {
2165  case SK_PHY_XMAC:
2166  hw->phy_addr = PHY_ADDR_XMAC;
2167  break;
2168  case SK_PHY_BCOM:
2169  hw->phy_addr = PHY_ADDR_BCOM;
2170  break;
2171  default:
2172  DBG(PFX "unsupported phy type 0x%x\n",
2173  hw->phy_type);
2174  return -EOPNOTSUPP;
2175  }
2176  break;
2177 
2178  case CHIP_ID_YUKON:
2179  case CHIP_ID_YUKON_LITE:
2180  case CHIP_ID_YUKON_LP:
2181  if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
2182  hw->copper = 1;
2183 
2184  hw->phy_addr = PHY_ADDR_MARV;
2185  break;
2186 
2187  default:
2188  DBG(PFX "unsupported chip type 0x%x\n",
2189  hw->chip_id);
2190  return -EOPNOTSUPP;
2191  }
2192 
2193  mac_cfg = skge_read8(hw, B2_MAC_CFG);
2194  hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
2195  hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
2196 
 2197  /* read the adapter's RAM size */
2198  t8 = skge_read8(hw, B2_E_0);
2199  if (hw->chip_id == CHIP_ID_GENESIS) {
2200  if (t8 == 3) {
2201  /* special case: 4 x 64k x 36, offset = 0x80000 */
2202  hw->ram_size = 0x100000;
2203  hw->ram_offset = 0x80000;
2204  } else
2205  hw->ram_size = t8 * 512;
2206  }
2207  else if (t8 == 0)
2208  hw->ram_size = 0x20000;
2209  else
2210  hw->ram_size = t8 * 4096;
2211 
2212  hw->intr_mask = IS_HW_ERR;
2213 
2214  /* Use PHY IRQ for all but fiber based Genesis board */
2215  if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
2216  hw->intr_mask |= IS_EXT_REG;
2217 
2218  if (hw->chip_id == CHIP_ID_GENESIS)
2219  genesis_init(hw);
2220  else {
2221  /* switch power to VCC (WA for VAUX problem) */
2224 
2225  /* avoid boards with stuck Hardware error bits */
2226  if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
2228  DBG(PFX "stuck hardware sensor bit\n");
2229  hw->intr_mask &= ~IS_HW_ERR;
2230  }
2231 
2232  /* Clear PHY COMA */
2235  reg &= ~PCI_PHY_COMA;
2238 
2239 
2240  for (i = 0; i < hw->ports; i++) {
2243  }
2244  }
2245 
2246  /* turn off hardware timer (unused) */
2250 
2251  /* enable the Tx Arbiters */
2252  for (i = 0; i < hw->ports; i++)
2254 
2255  /* Initialize ram interface */
2257 
2270 
2272 
2273  /* Set interrupt moderation for Transmit only
2274  * Receive interrupts avoided by NAPI
2275  */
2279 
2280  skge_write32(hw, B0_IMSK, hw->intr_mask);
2281 
2282  for (i = 0; i < hw->ports; i++) {
2283  if (hw->chip_id == CHIP_ID_GENESIS)
2284  genesis_reset(hw, i);
2285  else
2286  yukon_reset(hw, i);
2287  }
2288 
2289  return 0;
2290 }
2291 
2292 /* Initialize network device */
2293 static struct net_device *skge_devinit(struct skge_hw *hw, int port,
2294  int highmem __unused)
2295 {
2296  struct skge_port *skge;
2297  struct net_device *dev = alloc_etherdev(sizeof(*skge));
2298 
2299  if (!dev) {
2300  DBG(PFX "etherdev alloc failed\n");
2301  return NULL;
2302  }
2303 
2304  dev->dev = &hw->pdev->dev;
2305 
2306  skge = dev->priv;
2307  skge->netdev = dev;
2308  skge->hw = hw;
2309 
2310  /* Auto speed and flow control */
2311  skge->autoneg = AUTONEG_ENABLE;
2313  skge->duplex = -1;
2314  skge->speed = -1;
2316 
2317  hw->dev[port] = dev;
2318 
2319  skge->port = port;
2320 
2321  /* read the mac address */
2322  memcpy(dev->hw_addr, (void *) (hw->regs + B2_MAC_1 + port*8), ETH_ALEN);
2323 
2324  return dev;
2325 }
2326 
2327 static void skge_show_addr(struct net_device *dev)
2328 {
2329  DBG2(PFX "%s: addr %s\n",
2330  dev->name, netdev_addr(dev));
2331 }
2332 
2333 static int skge_probe(struct pci_device *pdev)
2334 {
2335  struct net_device *dev, *dev1;
2336  struct skge_hw *hw;
2337  int err, using_dac = 0;
2338 
2340 
2341  err = -ENOMEM;
2342  hw = zalloc(sizeof(*hw));
2343  if (!hw) {
2344  DBG(PFX "cannot allocate hardware struct\n");
2345  goto err_out_free_regions;
2346  }
2347 
2348  hw->pdev = pdev;
2349 
2350  hw->regs = (unsigned long)pci_ioremap(pdev,
2352  SKGE_REG_SIZE);
2353  if (!hw->regs) {
2354  DBG(PFX "cannot map device registers\n");
2355  goto err_out_free_hw;
2356  }
2357 
2358  err = skge_reset(hw);
2359  if (err)
2360  goto err_out_iounmap;
2361 
2362  DBG(PFX " addr 0x%llx irq %d chip %s rev %d\n",
2363  (unsigned long long)pdev->ioaddr, pdev->irq,
2364  skge_board_name(hw), hw->chip_rev);
2365 
2366  dev = skge_devinit(hw, 0, using_dac);
2367  if (!dev)
2368  goto err_out_led_off;
2369 
2371 
2372  err = register_netdev(dev);
2373  if (err) {
2374  DBG(PFX "cannot register net device\n");
2375  goto err_out_free_netdev;
2376  }
2377 
2379 
2380  if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
2381  if (register_netdev(dev1) == 0)
2382  skge_show_addr(dev1);
2383  else {
2384  /* Failure to register second port need not be fatal */
2385  DBG(PFX "register of second port failed\n");
2386  hw->dev[1] = NULL;
2387  netdev_nullify(dev1);
2388  netdev_put(dev1);
2389  }
2390  }
2392 
2393  return 0;
2394 
2395 err_out_free_netdev:
2397  netdev_put(dev);
2398 err_out_led_off:
2400 err_out_iounmap:
2401  iounmap((void*)hw->regs);
2402 err_out_free_hw:
2403  free(hw);
2404 err_out_free_regions:
2406  return err;
2407 }
2408 
2409 static void skge_remove(struct pci_device *pdev)
2410 {
2411  struct skge_hw *hw = pci_get_drvdata(pdev);
2412  struct net_device *dev0, *dev1;
2413 
2414  if (!hw)
2415  return;
2416 
2417  if ((dev1 = hw->dev[1]))
2418  unregister_netdev(dev1);
2419  dev0 = hw->dev[0];
2420  unregister_netdev(dev0);
2421 
2422  hw->intr_mask = 0;
2423  skge_write32(hw, B0_IMSK, 0);
2425 
2428 
2429  if (dev1) {
2430  netdev_nullify(dev1);
2431  netdev_put(dev1);
2432  }
2433  netdev_nullify(dev0);
2434  netdev_put(dev0);
2435 
2436  iounmap((void*)hw->regs);
2437  free(hw);
2438  pci_set_drvdata(pdev, NULL);
2439 }
2440 
2441 /*
2442  * Enable or disable IRQ masking.
2443  *
 2444  * @v dev Device to control.
 2445  * @v enable Zero to mask off IRQ, non-zero to enable IRQ.
 2446  *
 2447  * This is an iPXE Network Driver API function.
2448  */
2449 static void skge_net_irq ( struct net_device *dev, int enable ) {
2450  struct skge_port *skge = dev->priv;
2451  struct skge_hw *hw = skge->hw;
2452 
2453  if (enable)
2454  hw->intr_mask |= portmask[skge->port];
2455  else
2456  hw->intr_mask &= ~portmask[skge->port];
2457  skge_write32(hw, B0_IMSK, hw->intr_mask);
2458 }
2459 
2460 struct pci_driver skge_driver __pci_driver = {
2461  .ids = skge_id_table,
2462  .id_count = ( sizeof (skge_id_table) / sizeof (skge_id_table[0]) ),
2463  .probe = skge_probe,
2464  .remove = skge_remove
2465 };
2466 
Definition: skge.h:1841
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:365
#define rmb()
Definition: io.h:544
u32 dma
Definition: skge.h:2511
#define ENOMEM
Not enough space.
Definition: errno.h:534
void * memcpy(void *dest, const void *src, size_t len) __nonnull
#define GM_GPCR_SPEED_1000
Definition: skge.h:1807
Definition: skge.h:82
Definition: skge.h:542
static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
Definition: skge.c:1180
u8 port
Port number.
Definition: CIB_PRM.h:31
static const struct @117 skge_chips[]
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
Definition: skge.h:109
enum pause_status flow_status
Definition: skge.h:2504
#define ETH_HLEN
Definition: if_ether.h:9
static const char * skge_board_name(const struct skge_hw *hw)
Definition: skge.c:2112
static const u16 fiber_pause_map[]
Definition: skge.c:545
#define PHY_M_LED_BLINK_RT(x)
Definition: skge.h:1528
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition: netdevice.h:575
Ethernet protocol.
int pci_read_config_dword(struct pci_device *pci, unsigned int where, uint32_t *value)
Read 32-bit dword from PCI configuration space.
#define PHY_B_AS_PAUSE_MSK
Definition: skge.h:1307
#define PCI_PHY_COMA
Definition: skge.h:11
#define PHY_M_EC_M_DSC(x)
Definition: skge.h:1504
#define PCI_STATUS_ERROR_BITS
Definition: skge.h:70
struct skge_element * start
Definition: skge.h:2456
void * priv
Driver private data.
Definition: netdevice.h:431
static void xm_outhash(const struct skge_hw *hw, int port, int reg, const u8 *hash)
Definition: skge.h:2576
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
static void genesis_init(struct skge_hw *hw)
Definition: skge.c:481
Definition: skge.h:349
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
Definition: skge.c:459
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition: netdevice.h:788
static u32 xm_read32(const struct skge_hw *hw, int port, int reg)
Definition: skge.h:2552
ring len
Length.
Definition: dwmac.h:231
#define GMAC_DEF_MSK
Definition: skge.h:2021
static u16 yukon_speed(const struct skge_hw *hw __unused, u16 aux)
Definition: skge.c:1513
static int xm_check_link(struct net_device *dev)
Definition: skge.c:753
void udelay(unsigned long usecs)
Delay for a fixed number of microseconds.
Definition: timer.c:60
u16 csum2_start
Definition: skge.h:2431
static int netdev_link_ok(struct net_device *netdev)
Check link state of network device.
Definition: netdevice.h:639
#define PHY_M_PS_PAUSE_MSK
Definition: skge.h:1451
Definition: skge.h:558
#define PHY_M_EC_S_DSC(x)
Definition: skge.h:1505
#define DUPLEX_FULL
Definition: bnx2.h:111
uint64_t u64
Definition: stdint.h:25
u8 autoneg
Definition: skge.h:2505
static u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
Definition: skge.c:128
#define RING_SIZE
Definition: skge.h:32
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition: pci.c:96
Definition: skge.h:541
u32 next_offset
Definition: skge.h:2437
Definition: skge.h:369
static void bcom_phy_init(struct skge_port *skge)
Definition: skge.c:622
#define SUPPORTED_TP
Definition: skge.h:65
static u8 skge_read8(const struct skge_hw *hw, int reg)
Definition: skge.h:2527
Definition: skge.h:782
#define TX_IPG_JAM_DATA(x)
Definition: skge.h:1842
void unregister_netdev(struct net_device *netdev)
Unregister network device.
Definition: netdevice.c:941
#define PFX
Definition: sis190.h:34
#define DBGIO(...)
Definition: compiler.h:549
static void skge_write16(const struct skge_hw *hw, int reg, u16 val)
Definition: skge.h:2537
static struct pci_device_id skge_id_table[]
Definition: skge.c:47
static void skge_rx_clean(struct skge_port *skge)
Definition: skge.c:379
#define SK_PKT_TO_MAX
Definition: skge.h:441
struct pci_device * pdev
Definition: skge.h:2462
#define GPC_HWCFG_GMII_COP
Definition: skge.h:1988
led_mode
Definition: skge.c:133
static void skge_tx_done(struct net_device *dev)
Definition: skge.c:1941
Definition: skge.h:79
Definition: skge.h:376
Definition: skge.h:719
static void skge_link_up(struct skge_port *skge)
Definition: skge.c:395
#define XMR_DEF_MSK
Definition: skge.h:2386
static void(* free)(struct refcnt *refcnt))
Definition: refcnt.h:54
static const int rxqaddr[]
Definition: skge.c:87
void * zalloc(size_t size)
Allocate cleared memory.
Definition: malloc.c:661
#define IPG_DATA_VAL(x)
Definition: skge.h:1857
Definition: skge.h:358
struct skge_element * to_clean
Definition: skge.h:2454
PCI bus.
#define PHY_M_LED_PULS_DUR(x)
Definition: skge.h:1527
Definition: skge.h:2460
A PCI device.
Definition: pci.h:210
int register_netdev(struct net_device *netdev)
Register network device.
Definition: netdevice.c:759
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:159
u32 dma_hi
Definition: skge.h:2426
uint32_t addr
Buffer address.
Definition: dwmac.h:20
uint32_t control
Control.
Definition: myson.h:14
#define EOPNOTSUPP
Operation not supported on socket.
Definition: errno.h:604
A network device.
Definition: netdevice.h:352
static void skge_qset(struct skge_port *skge, u16 q, const struct skge_element *e)
Definition: skge.c:1676
static int skge_tx_avail(const struct skge_ring *ring)
Definition: skge.c:1857
static void xm_outaddr(const struct skge_hw *hw, int port, int reg, const u8 *addr)
Definition: skge.h:2585
static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
Definition: skge.c:1200
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition: netdevice.h:531
static void skge_poll(struct net_device *dev)
Definition: skge.c:2052
u16 csum2
Definition: skge.h:2429
#define TX_JAM_LEN_VAL(x)
Definition: skge.h:1840
#define ARRAY_SIZE(x)
Definition: efx_common.h:43
struct skge_ring rx_ring
Definition: skge.h:2501
#define SK_REG(port, reg)
Definition: skge.h:2548
Definition: skge.h:718
#define PCI_STATUS
PCI status.
Definition: pci.h:35
#define SUPPORTED_10baseT_Full
Definition: skge.h:59
static int skge_reset(struct skge_hw *hw)
Definition: skge.c:2130
#define ETH_ALEN
Definition: if_ether.h:8
A PCI device ID list entry.
Definition: pci.h:174
Definition: skge.h:356
static void skge_rx_stop(struct skge_hw *hw, int port)
Definition: skge.c:1783
#define SUPPORTED_100baseT_Half
Definition: skge.h:60
Definition: skge.h:78
static void genesis_stop(struct skge_port *skge)
Definition: skge.c:999
Definition: skge.h:539
static struct xen_remove_from_physmap * remove
Definition: xenmem.h:39
#define RX_RING_SIZE
Definition: 3c515.c:86
Definition: skge.h:377
static void skge_rx_setup(struct skge_port *skge __unused, struct skge_element *e, struct io_buffer *iob, unsigned int bufsize)
Definition: skge.c:339
uint8_t status
Status.
Definition: ena.h:16
Network device operations.
Definition: netdevice.h:213
uint16_t ext
Extended status.
Definition: ena.h:20
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition: netdevice.c:548
struct device * dev
Underlying hardware device.
Definition: netdevice.h:364
Network device management.
struct net_device * netdev
Definition: skge.h:2497
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition: pci.h:375
#define SK_RI_TO_53
Definition: skge.h:442
#define XM_EXM(reg)
Definition: skge.h:2089
static u16 gma_read16(const struct skge_hw *hw, int port, int reg)
Definition: skge.h:2596
static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
Definition: skge.c:1645
static void skge_tx_clean(struct net_device *dev)
Definition: skge.c:1910
u16 csum1
Definition: skge.h:2430
char name[NETDEV_NAME_LEN]
Name of this network device.
Definition: netdevice.h:362
#define PHY_M_LED_MO_1000(x)
Definition: skge.h:1566
#define XM_DEF_MODE
Definition: skge.h:2337
static __always_inline int struct dma_mapping * map
Definition: dma.h:183
enum pause_control flow_control
Definition: skge.h:2503
u32 status
Definition: skge.h:2427
int(* probe)(struct pci_device *pci)
Probe device.
Definition: pci.h:264
#define TX_COL_DEF
Definition: skge.h:1819
#define GPC_HWCFG_GMII_FIB
Definition: skge.h:1989
static void bcom_phy_intr(struct skge_port *skge)
Definition: skge.c:1130
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base, size_t num)
Definition: skge.c:312
static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
Definition: skge.c:450
void skge_free(struct net_device *dev)
Definition: skge.c:1693
Definition: skge.h:496
void * data
Start of data.
Definition: iobuf.h:52
const char * name
Definition: skge.c:2104
Definition: skge.h:360
Definition: skge.h:393
#define EIO
Input/output error.
Definition: errno.h:433
#define DATA_BLIND_DEF
Definition: skge.h:1855
struct pci_driver skge_driver __pci_driver
Definition: skge.c:2460
void * mem
Definition: skge.h:2510
#define SUPPORTED_FIBRE
Definition: skge.h:66
void free_phys(void *ptr, size_t size)
Free memory allocated with malloc_phys()
Definition: malloc.c:722
struct net_device * alloc_etherdev(size_t priv_size)
Allocate Ethernet device.
Definition: ethernet.c:264
u32 dma_lo
Definition: skge.h:2425
static void yukon_link_up(struct skge_port *skge)
Definition: skge.c:1525
#define Q_ADDR(reg, offs)
Definition: skge.h:534
int pci_write_config_dword(struct pci_device *pci, unsigned int where, uint32_t value)
Write 32-bit dword to PCI configuration space.
uint8_t ctrl
Ring control.
Definition: dwmac.h:18
#define SK_FACT_53
Definition: skge.h:865
Definition: skge.h:128
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
Definition: skge.c:1160
uint32_t end
Ending offset.
Definition: netvsc.h:18
void iounmap(volatile const void *io_addr)
Unmap I/O address.
static void yukon_init(struct skge_hw *hw, int port)
Definition: skge.c:1211
#define SUPPORTED_1000baseT_Full
Definition: skge.h:63
static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob)
Definition: skge.c:1864
#define ADVERTISED_10baseT_Full
Definition: bnx2.h:43
uint16_t reason
Rejection reason.
Definition: ib_mad.h:20
static void xm_phy_init(struct skge_port *skge)
Definition: skge.c:719
static void genesis_link_up(struct skge_port *skge)
Definition: skge.c:1046
u32 dma_hi
Definition: skge.h:2439
#define GM_SMI_CT_REG_AD(x)
Definition: skge.h:1870
#define AUTONEG_DISABLE
Definition: bnx2.h:4583
int snprintf(char *buf, size_t size, const char *fmt,...)
Write a formatted string to a buffer.
Definition: vsprintf.c:382
static void skge_down(struct net_device *dev)
Definition: skge.c:1791
#define PHY_M_EC_MAC_S(x)
Definition: skge.h:1506
void mb(void)
Memory barrier.
u8 id
Definition: skge.c:2103
#define DATA_BLIND_VAL(x)
Definition: skge.h:1854
static const int txqaddr[]
Definition: skge.c:86
#define PHY_M_LED_MO_10(x)
Definition: skge.h:1564
static void gma_set_addr(struct skge_hw *hw, int port, int reg, const u8 *addr)
Definition: skge.h:2612
Definition: skge.h:518
#define GM_MIB_CNT_SIZE
Definition: skge.h:1716
uint8_t ll_addr[MAX_LL_ADDR_LEN]
Link-layer address.
Definition: netdevice.h:387
static int bad_phy_status(const struct skge_hw *hw, u32 status)
Definition: skge.c:1931
static void skge_rx_refill(struct net_device *dev)
Definition: skge.c:1963
Definition: skge.h:254
#define DBG(...)
Print a debugging message.
Definition: compiler.h:498
#define GM_GPCR_AU_ALL_DIS
Definition: skge.h:1808
uint16_t supported
Bitmask of supported option values.
Definition: ena.h:12
#define SK_BLK_DUR
Definition: skge.h:857
static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
Definition: skge.c:427
void * pci_ioremap(struct pci_device *pci, unsigned long bus_addr, size_t len)
Map PCI bus address as an I/O address.
#define CSR_CLR_RESET
Definition: skge.h:793
uint8_t bufsize
Size of the packet, in bytes.
Definition: int13.h:12
#define CSR_SET_RESET
Definition: skge.h:790
#define PHY_M_LED_MO_100(x)
Definition: skge.h:1565
static void skge_rx_done(struct net_device *dev)
Definition: skge.c:2005
Definition: skge.h:112
u16 speed
Definition: skge.h:2507
Definition: skge.h:519
static void skge_led(struct skge_port *skge, enum led_mode mode)
Definition: skge.c:134
static void yukon_suspend(struct skge_hw *hw, int port)
Definition: skge.c:1475
#define ADVERTISED_1000baseT_Full
Definition: bnx2.h:47
#define NUM_TX_DESC
Definition: igbvf.h:280
#define NULL
NULL pointer (VOID *)
Definition: Base.h:321
struct golan_eqe_cmd cmd
Definition: CIB_PRM.h:29
Definition: skge.h:557
static void genesis_reset(struct skge_hw *hw, int port)
Definition: skge.c:509
#define ADVERTISED_100baseT_Full
Definition: bnx2.h:45
#define ETIMEDOUT
Connection timed out.
Definition: errno.h:669
#define GPC_ANEG_ADV_ALL_M
Definition: skge.h:1990
String functions.
#define PCI_ROM(_vendor, _device, _name, _description, _data)
Definition: pci.h:307
static u16 phy_length(const struct skge_hw *hw, u32 status)
Definition: skge.c:1923
Definition: skge.h:378
Definition: skge.h:2075
struct skge_element * to_use
Definition: skge.h:2455
#define ADVERTISED_10baseT_Half
Definition: bnx2.h:42
uint8_t u8
Definition: stdint.h:19
static void skge_write32(const struct skge_hw *hw, int reg, u32 val)
Definition: skge.h:2532
u32 control
Definition: skge.h:2436
uint32_t u32
Definition: stdint.h:23
Definition: skge.h:101
Definition: skge.h:540
static u16 skge_read16(const struct skge_hw *hw, int reg)
Definition: skge.h:2522
Definition: skge.h:560
Definition: skge.h:2094
static void genesis_mac_init(struct skge_hw *hw, int port)
Definition: skge.c:853
void * malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition: malloc.c:706
static u32 skge_supported_modes(const struct skge_hw *hw)
Definition: skge.c:93
#define DBG2(...)
Definition: compiler.h:515
static const u16 phy_pause_map[]
Definition: skge.c:537
#define DUPLEX_HALF
Definition: bnx2.h:110
void * memset(void *dest, int character, size_t len) __nonnull
#define XMT_DEF_MSK
Definition: skge.h:2420
static const uint8_t r[3][4]
MD4 shift amounts.
Definition: md4.c:53
#define SK_MAC_TO_53
Definition: skge.h:439
A persistent I/O buffer.
Definition: iobuf.h:37
static void skge_write8(const struct skge_hw *hw, int reg, u8 val)
Definition: skge.h:2542
Definition: skge.h:550