iPXE
skge.c
Go to the documentation of this file.
1 /*
2  * iPXE driver for Marvell Yukon chipset and SysKonnect Gigabit
3  * Ethernet adapters. Derived from Linux skge driver (v1.13), which was
4  * based on earlier sk98lin, e100 and FreeBSD if_sk drivers.
5  *
6  * This driver intentionally does not support all the features of the
7  * original driver such as link fail-over and link management because
8  * those should be done at higher levels.
9  *
10  * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
11  *
12  * Modified for iPXE, July 2008 by Michael Decker <mrd999@gmail.com>
13  * Tested and Modified in December 2009 by
14  * Thomas Miletich <thomas.miletich@gmail.com>
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License.
19  *
20  * This program is distributed in the hope that it will be useful,
21  * but WITHOUT ANY WARRANTY; without even the implied warranty of
22  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23  * GNU General Public License for more details.
24  *
25  * You should have received a copy of the GNU General Public License
26  * along with this program; if not, write to the Free Software
27  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
28  * 02110-1301, USA.
29  */
30 
31 FILE_LICENCE ( GPL2_ONLY );
32 
33 #include <stdint.h>
34 #include <errno.h>
35 #include <stdio.h>
36 #include <unistd.h>
37 #include <ipxe/netdevice.h>
38 #include <ipxe/ethernet.h>
39 #include <ipxe/if_ether.h>
40 #include <ipxe/iobuf.h>
41 #include <ipxe/malloc.h>
42 #include <ipxe/pci.h>
43 
44 #include "skge.h"
45 
/* PCI vendor/device IDs of all adapters handled by this driver
 * (SysKonnect Genesis/Yukon, 3Com, D-Link, Marvell, CNET, Linksys).
 * NOTE(review): table order may affect probe matching — do not reorder. */
46 static struct pci_device_id skge_id_table[] = {
47  PCI_ROM(0x10b7, 0x1700, "3C940", "3COM 3C940", 0),
48  PCI_ROM(0x10b7, 0x80eb, "3C940B", "3COM 3C940", 0),
49  PCI_ROM(0x1148, 0x4300, "GE", "Syskonnect GE", 0),
50  PCI_ROM(0x1148, 0x4320, "YU", "Syskonnect YU", 0),
51  PCI_ROM(0x1186, 0x4C00, "DGE510T", "DLink DGE-510T", 0),
52  PCI_ROM(0x1186, 0x4b01, "DGE530T", "DLink DGE-530T", 0),
53  PCI_ROM(0x11ab, 0x4320, "id4320", "Marvell id4320", 0),
54  PCI_ROM(0x11ab, 0x5005, "id5005", "Marvell id5005", 0), /* Belkin */
55  PCI_ROM(0x1371, 0x434e, "Gigacard", "CNET Gigacard", 0),
56  PCI_ROM(0x1737, 0x1064, "EG1064", "Linksys EG1064", 0),
57  PCI_ROM(0x1737, 0xffff, "id_any", "Linksys [any]", 0)
58 };
59 
60 static int skge_up(struct net_device *dev);
61 static void skge_down(struct net_device *dev);
62 static void skge_tx_clean(struct net_device *dev);
63 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
64 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
65 static void yukon_init(struct skge_hw *hw, int port);
66 static void genesis_mac_init(struct skge_hw *hw, int port);
67 static void genesis_link_up(struct skge_port *skge);
68 
69 static void skge_phyirq(struct skge_hw *hw);
70 static void skge_poll(struct net_device *dev);
71 static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob);
72 static void skge_net_irq ( struct net_device *dev, int enable );
73 
74 static void skge_rx_refill(struct net_device *dev);
75 
/* iPXE network device operations for this driver.
 * NOTE(review): the initializer's declaration line (doxygen line 76,
 * presumably "static struct net_device_operations skge_operations = {")
 * was lost when this listing was extracted — confirm against upstream. */
77  .open = skge_up,
78  .close = skge_down,
79  .transmit = skge_xmit_frame,
80  .poll = skge_poll,
81  .irq = skge_net_irq
82 };
83 
84 /* Avoid conditionals by using array */
/* Per-port TX/RX queue addresses and IRQ port masks, indexed by port number */
85 static const int txqaddr[] = { Q_XA1, Q_XA2 };
86 static const int rxqaddr[] = { Q_R1, Q_R2 };
87 static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
88 
89 /* Determine supported/advertised modes based on hardware.
90  * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
 *
 * Starts from the copper or fibre capability set, then strips modes the
 * chip family cannot do (Genesis has no 10/100; Yukon has no 1000HD).
 * NOTE(review): continuation lines (doxygen 98-103, 107-109, 114-115)
 * were lost in extraction — this body is incomplete as shown.
91  */
92 static u32 skge_supported_modes(const struct skge_hw *hw)
93 {
94  u32 supported;
95 
96  if (hw->copper) {
97  supported = SUPPORTED_10baseT_Half
104 
105  if (hw->chip_id == CHIP_ID_GENESIS)
106  supported &= ~(SUPPORTED_10baseT_Half
110 
111  else if (hw->chip_id == CHIP_ID_YUKON)
112  supported &= ~SUPPORTED_1000baseT_Half;
113  } else
116 
117  return supported;
118 }
119 
120 /* Chip internal frequency for clock calculations */
121 static inline u32 hwkhz(const struct skge_hw *hw)
122 {
123  return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
124 }
125 
126 /* Microseconds to chip HZ */
127 static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
128 {
129  return hwkhz(hw) * usec / 1000;
130 }
131 
/* Drive the port LEDs into the requested mode.  Genesis (XMAC) and
 * Yukon (GMAC) families use different LED control registers, hence the
 * top-level chip_id split.
 * NOTE(review): many register-write lines were lost in extraction (gaps
 * in the embedded numbering) — this body is incomplete as shown. */
133 static void skge_led(struct skge_port *skge, enum led_mode mode)
134 {
135  struct skge_hw *hw = skge->hw;
136  int port = skge->port;
137 
138  if (hw->chip_id == CHIP_ID_GENESIS) {
139  switch (mode) {
140  case LED_MODE_OFF:
141  if (hw->phy_type == SK_PHY_BCOM)
143  else {
146  }
150  break;
151 
152  case LED_MODE_ON:
155 
158 
159  break;
160 
161  case LED_MODE_TST:
165 
166  if (hw->phy_type == SK_PHY_BCOM)
168  else {
172  }
173 
174  }
175  } else {
176  switch (mode) {
177  case LED_MODE_OFF:
185  break;
186  case LED_MODE_ON:
192 
195  (skge->speed == SPEED_100 ?
197  break;
198  case LED_MODE_TST:
206  }
207  }
208 }
209 
210 /*
211  * I've left in these EEPROM and VPD functions, as someone may desire to
212  * integrate them in the future. -mdeck
213  *
214  * static int skge_get_eeprom_len(struct net_device *dev)
215  * {
216  * struct skge_port *skge = netdev_priv(dev);
217  * u32 reg2;
218  *
219  * pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
220  * return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
221  * }
222  *
223  * static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
224  * {
225  * u32 val;
226  *
227  * pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);
228  *
229  * do {
230  * pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
231  * } while (!(offset & PCI_VPD_ADDR_F));
232  *
233  * pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
234  * return val;
235  * }
236  *
237  * static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
238  * {
239  * pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
240  * pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
241  * offset | PCI_VPD_ADDR_F);
242  *
243  * do {
244  * pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
245  * } while (offset & PCI_VPD_ADDR_F);
246  * }
247  *
248  * static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
249  * u8 *data)
250  * {
251  * struct skge_port *skge = netdev_priv(dev);
252  * struct pci_dev *pdev = skge->hw->pdev;
253  * int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
254  * int length = eeprom->len;
255  * u16 offset = eeprom->offset;
256  *
257  * if (!cap)
258  * return -EINVAL;
259  *
260  * eeprom->magic = SKGE_EEPROM_MAGIC;
261  *
262  * while (length > 0) {
263  * u32 val = skge_vpd_read(pdev, cap, offset);
264  * int n = min_t(int, length, sizeof(val));
265  *
266  * memcpy(data, &val, n);
267  * length -= n;
268  * data += n;
269  * offset += n;
270  * }
271  * return 0;
272  * }
273  *
274  * static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
275  * u8 *data)
276  * {
277  * struct skge_port *skge = netdev_priv(dev);
278  * struct pci_dev *pdev = skge->hw->pdev;
279  * int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
280  * int length = eeprom->len;
281  * u16 offset = eeprom->offset;
282  *
283  * if (!cap)
284  * return -EINVAL;
285  *
286  * if (eeprom->magic != SKGE_EEPROM_MAGIC)
287  * return -EINVAL;
288  *
289  * while (length > 0) {
290  * u32 val;
291  * int n = min_t(int, length, sizeof(val));
292  *
293  * if (n < sizeof(val))
294  * val = skge_vpd_read(pdev, cap, offset);
295  * memcpy(&val, data, n);
296  *
297  * skge_vpd_write(pdev, cap, offset, val);
298  *
299  * length -= n;
300  * data += n;
301  * offset += n;
302  * }
303  * return 0;
304  * }
305  */
306 
307 /*
308  * Allocate ring elements and chain them together
309  * One-to-one association of board descriptors with ring elements
310  */
311 static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base,
312  size_t num)
313 {
314  struct skge_tx_desc *d;
315  struct skge_element *e;
316  unsigned int i;
317 
318  ring->start = zalloc(num*sizeof(*e));
319  if (!ring->start)
320  return -ENOMEM;
321 
322  for (i = 0, e = ring->start, d = vaddr; i < num; i++, e++, d++) {
323  e->desc = d;
324  if (i == num - 1) {
325  e->next = ring->start;
326  d->next_offset = base;
327  } else {
328  e->next = e + 1;
329  d->next_offset = base + (i+1) * sizeof(*d);
330  }
331  }
332  ring->to_use = ring->to_clean = ring->start;
333 
334  return 0;
335 }
336 
337 /* Allocate and setup a new buffer for receiving */
/* Attaches iob (or detaches, when iob is NULL) to receive descriptor e,
 * programming the 64-bit DMA address and resetting the checksum fields.
 * The wmb() orders descriptor field writes before the (elided) control
 * word write that hands the descriptor to hardware.
 * NOTE(review): the final control-word write (doxygen line 357) was lost
 * in extraction — confirm against upstream. */
338 static void skge_rx_setup(struct skge_port *skge __unused,
339  struct skge_element *e,
340  struct io_buffer *iob, unsigned int bufsize)
341 {
342  struct skge_rx_desc *rd = e->desc;
343  u64 map;
344 
345  map = ( iob != NULL ) ? virt_to_bus(iob->data) : 0;
346 
347  rd->dma_lo = map;
348  rd->dma_hi = map >> 32;
349  e->iob = iob;
350  rd->csum1_start = ETH_HLEN;
351  rd->csum2_start = ETH_HLEN;
352  rd->csum1 = 0;
353  rd->csum2 = 0;
354 
355  wmb();
356 
358 }
359 
360 /* Resume receiving using existing skb,
361  * Note: DMA address is not changed by chip.
362  * MTU not changed while receiver active.
 *
 * Resets only the checksum fields; wmb() orders them before the (elided)
 * control-word write that re-arms the descriptor.
 * NOTE(review): the control-word write (doxygen line 373) was lost in
 * extraction — confirm against upstream.
363  */
364 static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
365 {
366  struct skge_rx_desc *rd = e->desc;
367 
368  rd->csum2 = 0;
369  rd->csum2_start = ETH_HLEN;
370 
371  wmb();
372 
374 }
375 
376 
377 /* Free all buffers in receive ring, assumes receiver stopped */
378 static void skge_rx_clean(struct skge_port *skge)
379 {
380  struct skge_ring *ring = &skge->rx_ring;
381  struct skge_element *e;
382 
383  e = ring->start;
384  do {
385  struct skge_rx_desc *rd = e->desc;
386  rd->control = 0;
387  if (e->iob) {
388  free_iob(e->iob);
389  e->iob = NULL;
390  }
391  } while ((e = e->next) != ring->start);
392 }
393 
/* Mark the link up: light the link LED, notify the iPXE net core, and
 * log the negotiated speed/duplex.
 * NOTE(review): the LED value argument (doxygen line 397) was lost in
 * extraction — confirm against upstream. */
394 static void skge_link_up(struct skge_port *skge)
395 {
396  skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
398 
399  netdev_link_up(skge->netdev);
400 
401  DBG2(PFX "%s: Link is up at %d Mbps, %s duplex\n",
402  skge->netdev->name, skge->speed,
403  skge->duplex == DUPLEX_FULL ? "full" : "half");
404 }
405 
406 static void skge_link_down(struct skge_port *skge)
407 {
408  skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
409  netdev_link_down(skge->netdev);
410 
411  DBG2(PFX "%s: Link is down.\n", skge->netdev->name);
412 }
413 
414 
/* Handle loss of link on an XMAC (Genesis) port: perform the (elided)
 * MAC-side shutdown, then propagate link-down if the netdev still
 * believed the link was up.
 * NOTE(review): doxygen line 420 (likely an XM register write) was lost
 * in extraction — confirm against upstream. */
415 static void xm_link_down(struct skge_hw *hw, int port)
416 {
417  struct net_device *dev = hw->dev[port];
418  struct skge_port *skge = netdev_priv(dev);
419 
421 
422  if (netdev_link_ok(dev))
423  skge_link_down(skge);
424 }
425 
/* Low-level XMAC PHY register read.  Selects the register via
 * XM_PHY_ADDR, then (for external PHYs) polls up to PHY_RETRIES times
 * for the MDIO transaction to complete; the internal XMAC PHY needs no
 * polling.  Returns 0 on success with *val filled in, -ETIMEDOUT on
 * MDIO timeout.
 * NOTE(review): the poll condition and the final data read (doxygen
 * lines 431, 437, 444) were lost in extraction. */
426 static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
427 {
428  int i;
429 
430  xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
432 
433  if (hw->phy_type == SK_PHY_XMAC)
434  goto ready;
435 
436  for (i = 0; i < PHY_RETRIES; i++) {
438  goto ready;
439  udelay(1);
440  }
441 
442  return -ETIMEDOUT;
443  ready:
445 
446  return 0;
447 }
448 
449 static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
450 {
451  u16 v = 0;
452  if (__xm_phy_read(hw, port, reg, &v))
453  DBG(PFX "%s: phy read timed out\n",
454  hw->dev[port]->name);
455  return v;
456 }
457 
/* Low-level XMAC PHY register write.  Waits for the MDIO interface to
 * be free (first loop, -EIO on timeout), issues the write, then waits
 * for completion (second loop, -ETIMEDOUT on timeout).  Returns 0 on
 * success.
 * NOTE(review): the poll conditions and the data write itself (doxygen
 * lines 464, 471, 473) were lost in extraction. */
458 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
459 {
460  int i;
461 
462  xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
463  for (i = 0; i < PHY_RETRIES; i++) {
465  goto ready;
466  udelay(1);
467  }
468  return -EIO;
469 
470  ready:
472  for (i = 0; i < PHY_RETRIES; i++) {
474  return 0;
475  udelay(1);
476  }
477  return -ETIMEDOUT;
478 }
479 
/* One-time Genesis (XMAC) chip-level initialization: blink source
 * counter, MAC arbiter configuration/timeouts, and packet arbiter
 * timeout.
 * NOTE(review): nearly all register writes (doxygen 483-505) were lost
 * in extraction — only the intent comments survive. */
480 static void genesis_init(struct skge_hw *hw)
481 {
482  /* set blink source counter */
485 
486  /* configure mac arbiter */
488 
489  /* configure mac arbiter timeout values */
494 
499 
500  /* configure packet arbiter timeout */
506 }
507 
/* Reset one Genesis (XMAC) port: clear statistics/mode/TX/RX command
 * registers, mask the Broadcom PHY interrupt if present, and flush the
 * MAC FIFOs.
 * NOTE(review): several register accesses (doxygen 513, 516-517, 526,
 * 530-531) were lost in extraction; 'zero' and the final use of 'reg'
 * are in the elided lines. */
508 static void genesis_reset(struct skge_hw *hw, int port)
509 {
510  const u8 zero[8] = { 0 };
511  u32 reg;
512 
514 
515  /* reset the statistics module */
518  xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
519  xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
520  xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
521 
522  /* disable Broadcom PHY IRQ */
523  if (hw->phy_type == SK_PHY_BCOM)
524  xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
525 
527 
528  /* Flush TX and RX fifo */
529  reg = xm_read32(hw, port, XM_MODE);
532 }
533 
534 
535 /* Convert mode to MII values */
/* Maps FLOW_MODE_* to MII autonegotiation pause advertisement bits.
 * NOTE(review): entries for the non-NONE modes (doxygen 538-540) were
 * lost in extraction. */
536 static const u16 phy_pause_map[] = {
537  [FLOW_MODE_NONE] = 0,
541 };
542 
543 /* special defines for FIBER (88E1011S only) */
/* NOTE(review): all table entries (doxygen 545-548) were lost in
 * extraction. */
544 static const u16 fiber_pause_map[] = {
549 };
550 
551 
552 /* Check status of Broadcom phy link */
/* Reads the PHY status (latched, hence the elided double read), takes
 * the link down on loss of sync, and on autonegotiation completion
 * decodes duplex and pause resolution from the auxiliary status before
 * reporting link-up.
 * NOTE(review): the status/lpa/aux reads and the flow_status
 * assignments in the pause switch (doxygen 560-561, 574, 581, 600,
 * 603, 606) were lost in extraction. */
553 static void bcom_check_link(struct skge_hw *hw, int port)
554 {
555  struct net_device *dev = hw->dev[port];
556  struct skge_port *skge = netdev_priv(dev);
557  u16 status;
558 
559  /* read twice because of latch */
562 
563  if ((status & PHY_ST_LSYNC) == 0) {
564  xm_link_down(hw, port);
565  return;
566  }
567 
568  if (skge->autoneg == AUTONEG_ENABLE) {
569  u16 lpa, aux;
570 
571  if (!(status & PHY_ST_AN_OVER))
572  return;
573 
575  if (lpa & PHY_B_AN_RF) {
576  DBG(PFX "%s: remote fault\n",
577  dev->name);
578  return;
579  }
580 
582 
583  /* Check Duplex mismatch */
584  switch (aux & PHY_B_AS_AN_RES_MSK) {
585  case PHY_B_RES_1000FD:
586  skge->duplex = DUPLEX_FULL;
587  break;
588  case PHY_B_RES_1000HD:
589  skge->duplex = DUPLEX_HALF;
590  break;
591  default:
592  DBG(PFX "%s: duplex mismatch\n",
593  dev->name);
594  return;
595  }
596 
597  /* We are using IEEE 802.3z/D5.0 Table 37-4 */
598  switch (aux & PHY_B_AS_PAUSE_MSK) {
599  case PHY_B_AS_PAUSE_MSK:
601  break;
602  case PHY_B_AS_PRR:
604  break;
605  case PHY_B_AS_PRT:
607  break;
608  default:
609  skge->flow_status = FLOW_STAT_NONE;
610  }
611  skge->speed = SPEED_1000;
612  }
613 
614  if (!netdev_link_ok(dev))
615  genesis_link_up(skge);
616 }
617 
618 /* Broadcom 5400 only supports giagabit! SysKonnect did not put an additional
619  * Phy on for 100 or 10Mbit operation
 *
 * Initializes the external Broadcom PHY: applies the documented errata
 * workaround register patterns for A1/C0 silicon, disables power
 * management after reset (errata #10523), then configures
 * autonegotiation or forced 1000Mbit operation.
 * NOTE(review): many phy/MMU writes (doxygen 646, 648, 657, 667,
 * 676, 678, 681, 693, 695, 697, 704, 708-712, 715) were lost in
 * extraction.
620  */
621 static void bcom_phy_init(struct skge_port *skge)
622 {
623  struct skge_hw *hw = skge->hw;
624  int port = skge->port;
625  unsigned int i;
626  u16 id1, r, ext, ctl;
627 
628  /* magic workaround patterns for Broadcom */
629  static const struct {
630  u16 reg;
631  u16 val;
632  } A1hack[] = {
633  { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
634  { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
635  { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
636  { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
637  }, C0hack[] = {
638  { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
639  { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
640  };
641 
642  /* read Id from external PHY (all have the same address) */
643  id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
644 
645  /* Optimize MDIO transfer by suppressing preamble. */
647  r |= XM_MMU_NO_PRE;
649 
650  switch (id1) {
651  case PHY_BCOM_ID1_C0:
652  /*
653  * Workaround BCOM Errata for the C0 type.
654  * Write magic patterns to reserved registers.
655  */
656  for (i = 0; i < ARRAY_SIZE(C0hack); i++)
658  C0hack[i].reg, C0hack[i].val);
659 
660  break;
661  case PHY_BCOM_ID1_A1:
662  /*
663  * Workaround BCOM Errata for the A1 type.
664  * Write magic patterns to reserved registers.
665  */
666  for (i = 0; i < ARRAY_SIZE(A1hack); i++)
668  A1hack[i].reg, A1hack[i].val);
669  break;
670  }
671 
672  /*
673  * Workaround BCOM Errata (#10523) for all BCom PHYs.
674  * Disable Power Management after reset.
675  */
677  r |= PHY_B_AC_DIS_PM;
679 
680  /* Dummy read */
682 
683  ext = PHY_B_PEC_EN_LTR; /* enable tx led */
684  ctl = PHY_CT_SP1000; /* always 1000mbit */
685 
686  if (skge->autoneg == AUTONEG_ENABLE) {
687  /*
688  * Workaround BCOM Errata #1 for the C5 type.
689  * 1000Base-T Link Acquisition Failure in Slave Mode
690  * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
691  */
692  u16 adv = PHY_B_1000C_RD;
694  adv |= PHY_B_1000C_AHD;
696  adv |= PHY_B_1000C_AFD;
698 
699  ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
700  } else {
701  if (skge->duplex == DUPLEX_FULL)
702  ctl |= PHY_CT_DUP_MD;
703  /* Force to slave */
705  }
706 
707  /* Set autonegotiation pause parameters */
710 
713 
714  /* Use link status change interrupt */
716 }
717 
/* Initialize the internal XMAC PHY: advertise half/full duplex and
 * restart autonegotiation when enabled, otherwise force the configured
 * duplex.  Finishes by arming the polled link timer, since the internal
 * PHY cannot raise a link interrupt.
 * NOTE(review): the capability tests and the phy writes (doxygen 725,
 * 727, 730, 732, 735, 746) were lost in extraction. */
718 static void xm_phy_init(struct skge_port *skge)
719 {
720  struct skge_hw *hw = skge->hw;
721  int port = skge->port;
722  u16 ctrl = 0;
723 
724  if (skge->autoneg == AUTONEG_ENABLE) {
726  ctrl |= PHY_X_AN_HD;
728  ctrl |= PHY_X_AN_FD;
729 
731 
733 
734  /* Restart Auto-negotiation */
736  } else {
737  /* Set DuplexMode in Config register */
738  if (skge->duplex == DUPLEX_FULL)
739  ctrl |= PHY_CT_DUP_MD;
740  /*
741  * Do NOT enable Auto-negotiation here. This would hold
742  * the link down because no IDLEs are transmitted
743  */
744  }
745 
747 
748  /* Poll PHY for status changes */
749  skge->use_xm_link_timer = 1;
750 }
751 
/* Check XMAC internal PHY link state.  Returns 1 when the link is up
 * (reporting link-up to the core if it was previously down), 0
 * otherwise.  On autonegotiation completion decodes duplex and pause
 * resolution per IEEE 802.3z Table 37-4.
 * NOTE(review): the latched status reads, lpa/res reads, and most
 * flow_status assignments (doxygen 760-761, 774, 781, 799, 801, 805,
 * 809) were lost in extraction. */
752 static int xm_check_link(struct net_device *dev)
753 {
754  struct skge_port *skge = netdev_priv(dev);
755  struct skge_hw *hw = skge->hw;
756  int port = skge->port;
757  u16 status;
758 
759  /* read twice because of latch */
762 
763  if ((status & PHY_ST_LSYNC) == 0) {
764  xm_link_down(hw, port);
765  return 0;
766  }
767 
768  if (skge->autoneg == AUTONEG_ENABLE) {
769  u16 lpa, res;
770 
771  if (!(status & PHY_ST_AN_OVER))
772  return 0;
773 
775  if (lpa & PHY_B_AN_RF) {
776  DBG(PFX "%s: remote fault\n",
777  dev->name);
778  return 0;
779  }
780 
782 
783  /* Check Duplex mismatch */
784  switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
785  case PHY_X_RS_FD:
786  skge->duplex = DUPLEX_FULL;
787  break;
788  case PHY_X_RS_HD:
789  skge->duplex = DUPLEX_HALF;
790  break;
791  default:
792  DBG(PFX "%s: duplex mismatch\n",
793  dev->name);
794  return 0;
795  }
796 
797  /* We are using IEEE 802.3z/D5.0 Table 37-4 */
798  if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
800  (lpa & PHY_X_P_SYM_MD))
802  else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
803  (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
804  /* Enable PAUSE receive, disable PAUSE transmit */
806  else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
807  (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
808  /* Disable PAUSE receive, enable PAUSE transmit */
810  else
811  skge->flow_status = FLOW_STAT_NONE;
812 
813  skge->speed = SPEED_1000;
814  }
815 
816  if (!netdev_link_ok(dev))
817  genesis_link_up(skge);
818  return 1;
819 }
820 
821 /* Poll to check for link coming up.
822  *
823  * Since internal PHY is wired to a level triggered pin, can't
824  * get an interrupt when carrier is detected, need to poll for
825  * link coming up.
 *
 * Samples the GPIO link_sync signal three times before trusting it,
 * then re-enables the link-down interrupt once xm_check_link() reports
 * the link up.
 * NOTE(review): the GPIO sample inside the loop (doxygen 839) and the
 * trailing statement (848) were lost in extraction.
826  */
827 static void xm_link_timer(struct skge_port *skge)
828 {
829  struct net_device *dev = skge->netdev;
830  struct skge_hw *hw = skge->hw;
831  int port = skge->port;
832  int i;
833 
834  /*
835  * Verify that the link by checking GPIO register three times.
836  * This pin has the signal from the link_sync pin connected to it.
837  */
838  for (i = 0; i < 3; i++) {
840  return;
841  }
842 
843  /* Re-enable interrupt to detect link down */
844  if (xm_check_link(dev)) {
845  u16 msk = xm_read16(hw, port, XM_IMSK);
846  msk &= ~XM_IS_INP_ASS;
847  xm_write16(hw, port, XM_IMSK, msk);
849  }
850 }
851 
/* Full MAC/PHY bring-up for one Genesis (XMAC) port: reset and unreset
 * the XMAC, release an external PHY from reset via GPIO, run the
 * appropriate PHY init, program the station address, clear match
 * addresses and MIB counters, and configure RX/TX thresholds, event
 * masks, the MAC arbiter and the MAC FIFOs.
 * NOTE(review): large parts of this function (many doxygen lines, e.g.
 * 861-863, 872, 887, 890, 900, 911-915, 921, 931, 934, 952, 960, 967,
 * 970, 973-981, 984-991, 994) were lost in extraction — treat this body
 * as a skeleton only. */
852 static void genesis_mac_init(struct skge_hw *hw, int port)
853 {
854  struct net_device *dev = hw->dev[port];
855  struct skge_port *skge = netdev_priv(dev);
856  int i;
857  u32 r;
858  const u8 zero[6] = { 0 };
859 
860  for (i = 0; i < 10; i++) {
864  goto reset_ok;
865  udelay(1);
866  }
867 
868  DBG(PFX "%s: genesis reset failed\n", dev->name);
869 
870  reset_ok:
871  /* Unreset the XMAC. */
873 
874  /*
875  * Perform additional initialization for external PHYs,
876  * namely for the 1000baseTX cards that use the XMAC's
877  * GMII mode.
878  */
879  if (hw->phy_type != SK_PHY_XMAC) {
880  /* Take external Phy out of reset */
881  r = skge_read32(hw, B2_GP_IO);
882  if (port == 0)
883  r |= GP_DIR_0|GP_IO_0;
884  else
885  r |= GP_DIR_2|GP_IO_2;
886 
888 
889  /* Enable GMII interface */
891  }
892 
893 
894  switch(hw->phy_type) {
895  case SK_PHY_XMAC:
896  xm_phy_init(skge);
897  break;
898  case SK_PHY_BCOM:
899  bcom_phy_init(skge);
901  }
902 
903  /* Set Station Address */
904  xm_outaddr(hw, port, XM_SA, dev->ll_addr);
905 
906  /* We don't use match addresses so clear */
907  for (i = 1; i < 16; i++)
908  xm_outaddr(hw, port, XM_EXM(i), zero);
909 
910  /* Clear MIB counters */
913  /* Clear two times according to Errata #3 */
916 
917  /* configure Rx High Water Mark (XM_RX_HI_WM) */
918  xm_write16(hw, port, XM_RX_HI_WM, 1450);
919 
920  /* We don't need the FCS appended to the packet. */
922 
923  if (skge->duplex == DUPLEX_HALF) {
924  /*
925  * If in manual half duplex mode the other side might be in
926  * full duplex mode, so ignore if a carrier extension is not seen
927  * on frames received
928  */
929  r |= XM_RX_DIS_CEXT;
930  }
932 
933  /* We want short frames padded to 60 bytes. */
935 
936  xm_write16(hw, port, XM_TX_THR, 512);
937 
938  /*
939  * Enable the reception of all error frames. This is is
940  * a necessary evil due to the design of the XMAC. The
941  * XMAC's receive FIFO is only 8K in size, however jumbo
942  * frames can be up to 9000 bytes in length. When bad
943  * frame filtering is enabled, the XMAC's RX FIFO operates
944  * in 'store and forward' mode. For this to work, the
945  * entire frame has to fit into the FIFO, but that means
946  * that jumbo frames larger than 8192 bytes will be
947  * truncated. Disabling all bad frame filtering causes
948  * the RX FIFO to operate in streaming mode, in which
949  * case the XMAC will start transferring frames out of the
950  * RX FIFO as soon as the FIFO threshold is reached.
951  */
953 
954 
955  /*
956  * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
957  * - Enable all bits excepting 'Octets Rx OK Low CntOv'
958  * and 'Octets Rx OK Hi Cnt Ov'.
959  */
961 
962  /*
963  * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
964  * - Enable all bits excepting 'Octets Tx OK Low CntOv'
965  * and 'Octets Tx OK Hi Cnt Ov'.
966  */
968 
969  /* Configure MAC arbiter */
971 
972  /* configure timeout values */
977 
982 
983  /* Configure Rx MAC FIFO */
987 
988  /* Configure Tx MAC FIFO */
992 
993  /* enable timeout timers */
995  (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
996 }
997 
/* Stop one Genesis (XMAC) port: disable TX/RX, reset the port, clear
 * the packet arbiter timeout IRQ, reset the MAC with a bounded retry
 * loop, hold an external PHY in reset via GPIO, and disable the MMU
 * RX/TX enables.
 * NOTE(review): several register accesses (doxygen 1006-1008, 1013,
 * 1017, 1019-1020, 1026, 1034-1035, 1038-1039, 1042) were lost in
 * extraction; 'cmd' is only used in elided lines. */
998 static void genesis_stop(struct skge_port *skge)
999 {
1000  struct skge_hw *hw = skge->hw;
1001  int port = skge->port;
1002  unsigned retries = 1000;
1003  u16 cmd;
1004 
1005  /* Disable Tx and Rx */
1009 
1010  genesis_reset(hw, port);
1011 
1012  /* Clear Tx packet arbiter timeout IRQ */
1014  port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
1015 
1016  /* Reset the MAC */
1018  do {
1021  break;
1022  } while (--retries > 0);
1023 
1024  /* For external PHYs there must be special handling */
1025  if (hw->phy_type != SK_PHY_XMAC) {
1027  if (port == 0) {
1028  reg |= GP_DIR_0;
1029  reg &= ~GP_IO_0;
1030  } else {
1031  reg |= GP_DIR_2;
1032  reg &= ~GP_IO_2;
1033  }
1036  }
1037 
1040  & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1041 
1043 }
1044 
/* Complete link-up handling for a Genesis port: configure pause-frame
 * reception/generation according to the resolved flow-control status,
 * enable TX-underrun detection, set GMII full-duplex for external PHYs,
 * re-enable Broadcom PHY power management (errata #10523), enable
 * RX/TX, and report link-up to the core.
 * NOTE(review): several reads/writes (doxygen 1052, 1059, 1066, 1085,
 * 1094, 1107, 1116-1117, 1119, 1123-1124) were lost in extraction. */
1045 static void genesis_link_up(struct skge_port *skge)
1046 {
1047  struct skge_hw *hw = skge->hw;
1048  int port = skge->port;
1049  u16 cmd, msk;
1050  u32 mode;
1051 
1053 
1054  /*
1055  * enabling pause frame reception is required for 1000BT
1056  * because the XMAC is not reset if the link is going down
1057  */
1058  if (skge->flow_status == FLOW_STAT_NONE ||
1060  /* Disable Pause Frame Reception */
1061  cmd |= XM_MMU_IGN_PF;
1062  else
1063  /* Enable Pause Frame Reception */
1064  cmd &= ~XM_MMU_IGN_PF;
1065 
1067 
1068  mode = xm_read32(hw, port, XM_MODE);
1069  if (skge->flow_status== FLOW_STAT_SYMMETRIC ||
1070  skge->flow_status == FLOW_STAT_LOC_SEND) {
1071  /*
1072  * Configure Pause Frame Generation
1073  * Use internal and external Pause Frame Generation.
1074  * Sending pause frames is edge triggered.
1075  * Send a Pause frame with the maximum pause time if
1076  * internal oder external FIFO full condition occurs.
1077  * Send a zero pause time frame to re-start transmission.
1078  */
1079  /* XM_PAUSE_DA = '010000C28001' (default) */
1080  /* XM_MAC_PTIME = 0xffff (maximum) */
1081  /* remember this value is defined in big endian (!) */
1082  xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
1083 
1084  mode |= XM_PAUSE_MODE;
1086  } else {
1087  /*
1088  * disable pause frame generation is required for 1000BT
1089  * because the XMAC is not reset if the link is going down
1090  */
1091  /* Disable Pause Mode in Mode Register */
1092  mode &= ~XM_PAUSE_MODE;
1093 
1095  }
1096 
1097  xm_write32(hw, port, XM_MODE, mode);
1098 
1099  /* Turn on detection of Tx underrun */
1100  msk = xm_read16(hw, port, XM_IMSK);
1101  msk &= ~XM_IS_TXF_UR;
1102  xm_write16(hw, port, XM_IMSK, msk);
1103 
1104  xm_read16(hw, port, XM_ISRC);
1105 
1106  /* get MMU Command Reg. */
1108  if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
1109  cmd |= XM_MMU_GMII_FD;
1110 
1111  /*
1112  * Workaround BCOM Errata (#10523) for all BCom Phys
1113  * Enable Power Management after link up
1114  */
1115  if (hw->phy_type == SK_PHY_BCOM) {
1118  & ~PHY_B_AC_DIS_PM);
1120  }
1121 
1122  /* enable Rx/Tx */
1125  skge_link_up(skge);
1126 }
1127 
1128 
/* Broadcom PHY interrupt handler: logs pair-swap errors, applies the
 * "NO HCD" loopback toggle errata workaround, and re-checks link state
 * on autoneg-complete or link-status-change interrupts.
 * NOTE(review): the interrupt-status read, the ctrl read, and the final
 * bcom_check_link() call (doxygen 1135, 1147-1148, 1150, 1155) were
 * lost in extraction. */
1129 static inline void bcom_phy_intr(struct skge_port *skge)
1130 {
1131  struct skge_hw *hw = skge->hw;
1132  int port = skge->port;
1133  u16 isrc;
1134 
1136  DBGIO(PFX "%s: phy interrupt status 0x%x\n",
1137  skge->netdev->name, isrc);
1138 
1139  if (isrc & PHY_B_IS_PSE)
1140  DBG(PFX "%s: uncorrectable pair swap error\n",
1141  hw->dev[port]->name);
1142 
1143  /* Workaround BCom Errata:
1144  * enable and disable loopback mode if "NO HCD" occurs.
1145  */
1146  if (isrc & PHY_B_IS_NO_HDCL) {
1149  ctrl | PHY_CT_LOOP);
1151  ctrl & ~PHY_CT_LOOP);
1152  }
1153 
1154  if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
1156 
1157 }
1158 
/* GMAC (Yukon) PHY register write via the SMI interface.  Polls up to
 * PHY_RETRIES times for completion; returns 0 on success, -EIO (with a
 * debug message) on timeout.
 * NOTE(review): the data write and the completion test (doxygen
 * 1163-1164, 1169) were lost in extraction. */
1159 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1160 {
1161  int i;
1162 
1165  GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1166  for (i = 0; i < PHY_RETRIES; i++) {
1167  udelay(1);
1168 
1170  return 0;
1171  }
1172 
1173  DBG(PFX "%s: phy write timeout port %x reg %x val %x\n",
1174  hw->dev[port]->name,
1175  port, reg, val);
1176  return -EIO;
1177 }
1178 
/* Low-level GMAC (Yukon) PHY register read via the SMI interface.
 * Polls up to PHY_RETRIES times for data-ready; returns 0 with *val
 * filled in on success, -ETIMEDOUT on timeout.
 * NOTE(review): the SMI command tail, ready test, and data read
 * (doxygen 1183, 1185, 1189, 1195) were lost in extraction. */
1179 static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
1180 {
1181  int i;
1182 
1184  GM_SMI_CT_PHY_AD(hw->phy_addr)
1186 
1187  for (i = 0; i < PHY_RETRIES; i++) {
1188  udelay(1);
1190  goto ready;
1191  }
1192 
1193  return -ETIMEDOUT;
1194  ready:
1196  return 0;
1197 }
1198 
1199 static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1200 {
1201  u16 v = 0;
1202  if (__gm_phy_read(hw, port, reg, &v))
1203  DBG(PFX "%s: phy read timeout port %x reg %x val %x\n",
1204  hw->dev[port]->name,
1205  port, reg, v);
1206  return v;
1207 }
1208 
1209 /* Marvell Phy Initialization */
/* Configures the Marvell PHY for autonegotiation (downshift counters,
 * 1000BT/100/10 advertisement, pause capabilities) or for forced
 * speed/duplex, then sets the PHY interrupt mask.
 * NOTE(review): many phy reads/writes (doxygen 1216, 1219-1220, 1224,
 * 1227, 1232, 1240, 1242, 1244, 1246, 1248, 1250, 1256, 1258, 1265,
 * 1285, 1287-1288, 1292, 1294) were lost in extraction. */
1210 static void yukon_init(struct skge_hw *hw, int port)
1211 {
1212  struct skge_port *skge = netdev_priv(hw->dev[port]);
1213  u16 ctrl, ct1000, adv;
1214 
1215  if (skge->autoneg == AUTONEG_ENABLE) {
1217 
1218  ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
1221 
1222  ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1223 
1225  }
1226 
1228  if (skge->autoneg == AUTONEG_DISABLE)
1229  ctrl &= ~PHY_CT_ANE;
1230 
1231  ctrl |= PHY_CT_RESET;
1233 
1234  ctrl = 0;
1235  ct1000 = 0;
1236  adv = PHY_AN_CSMA;
1237 
1238  if (skge->autoneg == AUTONEG_ENABLE) {
1239  if (hw->copper) {
1241  ct1000 |= PHY_M_1000C_AFD;
1243  ct1000 |= PHY_M_1000C_AHD;
1245  adv |= PHY_M_AN_100_FD;
1247  adv |= PHY_M_AN_100_HD;
1249  adv |= PHY_M_AN_10_FD;
1251  adv |= PHY_M_AN_10_HD;
1252 
1253  /* Set Flow-control capabilities */
1254  adv |= phy_pause_map[skge->flow_control];
1255  } else {
1257  adv |= PHY_M_AN_1000X_AFD;
1259  adv |= PHY_M_AN_1000X_AHD;
1260 
1261  adv |= fiber_pause_map[skge->flow_control];
1262  }
1263 
1264  /* Restart Auto-negotiation */
1266  } else {
1267  /* forced speed/duplex settings */
1268  ct1000 = PHY_M_1000C_MSE;
1269 
1270  if (skge->duplex == DUPLEX_FULL)
1271  ctrl |= PHY_CT_DUP_MD;
1272 
1273  switch (skge->speed) {
1274  case SPEED_1000:
1275  ctrl |= PHY_CT_SP1000;
1276  break;
1277  case SPEED_100:
1278  ctrl |= PHY_CT_SP100;
1279  break;
1280  }
1281 
1282  ctrl |= PHY_CT_RESET;
1283  }
1284 
1286 
1289 
1290  /* Enable phy interrupt on autonegotiation complete (or link up) */
1291  if (skge->autoneg == AUTONEG_ENABLE)
1293  else
1295 }
1296 
/* Reset a Yukon port's PHY/MAC state: mask PHY IRQs and clear the
 * multicast hash.
 * NOTE(review): additional clearing writes (doxygen 1301-1303,
 * 1305-1307) were lost in extraction. */
1297 static void yukon_reset(struct skge_hw *hw, int port)
1298 {
1299  gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
1300  gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
1304 
1308 }
1309 
1310 /* Apparently, early versions of Yukon-Lite had wrong chip_id? */
/* Detects Yukon-Lite rev A0 by probing whether B2_FAR+3 is writable
 * (it is read-only on A0).  Returns non-zero for A0.
 * NOTE(review): the write restoring the saved B2_FAR value (doxygen
 * line 1322) was lost in extraction — 'reg' is saved but its restore is
 * not visible here. */
1311 static int is_yukon_lite_a0(struct skge_hw *hw)
1312 {
1313  u32 reg;
1314  int ret;
1315 
1316  if (hw->chip_id != CHIP_ID_YUKON)
1317  return 0;
1318 
1319  reg = skge_read32(hw, B2_FAR);
1320  skge_write8(hw, B2_FAR + 3, 0xff);
1321  ret = (skge_read8(hw, B2_FAR + 3) != 0);
1323  return ret;
1324 }
1325 
/* Full MAC/PHY bring-up for one Yukon (GMAC) port: COMA-mode PHY reset
 * workaround, hard reset, GMAC config, forced or autonegotiated
 * speed/duplex, flow-control setup, PHY init via yukon_init(), MIB
 * clear, TX/RX control, serial mode, station addresses, counter IRQ
 * masks, and RX/TX MAC FIFO configuration (including the Yukon-Lite A0
 * flush quirk).
 * NOTE(review): a large number of register writes (doxygen 1338,
 * 1342-1343, 1351, 1355-1357, 1360-1362, 1365-1367, 1372, 1376, 1379,
 * 1386, 1390-1391, 1395, 1403-1404, 1409-1410, 1414, 1417, 1420-1421,
 * 1427-1430, 1433-1435, 1437, 1440, 1442, 1445-1447, 1452-1453,
 * 1459-1460, 1466, 1469-1470) were lost in extraction — treat this body
 * as a skeleton only. */
1326 static void yukon_mac_init(struct skge_hw *hw, int port)
1327 {
1328  struct skge_port *skge = netdev_priv(hw->dev[port]);
1329  int i;
1330  u32 reg;
1331  const u8 *addr = hw->dev[port]->ll_addr;
1332 
1333  /* WA code for COMA mode -- set PHY reset */
1334  if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1335  hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1336  reg = skge_read32(hw, B2_GP_IO);
1337  reg |= GP_DIR_9 | GP_IO_9;
1339  }
1340 
1341  /* hard reset */
1344 
1345  /* WA code for COMA mode -- clear PHY reset */
1346  if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1347  hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1348  reg = skge_read32(hw, B2_GP_IO);
1349  reg |= GP_DIR_9;
1350  reg &= ~GP_IO_9;
1352  }
1353 
1354  /* Set hardware config mode */
1358 
1359  /* Clear GMC reset */
1363 
1364  if (skge->autoneg == AUTONEG_DISABLE) {
1368 
1369  switch (skge->speed) {
1370  case SPEED_1000:
1371  reg &= ~GM_GPCR_SPEED_100;
1373  break;
1374  case SPEED_100:
1375  reg &= ~GM_GPCR_SPEED_1000;
1377  break;
1378  case SPEED_10:
1380  break;
1381  }
1382 
1383  if (skge->duplex == DUPLEX_FULL)
1384  reg |= GM_GPCR_DUP_FULL;
1385  } else
1387 
1388  switch (skge->flow_control) {
1389  case FLOW_MODE_NONE:
1392  break;
1393  case FLOW_MODE_LOC_SEND:
1394  /* disable Rx flow-control */
1396  break;
1397  case FLOW_MODE_SYMMETRIC:
1398  case FLOW_MODE_SYM_OR_REM:
1399  /* enable Tx & Rx flow-control */
1400  break;
1401  }
1402 
1405 
1406  yukon_init(hw, port);
1407 
1408  /* MIB clear */
1411 
1412  for (i = 0; i < GM_MIB_CNT_SIZE; i++)
1413  gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
1415 
1416  /* transmit control */
1418 
1419  /* receive control reg: unicast + multicast + no FCS */
1422 
1423  /* transmit flow control */
1424  gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
1425 
1426  /* transmit parameter */
1431 
1432  /* configure the Serial Mode Register */
1436 
1438 
1439  /* physical address: used for pause frames */
1441  /* virtual address for data */
1443 
1444  /* enable interrupt mask for counter overflows */
1448 
1449  /* Initialize Mac Fifo */
1450 
1451  /* Configure Rx MAC FIFO */
1454 
1455  /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
1456  if (is_yukon_lite_a0(hw))
1457  reg &= ~GMF_RX_F_FL_ON;
1458 
1461  /*
1462  * because Pause Packet Truncation in GMAC is not working
1463  * we have to increase the Flush Threshold to 64 bytes
1464  * in order to flush pause packets in Rx FIFO on Yukon-1
1465  */
1467 
1468  /* Configure Tx MAC FIFO */
1471 }
1472 
1473 /* Go into power down mode */
/* NOTE(review): this listing elides several lines (doxygen line numbers skip);
 * the missing lines are presumably gm_phy_read()/gm_phy_write() calls that
 * load and store the PHY control register around the visible bit flips --
 * confirm against the full source. */
1474 static void yukon_suspend(struct skge_hw *hw, int port)
1475 {
1476  u16 ctrl;
1477 
1481 
/* Request a PHY reset before entering power-down */
1483  ctrl |= PHY_CT_RESET;
1485 
1486  /* switch IEEE compatible power down mode on */
1488  ctrl |= PHY_CT_PDOWN;
1490 }
1491 
/* Stop a Yukon port: reset the MAC, power the PHY down, and assert GPHY
 * reset.  NOTE(review): register-write lines are elided in this listing
 * (doxygen line numbers skip) -- verify against the full source. */
1492 static void yukon_stop(struct skge_port *skge)
1493 {
1494  struct skge_hw *hw = skge->hw;
1495  int port = skge->port;
1496 
1498  yukon_reset(hw, port);
1499 
1504 
1505  yukon_suspend(hw, port);
1506 
1507  /* set GPHY Control reset */
1510 }
1511 
1512 static u16 yukon_speed(const struct skge_hw *hw __unused, u16 aux)
1513 {
1514  switch (aux & PHY_M_PS_SPEED_MSK) {
1515  case PHY_M_PS_SPEED_1000:
1516  return SPEED_1000;
1517  case PHY_M_PS_SPEED_100:
1518  return SPEED_100;
1519  default:
1520  return SPEED_10;
1521  }
1522 }
1523 
/* Bring a Yukon port's link up: enable Tx FIFO underrun reporting, force
 * full duplex where appropriate, enable Rx/Tx and notify the stack.
 * NOTE(review): the GMAC register read/write lines are elided in this
 * listing (doxygen line numbers skip). */
1524 static void yukon_link_up(struct skge_port *skge)
1525 {
1526  struct skge_hw *hw = skge->hw;
1527  int port = skge->port;
1528  u16 reg;
1529 
1530  /* Enable Transmit FIFO Underrun */
1532 
1534  if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
1535  reg |= GM_GPCR_DUP_FULL;
1536 
1537  /* enable Rx/Tx */
1540 
1542  skge_link_up(skge);
1543 }
1544 
/* Handle link-down on a Yukon port: adjust the advertised pause bits if the
 * remote end was the pause sender, notify the stack, and re-init the PHY.
 * NOTE(review): PHY read/write lines are elided in this listing. */
1545 static void yukon_link_down(struct skge_port *skge)
1546 {
1547  struct skge_hw *hw = skge->hw;
1548  int port = skge->port;
1549  u16 ctrl;
1550 
1554 
1555  if (skge->flow_status == FLOW_STAT_REM_SEND) {
1557  ctrl |= PHY_M_AN_ASP;
1558  /* restore Asymmetric Pause bit */
1560  }
1561 
1562  skge_link_down(skge);
1563 
/* Restart autonegotiation from scratch */
1564  yukon_init(hw, port);
1565 }
1566 
/* Service a Marvell PHY interrupt: on autonegotiation completion, validate
 * the result, record speed/duplex/flow-control and bring the link up; on
 * speed/duplex/link-state change interrupts, update state accordingly.
 * NOTE(review): several lines are elided in this listing (e.g. the reads
 * feeding the remote-fault and master/slave checks, and the flow_status
 * assignments in the pause switch) -- verify against the full source. */
1567 static void yukon_phy_intr(struct skge_port *skge)
1568 {
1569  struct skge_hw *hw = skge->hw;
1570  int port = skge->port;
1571  const char *reason = NULL;
1572  u16 istatus, phystat;
1573 
1574  istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
1575  phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
1576 
1577  DBGIO(PFX "%s: phy interrupt status 0x%x 0x%x\n",
1578  skge->netdev->name, istatus, phystat);
1579 
1580  if (istatus & PHY_M_IS_AN_COMPL) {
1582  & PHY_M_AN_RF) {
1583  reason = "remote fault";
1584  goto failed;
1585  }
1586 
1588  reason = "master/slave fault";
1589  goto failed;
1590  }
1591 
1592  if (!(phystat & PHY_M_PS_SPDUP_RES)) {
1593  reason = "speed/duplex";
1594  goto failed;
1595  }
1596 
1597  skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
1599  skge->speed = yukon_speed(hw, phystat);
1600 
1601  /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1602  switch (phystat & PHY_M_PS_PAUSE_MSK) {
1603  case PHY_M_PS_PAUSE_MSK:
1605  break;
1606  case PHY_M_PS_RX_P_EN:
1608  break;
1609  case PHY_M_PS_TX_P_EN:
1611  break;
1612  default:
1613  skge->flow_status = FLOW_STAT_NONE;
1614  }
1615 
1616  if (skge->flow_status == FLOW_STAT_NONE ||
1617  (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
1619  else
1621  yukon_link_up(skge);
1622  return;
1623  }
1624 
1625  if (istatus & PHY_M_IS_LSP_CHANGE)
1626  skge->speed = yukon_speed(hw, phystat);
1627 
1628  if (istatus & PHY_M_IS_DUP_CHANGE)
1629  skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1630  if (istatus & PHY_M_IS_LST_CHANGE) {
1631  if (phystat & PHY_M_PS_LINK_UP)
1632  yukon_link_up(skge);
1633  else
1634  yukon_link_down(skge);
1635  }
1636  return;
1637  failed:
1638  DBG(PFX "%s: autonegotiation failed (%s)\n",
1639  skge->netdev->name, reason);
1640 
1641  /* XXX restart autonegotiation? */
1642 }
1643 
/* Carve out a RAM buffer region for queue q.  start/len are byte values,
 * converted to 8-byte units for the RAM buffer registers.  Rx queues get
 * upper/lower pause thresholds; Tx queues run store-and-forward.
 * NOTE(review): the RB_START/threshold/enable register writes are elided
 * in this listing (doxygen line numbers skip). */
1644 static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
1645 {
1646  u32 end;
1647 
/* Hardware works in units of 8 bytes */
1648  start /= 8;
1649  len /= 8;
1650  end = start + len - 1;
1651 
1656  skge_write32(hw, RB_ADDR(q, RB_END), end);
1657 
1658  if (q == Q_R1 || q == Q_R2) {
1659  /* Set thresholds on receive queue's */
1661  start + (2*len)/3);
1663  start + (len/3));
1664  } else {
1665  /* Enable store & forward on Tx queue's because
1666  * Tx FIFO is only 4K on Genesis and 1K on Yukon
1667  */
1669  }
1670 
1672 }
1673 
1674 /* Setup Bus Memory Interface */
/* Program a BMU queue: set the FIFO watermark and point the queue at the
 * DMA address of descriptor element e.  NOTE(review): one line (1686,
 * presumably a Q_CSR write) is elided in this listing. */
1675 static void skge_qset(struct skge_port *skge, u16 q,
1676  const struct skge_element *e)
1677 {
1678  struct skge_hw *hw = skge->hw;
1679  u32 watermark = 0x600;
/* Bus address of this element's descriptor within the shared ring memory */
1680  u64 base = skge->dma + (e->desc - skge->mem);
1681 
1682  /* optimization to reduce window on 32bit/33mhz */
1683  if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
1684  watermark /= 2;
1685 
1687  skge_write32(hw, Q_ADDR(q, Q_F), watermark);
1688  skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
1689  skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
1690 }
1691 
1692 void skge_free(struct net_device *dev)
1693 {
1694  struct skge_port *skge = netdev_priv(dev);
1695 
1696  free(skge->rx_ring.start);
1697  skge->rx_ring.start = NULL;
1698 
1699  free(skge->tx_ring.start);
1700  skge->tx_ring.start = NULL;
1701 
1702  free_phys(skge->mem, RING_SIZE);
1703  skge->mem = NULL;
1704  skge->dma = 0;
1705 }
1706 
/* Open a port: allocate and zero descriptor memory, build the Rx and Tx
 * rings, initialise the MAC, assign RAM buffer space and queue registers,
 * start the receive BMU and unmask the port's interrupts.
 * NOTE(review): some lines are elided in this listing (the malloc_phys()
 * call at 1717, rx refill at 1740, MAC-init calls at 1749/1751, and the
 * CSR_START write at 1766) -- verify against the full source. */
1707 static int skge_up(struct net_device *dev)
1708 {
1709  struct skge_port *skge = netdev_priv(dev);
1710  struct skge_hw *hw = skge->hw;
1711  int port = skge->port;
1712  u32 chunk, ram_addr;
1713  int err;
1714 
1715  DBG2(PFX "%s: enabling interface\n", dev->name);
1716 
1718  skge->dma = virt_to_bus(skge->mem);
1719  if (!skge->mem)
1720  return -ENOMEM;
1721  memset(skge->mem, 0, RING_SIZE);
1722 
/* Descriptor memory must be 8-byte aligned for the BMU */
1723  assert(!(skge->dma & 7));
1724 
1725  /* FIXME: find out whether 64 bit iPXE will be loaded > 4GB */
1726  if ((u64)skge->dma >> 32 != ((u64) skge->dma + RING_SIZE) >> 32) {
1727  DBG(PFX "pci_alloc_consistent region crosses 4G boundary\n");
1728  err = -EINVAL;
1729  goto err;
1730  }
1731 
1732  err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma, NUM_RX_DESC);
1733  if (err)
1734  goto err;
1735 
1736  /* this call relies on e->iob and d->control to be 0
1737  * This is assured by calling memset() on skge->mem and using zalloc()
1738  * for the skge_element structures.
1739  */
1741 
1742  err = skge_ring_alloc(&skge->tx_ring, skge->mem + RX_RING_SIZE,
1743  skge->dma + RX_RING_SIZE, NUM_TX_DESC);
1744  if (err)
1745  goto err;
1746 
1747  /* Initialize MAC */
1748  if (hw->chip_id == CHIP_ID_GENESIS)
1750  else
1752 
1753  /* Configure RAMbuffers - equally between ports and tx/rx */
1754  chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
1755  ram_addr = hw->ram_offset + 2 * chunk * port;
1756 
1757  skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
1758  skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
1759 
/* Tx ring must be empty at this point */
1760  assert(!(skge->tx_ring.to_use != skge->tx_ring.to_clean));
1761  skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
1762  skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
1763 
1764  /* Start receiver BMU */
1765  wmb();
1767  skge_led(skge, LED_MODE_ON);
1768 
1769  hw->intr_mask |= portmask[port];
1770  skge_write32(hw, B0_IMSK, hw->intr_mask);
1771 
1772  return 0;
1773 
1774  err:
1775  skge_rx_clean(skge);
1776  skge_free(dev);
1777 
1778  return err;
1779 }
1780 
1781 /* stop receiver */
/* NOTE(review): the entire body (lines 1784-1787, presumably the Rx BMU
 * stop and RAM buffer reset register writes) is elided in this listing. */
1782 static void skge_rx_stop(struct skge_hw *hw, int port)
1783 {
1788 }
1789 
/* Close a port: mask interrupts, stop the MAC/PHY, halt the transmitter,
 * Tx arbiter and FIFOs, stop the receiver, and free all ring resources.
 * Safe to call when the port was never brought up (skge->mem NULL check).
 * NOTE(review): most of the stop-sequence register writes are elided in
 * this listing (doxygen line numbers skip). */
1790 static void skge_down(struct net_device *dev)
1791 {
1792  struct skge_port *skge = netdev_priv(dev);
1793  struct skge_hw *hw = skge->hw;
1794  int port = skge->port;
1795 
1796  if (skge->mem == NULL)
1797  return;
1798 
1799  DBG2(PFX "%s: disabling interface\n", dev->name);
1800 
1801  if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
1802  skge->use_xm_link_timer = 0;
1803 
1805 
1806  hw->intr_mask &= ~portmask[port];
1807  skge_write32(hw, B0_IMSK, hw->intr_mask);
1808 
1809  skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
1810  if (hw->chip_id == CHIP_ID_GENESIS)
1811  genesis_stop(skge);
1812  else
1813  yukon_stop(skge);
1814 
1815  /* Stop transmitter */
1819 
1820 
1821  /* Disable Force Sync bit and Enable Alloc bit */
1824 
1825  /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1828 
1829  /* Reset PCI FIFO */
1832 
1833  /* Reset the RAM Buffer async Tx queue */
1835 
1836  skge_rx_stop(hw, port);
1837 
1838  if (hw->chip_id == CHIP_ID_GENESIS) {
1841  } else {
1844  }
1845 
1846  skge_led(skge, LED_MODE_OFF);
1847 
1848  skge_tx_clean(dev);
1849 
1850  skge_rx_clean(skge);
1851 
1852  skge_free(dev);
1853  return;
1854 }
1855 
1856 static inline int skge_tx_avail(const struct skge_ring *ring)
1857 {
1858  mb();
1859  return ((ring->to_clean > ring->to_use) ? 0 : NUM_TX_DESC)
1860  + (ring->to_clean - ring->to_use) - 1;
1861 }
1862 
/* Queue one I/O buffer for transmission.  Fills the next Tx descriptor
 * with the buffer's bus address and length, hands it to the hardware via
 * BMU_OWN, and advances to_use.  Returns -EBUSY when the ring is full.
 * NOTE(review): lines 1887 (presumably EOF/pad control bits) and 1893
 * (presumably the CSR_START doorbell write) are elided in this listing. */
1863 static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob)
1864 {
1865  struct skge_port *skge = netdev_priv(dev);
1866  struct skge_hw *hw = skge->hw;
1867  struct skge_element *e;
1868  struct skge_tx_desc *td;
1869  u32 control, len;
1870  u64 map;
1871 
1872  if (skge_tx_avail(&skge->tx_ring) < 1)
1873  return -EBUSY;
1874 
1875  e = skge->tx_ring.to_use;
1876  td = e->desc;
/* Descriptor must already have been released by the hardware */
1877  assert(!(td->control & BMU_OWN));
1878  e->iob = iob;
1879  len = iob_len(iob);
1880  map = virt_to_bus(iob->data);
1881 
1882  td->dma_lo = map;
1883  td->dma_hi = map >> 32;
1884 
1885  control = BMU_CHECK;
1886 
1888  /* Make sure all the descriptors written */
1889  wmb();
/* Setting BMU_OWN last transfers ownership to the NIC */
1890  td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
1891  wmb();
1892 
1894 
1895  DBGIO(PFX "%s: tx queued, slot %td, len %d\n",
1896  dev->name, e - skge->tx_ring.start, (unsigned int)len);
1897 
1898  skge->tx_ring.to_use = e->next;
1899  wmb();
1900 
1901  if (skge_tx_avail(&skge->tx_ring) <= 1) {
1902  DBG(PFX "%s: transmit queue full\n", dev->name);
1903  }
1904 
1905  return 0;
1906 }
1907 
1908 /* Free all buffers in transmit ring */
1909 static void skge_tx_clean(struct net_device *dev)
1910 {
1911  struct skge_port *skge = netdev_priv(dev);
1912  struct skge_element *e;
1913 
1914  for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
1915  struct skge_tx_desc *td = e->desc;
1916  td->control = 0;
1917  }
1918 
1919  skge->tx_ring.to_clean = e;
1920 }
1921 
1922 static inline u16 phy_length(const struct skge_hw *hw, u32 status)
1923 {
1924  if (hw->chip_id == CHIP_ID_GENESIS)
1925  return status >> XMR_FS_LEN_SHIFT;
1926  else
1927  return status >> GMR_FS_LEN_SHIFT;
1928 }
1929 
1930 static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
1931 {
1932  if (hw->chip_id == CHIP_ID_GENESIS)
1933  return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
1934  else
1935  return (status & GMR_FS_ANY_ERR) ||
1936  (status & GMR_FS_RX_OK) == 0;
1937 }
1938 
1939 /* Free all buffers in Tx ring which are no longer owned by device */
1940 static void skge_tx_done(struct net_device *dev)
1941 {
1942  struct skge_port *skge = netdev_priv(dev);
1943  struct skge_ring *ring = &skge->tx_ring;
1944  struct skge_element *e;
1945 
1946  skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
1947 
1948  for (e = ring->to_clean; e != ring->to_use; e = e->next) {
1949  u32 control = ((const struct skge_tx_desc *) e->desc)->control;
1950 
1951  if (control & BMU_OWN)
1952  break;
1953 
1954  netdev_tx_complete(dev, e->iob);
1955  }
1956  skge->tx_ring.to_clean = e;
1957 
1958  /* Can run lockless until we need to synchronize to restart queue. */
1959  mb();
1960 }
1961 
1962 static void skge_rx_refill(struct net_device *dev)
1963 {
1964  struct skge_port *skge = netdev_priv(dev);
1965  struct skge_ring *ring = &skge->rx_ring;
1966  struct skge_element *e;
1967  struct io_buffer *iob;
1968  struct skge_rx_desc *rd;
1969  u32 control;
1970  int i;
1971 
1972  for (i = 0; i < NUM_RX_DESC; i++) {
1973  e = ring->to_clean;
1974  rd = e->desc;
1975  iob = e->iob;
1976  control = rd->control;
1977 
1978  /* nothing to do here */
1979  if (iob || (control & BMU_OWN))
1980  continue;
1981 
1982  DBG2("refilling rx desc %zd: ", (ring->to_clean - ring->start));
1983 
1984  iob = alloc_iob(RX_BUF_SIZE);
1985  if (iob) {
1986  skge_rx_setup(skge, e, iob, RX_BUF_SIZE);
1987  } else {
1988  DBG("descr %zd: alloc_iob() failed\n",
1989  (ring->to_clean - ring->start));
1990  /* We pass the descriptor to the NIC even if the
1991  * allocation failed. The card will stop as soon as it
1992  * encounters a descriptor with the OWN bit set to 0,
1993  * thus never getting to the next descriptor that might
1994  * contain a valid io_buffer. This would effectively
1995  * stall the receive.
1996  */
1997  skge_rx_setup(skge, e, NULL, 0);
1998  }
1999 
2000  ring->to_clean = e->next;
2001  }
2002 }
2003 
2004 static void skge_rx_done(struct net_device *dev)
2005 {
2006  struct skge_port *skge = netdev_priv(dev);
2007  struct skge_ring *ring = &skge->rx_ring;
2008  struct skge_rx_desc *rd;
2009  struct skge_element *e;
2010  struct io_buffer *iob;
2011  u32 control;
2012  u16 len;
2013  int i;
2014 
2015  e = ring->to_clean;
2016  for (i = 0; i < NUM_RX_DESC; i++) {
2017  iob = e->iob;
2018  rd = e->desc;
2019 
2020  rmb();
2021  control = rd->control;
2022 
2023  if ((control & BMU_OWN))
2024  break;
2025 
2026  if (!iob)
2027  continue;
2028 
2029  len = control & BMU_BBC;
2030 
2031  /* catch RX errors */
2032  if ((bad_phy_status(skge->hw, rd->status)) ||
2033  (phy_length(skge->hw, rd->status) != len)) {
2034  /* report receive errors */
2035  DBG("rx error\n");
2036  netdev_rx_err(dev, iob, -EIO);
2037  } else {
2038  DBG2("received packet, len %d\n", len);
2039  iob_put(iob, len);
2040  netdev_rx(dev, iob);
2041  }
2042 
2043  /* io_buffer passed to core, make sure we don't reuse it */
2044  e->iob = NULL;
2045 
2046  e = e->next;
2047  }
2048  skge_rx_refill(dev);
2049 }
2050 
/* Poll for completions: ACK interrupts, dispatch PHY/link events, reap
 * transmitted buffers, process received frames and restart the receiver.
 * NOTE(review): the B0_SP_ISRC read into 'status' (2058) and the receiver
 * restart / IRQ re-enable writes (2069, 2075, 2077) are elided in this
 * listing -- verify against the full source. */
2051 static void skge_poll(struct net_device *dev)
2052 {
2053  struct skge_port *skge = netdev_priv(dev);
2054  struct skge_hw *hw = skge->hw;
2055  u32 status;
2056 
2057  /* reading this register ACKs interrupts */
2059 
2060  /* Link event? */
2061  if (status & IS_EXT_REG) {
2062  skge_phyirq(hw);
2063  if (skge->use_xm_link_timer)
2064  xm_link_timer(skge);
2065  }
2066 
2067  skge_tx_done(dev);
2068 
2070 
2071  skge_rx_done(dev);
2072 
2073  /* restart receiver */
2074  wmb();
2076 
2078 
2079  return;
2080 }
2081 
/* Dispatch a PHY interrupt to the per-port handler (Yukon or Broadcom) and
 * re-enable the external-register interrupt source.  Fiber Genesis/XMAC
 * boards have no PHY IRQ handler here (handled via xm_link_timer instead).
 * NOTE(review): line 2098 is elided in this listing. */
2082 static void skge_phyirq(struct skge_hw *hw)
2083 {
2084  int port;
2085 
2086  for (port = 0; port < hw->ports; port++) {
2087  struct net_device *dev = hw->dev[port];
2088  struct skge_port *skge = netdev_priv(dev);
2089 
2090  if (hw->chip_id != CHIP_ID_GENESIS)
2091  yukon_phy_intr(skge);
2092  else if (hw->phy_type == SK_PHY_BCOM)
2093  bcom_phy_intr(skge);
2094  }
2095 
2096  hw->intr_mask |= IS_EXT_REG;
2097  skge_write32(hw, B0_IMSK, hw->intr_mask);
2099 }
2100 
/* Chip-ID to human-readable name lookup table, used by skge_board_name().
 * NOTE(review): the id member declaration (line 2102) is elided in this
 * listing; skge_board_name() below references skge_chips[i].id. */
2101 static const struct {
2103  const char *name;
2104 } skge_chips[] = {
2105  { CHIP_ID_GENESIS, "Genesis" },
2106  { CHIP_ID_YUKON, "Yukon" },
2107  { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
2108  { CHIP_ID_YUKON_LP, "Yukon-LP"},
2109 };
2110 
2111 static const char *skge_board_name(const struct skge_hw *hw)
2112 {
2113  unsigned int i;
2114  static char buf[16];
2115 
2116  for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
2117  if (skge_chips[i].id == hw->chip_id)
2118  return skge_chips[i].name;
2119 
2120  snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
2121  return buf;
2122 }
2123 
2124 
2125 /*
2126  * Setup the board data structure, but don't bring up
2127  * the port(s)
2128  */
/* NOTE(review): many register writes in this function are elided in this
 * listing (doxygen line numbers skip): the SW-reset writes, PCI error
 * clearing, CLK_RUN restore, VCC power switch, PHY COMA clear, GPHY resets,
 * timer stop, Tx arbiter enable, RAM interface init and IRQ moderation
 * setup -- verify against the full source. */
2129 static int skge_reset(struct skge_hw *hw)
2130 {
2131  u32 reg;
2132  u16 ctst, pci_status;
2133  u8 t8, mac_cfg, pmd_type;
2134  int i;
2135 
2136  ctst = skge_read16(hw, B0_CTST);
2137 
2138  /* do a SW reset */
2141 
2142  /* clear PCI errors, if any */
2145 
2146  pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
2148  pci_status | PCI_STATUS_ERROR_BITS);
2151 
2152  /* restore CLK_RUN bits (for Yukon-Lite) */
2155 
/* Identify the chip, PHY and medium type */
2156  hw->chip_id = skge_read8(hw, B2_CHIP_ID);
2157  hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
2158  pmd_type = skge_read8(hw, B2_PMD_TYP);
2159  hw->copper = (pmd_type == 'T' || pmd_type == '1');
2160 
2161  switch (hw->chip_id) {
2162  case CHIP_ID_GENESIS:
2163  switch (hw->phy_type) {
2164  case SK_PHY_XMAC:
2165  hw->phy_addr = PHY_ADDR_XMAC;
2166  break;
2167  case SK_PHY_BCOM:
2168  hw->phy_addr = PHY_ADDR_BCOM;
2169  break;
2170  default:
2171  DBG(PFX "unsupported phy type 0x%x\n",
2172  hw->phy_type);
2173  return -EOPNOTSUPP;
2174  }
2175  break;
2176 
2177  case CHIP_ID_YUKON:
2178  case CHIP_ID_YUKON_LITE:
2179  case CHIP_ID_YUKON_LP:
2180  if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
2181  hw->copper = 1;
2182 
2183  hw->phy_addr = PHY_ADDR_MARV;
2184  break;
2185 
2186  default:
2187  DBG(PFX "unsupported chip type 0x%x\n",
2188  hw->chip_id);
2189  return -EOPNOTSUPP;
2190  }
2191 
2192  mac_cfg = skge_read8(hw, B2_MAC_CFG);
2193  hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
2194  hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
2195 
2196  /* read the adapters RAM size */
2197  t8 = skge_read8(hw, B2_E_0);
2198  if (hw->chip_id == CHIP_ID_GENESIS) {
2199  if (t8 == 3) {
2200  /* special case: 4 x 64k x 36, offset = 0x80000 */
2201  hw->ram_size = 0x100000;
2202  hw->ram_offset = 0x80000;
2203  } else
2204  hw->ram_size = t8 * 512;
2205  }
2206  else if (t8 == 0)
2207  hw->ram_size = 0x20000;
2208  else
2209  hw->ram_size = t8 * 4096;
2210 
2211  hw->intr_mask = IS_HW_ERR;
2212 
2213  /* Use PHY IRQ for all but fiber based Genesis board */
2214  if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
2215  hw->intr_mask |= IS_EXT_REG;
2216 
2217  if (hw->chip_id == CHIP_ID_GENESIS)
2218  genesis_init(hw);
2219  else {
2220  /* switch power to VCC (WA for VAUX problem) */
2223 
2224  /* avoid boards with stuck Hardware error bits */
2225  if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
2227  DBG(PFX "stuck hardware sensor bit\n");
2228  hw->intr_mask &= ~IS_HW_ERR;
2229  }
2230 
2231  /* Clear PHY COMA */
2234  reg &= ~PCI_PHY_COMA;
2237 
2238 
2239  for (i = 0; i < hw->ports; i++) {
2242  }
2243  }
2244 
2245  /* turn off hardware timer (unused) */
2249 
2250  /* enable the Tx Arbiters */
2251  for (i = 0; i < hw->ports; i++)
2253 
2254  /* Initialize ram interface */
2256 
2269 
2271 
2272  /* Set interrupt moderation for Transmit only
2273  * Receive interrupts avoided by NAPI
2274  */
2278 
2279  skge_write32(hw, B0_IMSK, hw->intr_mask);
2280 
2281  for (i = 0; i < hw->ports; i++) {
2282  if (hw->chip_id == CHIP_ID_GENESIS)
2283  genesis_reset(hw, i);
2284  else
2285  yukon_reset(hw, i);
2286  }
2287 
2288  return 0;
2289 }
2290 
2291 /* Initialize network device */
/* Allocate and initialise the net_device and skge_port for one port, set
 * autonegotiation defaults and read the burned-in MAC address.  Returns
 * NULL on allocation failure.  NOTE(review): lines 2311 and 2314
 * (presumably flow_control and other defaults) are elided in this listing. */
2292 static struct net_device *skge_devinit(struct skge_hw *hw, int port,
2293  int highmem __unused)
2294 {
2295  struct skge_port *skge;
2296  struct net_device *dev = alloc_etherdev(sizeof(*skge));
2297 
2298  if (!dev) {
2299  DBG(PFX "etherdev alloc failed\n");
2300  return NULL;
2301  }
2302 
2303  dev->dev = &hw->pdev->dev;
2304 
2305  skge = netdev_priv(dev);
2306  skge->netdev = dev;
2307  skge->hw = hw;
2308 
2309  /* Auto speed and flow control */
2310  skge->autoneg = AUTONEG_ENABLE;
2312  skge->duplex = -1;
2313  skge->speed = -1;
2315 
2316  hw->dev[port] = dev;
2317 
2318  skge->port = port;
2319 
2320  /* read the mac address */
2321  memcpy(dev->hw_addr, (void *) (hw->regs + B2_MAC_1 + port*8), ETH_ALEN);
2322 
2323  return dev;
2324 }
2325 
2326 static void skge_show_addr(struct net_device *dev)
2327 {
2328  DBG2(PFX "%s: addr %s\n",
2329  dev->name, netdev_addr(dev));
2330 }
2331 
/* PCI probe: allocate the hardware struct, map the register BAR, reset and
 * identify the board, then create and register a net_device per port.
 * A second-port registration failure is non-fatal.  NOTE(review): several
 * lines are elided in this listing (adjust_pci_device at 2338, the BAR
 * start argument at 2350, netdev_init at 2369, skge_show_addr at 2377,
 * pci_set_drvdata at 2390, netdev_nullify at 2395, and cleanup steps at
 * 2398/2404) -- verify against the full source. */
2332 static int skge_probe(struct pci_device *pdev)
2333 {
2334  struct net_device *dev, *dev1;
2335  struct skge_hw *hw;
2336  int err, using_dac = 0;
2337 
2339 
2340  err = -ENOMEM;
2341  hw = zalloc(sizeof(*hw));
2342  if (!hw) {
2343  DBG(PFX "cannot allocate hardware struct\n");
2344  goto err_out_free_regions;
2345  }
2346 
2347  hw->pdev = pdev;
2348 
2349  hw->regs = (unsigned long)pci_ioremap(pdev,
2351  SKGE_REG_SIZE);
2352  if (!hw->regs) {
2353  DBG(PFX "cannot map device registers\n");
2354  goto err_out_free_hw;
2355  }
2356 
2357  err = skge_reset(hw);
2358  if (err)
2359  goto err_out_iounmap;
2360 
2361  DBG(PFX " addr 0x%llx irq %d chip %s rev %d\n",
2362  (unsigned long long)pdev->ioaddr, pdev->irq,
2363  skge_board_name(hw), hw->chip_rev);
2364 
2365  dev = skge_devinit(hw, 0, using_dac);
2366  if (!dev)
2367  goto err_out_led_off;
2368 
2370 
2371  err = register_netdev(dev);
2372  if (err) {
2373  DBG(PFX "cannot register net device\n");
2374  goto err_out_free_netdev;
2375  }
2376 
2378 
2379  if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
2380  if (register_netdev(dev1) == 0)
2381  skge_show_addr(dev1);
2382  else {
2383  /* Failure to register second port need not be fatal */
2384  DBG(PFX "register of second port failed\n");
2385  hw->dev[1] = NULL;
2386  netdev_nullify(dev1);
2387  netdev_put(dev1);
2388  }
2389  }
2391 
2392  return 0;
2393 
2394 err_out_free_netdev:
2396  netdev_put(dev);
2397 err_out_led_off:
2399 err_out_iounmap:
2400  iounmap((void*)hw->regs);
2401 err_out_free_hw:
2402  free(hw);
2403 err_out_free_regions:
2405  return err;
2406 }
2407 
/* PCI remove: unregister both ports (second may be absent), mask all
 * interrupts, release the net_devices, unmap registers and free state.
 * NOTE(review): lines 2423 and 2425-2426 (presumably an IMSK read-back and
 * a final reset/LED-off write) are elided in this listing. */
2408 static void skge_remove(struct pci_device *pdev)
2409 {
2410  struct skge_hw *hw = pci_get_drvdata(pdev);
2411  struct net_device *dev0, *dev1;
2412 
2413  if (!hw)
2414  return;
2415 
2416  if ((dev1 = hw->dev[1]))
2417  unregister_netdev(dev1);
2418  dev0 = hw->dev[0];
2419  unregister_netdev(dev0);
2420 
2421  hw->intr_mask = 0;
2422  skge_write32(hw, B0_IMSK, 0);
2424 
2427 
2428  if (dev1) {
2429  netdev_nullify(dev1);
2430  netdev_put(dev1);
2431  }
2432  netdev_nullify(dev0);
2433  netdev_put(dev0);
2434 
2435  iounmap((void*)hw->regs);
2436  free(hw);
2437  pci_set_drvdata(pdev, NULL);
2438 }
2439 
2440 /*
2441  * Enable or disable IRQ masking.
2442  *
2443  * @v netdev Device to control.
2444  * @v enable Zero to mask off IRQ, non-zero to enable IRQ.
2445  *
2446  * This is a iPXE Network Driver API function.
2447  */
2448 static void skge_net_irq ( struct net_device *dev, int enable ) {
2449  struct skge_port *skge = netdev_priv(dev);
2450  struct skge_hw *hw = skge->hw;
2451 
2452  if (enable)
2453  hw->intr_mask |= portmask[skge->port];
2454  else
2455  hw->intr_mask &= ~portmask[skge->port];
2456  skge_write32(hw, B0_IMSK, hw->intr_mask);
2457 }
2458 
2459 struct pci_driver skge_driver __pci_driver = {
2460  .ids = skge_id_table,
2461  .id_count = ( sizeof (skge_id_table) / sizeof (skge_id_table[0]) ),
2462  .probe = skge_probe,
2463  .remove = skge_remove
2464 };
2465 
#define XM_PAUSE_MODE
Definition: skge.h:2336
#define SPEED_1000
Definition: atl1e.h:52
uint16_t u16
Definition: stdint.h:21
static void yukon_link_down(struct skge_port *skge)
Definition: skge.c:1545
Definition: skge.h:108
#define EINVAL
Invalid argument.
Definition: errno.h:428
static void yukon_reset(struct skge_hw *hw, int port)
Definition: skge.c:1297
u32 dma_lo
Definition: skge.h:2438
uint8_t irq
Interrupt number.
Definition: pci.h:211
static u16 xm_read16(const struct skge_hw *hw, int port, int reg)
Definition: skge.h:2560
Definition: skge.h:2095
static void * netdev_priv(struct net_device *netdev)
Get driver private area for this network device.
Definition: netdevice.h:566
Definition: skge.h:360
static void netdev_tx_complete(struct net_device *netdev, struct io_buffer *iobuf)
Complete network transmission.
Definition: netdevice.h:746
int port
Definition: skge.h:2498
wmb()
static const u32 portmask[]
Definition: skge.c:87
#define iob_put(iobuf, len)
Definition: iobuf.h:116
void netdev_rx_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Discard received packet.
Definition: netdevice.c:501
static void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
Definition: skge.h:2571
#define IPG_DATA_DEF
Definition: skge.h:1858
static void xm_link_timer(struct skge_port *skge)
Definition: skge.c:827
Definition: skge.h:377
Definition: skge.h:524
A PCI driver.
Definition: pci.h:227
#define EBUSY
Device or resource busy.
Definition: errno.h:338
static void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
Definition: skge.h:2607
static void skge_net_irq(struct net_device *dev, int enable)
Definition: skge.c:2448
struct net_device * dev[2]
Definition: skge.h:2464
static unsigned int unsigned int reg
Definition: myson.h:162
static void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
Definition: skge.h:2565
#define GM_SMI_CT_PHY_AD(x)
Definition: skge.h:1869
#define PHY_B_DEF_MSK
Definition: skge.h:1328
struct io_buffer * iob
Definition: skge.h:2450
Definition: skge.h:369
Definition: skge.h:81
#define PHY_M_LED_MO_RX(x)
Definition: skge.h:1567
#define SUPPORTED_1000baseT_Half
Definition: skge.h:62
static u32 hwkhz(const struct skge_hw *hw)
Definition: skge.c:121
u16 csum1_start
Definition: skge.h:2432
int(* open)(struct net_device *netdev)
Open network device.
Definition: netdevice.h:222
static void skge_show_addr(struct net_device *dev)
Definition: skge.c:2326
u32 advertising
Definition: skge.h:2508
#define RX_BUF_SIZE
Definition: 3c90x.h:269
Definition: skge.h:519
unsigned long ioaddr
I/O address.
Definition: pci.h:203
static void skge_rx_reuse(struct skge_element *e, unsigned int size)
Definition: skge.c:364
Error codes.
#define SUPPORTED_Autoneg
Definition: skge.h:64
#define TX_COL_THR(x)
Definition: skge.h:1818
#define PHY_M_LED_MO_DUP(x)
Definition: skge.h:1563
FILE_LICENCE(GPL2_ONLY)
Definition: skge.h:393
I/O buffers.
void free_iob(struct io_buffer *iobuf)
Free I/O buffer.
Definition: iobuf.c:145
#define PHY_RETRIES
Definition: skge.h:28
struct pci_device_id * ids
PCI ID table.
Definition: pci.h:229
Definition: skge.h:540
#define ADVERTISED_100baseT_Half
Definition: bnx2.h:44
static void skge_link_down(struct skge_port *skge)
Definition: skge.c:406
int pci_write_config_word(struct pci_device *pci, unsigned int where, uint16_t value)
Write 16-bit word to PCI configuration space.
static void yukon_mac_init(struct skge_hw *hw, int port)
Definition: skge.c:1326
#define SUPPORTED_100baseT_Full
Definition: skge.h:61
static struct net_device_operations skge_operations
Definition: skge.c:76
static void yukon_phy_intr(struct skge_port *skge)
Definition: skge.c:1567
char name[40]
Name.
Definition: device.h:75
Definition: skge.h:349
static void xm_link_down(struct skge_hw *hw, int port)
Definition: skge.c:415
Definition: skge.h:254
static struct net_device * skge_devinit(struct skge_hw *hw, int port, int highmem __unused)
Definition: skge.c:2292
#define AUTONEG_ENABLE
Definition: bnx2.h:4584
static int skge_probe(struct pci_device *pdev)
Definition: skge.c:2332
static int skge_up(struct net_device *dev)
Definition: skge.c:1707
int use_xm_link_timer
Definition: skge.h:2512
static void *__malloc malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition: malloc.h:62
u8 duplex
Definition: skge.h:2506
#define PCI_BASE_ADDRESS_0
Definition: pci.h:62
int pci_read_config_word(struct pci_device *pci, unsigned int where, uint16_t *value)
Read 16-bit word from PCI configuration space.
static void yukon_stop(struct skge_port *skge)
Definition: skge.c:1492
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition: netdevice.c:186
Definition: skge.h:718
Definition: skge.h:518
Definition: skge.h:356
uint32_t zero
Must be zero.
Definition: ntlm.h:24
Definition: skge.h:496
static void bcom_check_link(struct skge_hw *hw, int port)
Definition: skge.c:553
static void skge_phyirq(struct skge_hw *hw)
Definition: skge.c:2082
#define SPEED_10
Definition: atl1e.h:50
static void skge_remove(struct pci_device *pdev)
Definition: skge.c:2408
struct skge_ring tx_ring
Definition: skge.h:2500
#define SKGE_REG_SIZE
Definition: skge.h:34
Definition: skge.h:384
Definition: skge.h:558
#define SKGE_RING_ALIGN
Definition: skge.h:26
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition: pci.c:149
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:128
static const char * netdev_addr(struct net_device *netdev)
Get printable network device link-layer address.
Definition: netdevice.h:521
Definition: hw.c:16
#define NUM_RX_DESC
Definition: igbvf.h:281
#define SUPPORTED_10baseT_Half
Definition: skge.h:58
struct skge_hw * hw
Definition: skge.h:2496
static u32 skge_read32(const struct skge_hw *hw, int reg)
Definition: skge.h:2517
Dynamic memory allocation.
Definition: skge.h:550
#define ADVERTISED_1000baseT_Half
Definition: bnx2.h:46
u32 control
Definition: skge.h:2423
#define RB_ADDR(offs, queue)
Definition: skge.h:590
uint32_t start
Starting offset.
Definition: netvsc.h:12
uint8_t status
Status.
Definition: ena.h:16
#define SPEED_100
Definition: atl1e.h:51
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
Definition: netdevice.h:498
static int is_yukon_lite_a0(struct skge_hw *hw)
Definition: skge.c:1311
#define TX_JAM_IPG_VAL(x)
Definition: skge.h:1841
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:341
#define rmb()
Definition: io.h:484
u32 dma
Definition: skge.h:2511
#define ENOMEM
Not enough space.
Definition: errno.h:534
uint32_t e
Definition: sha1.c:32
void * memcpy(void *dest, const void *src, size_t len) __nonnull
#define GM_GPCR_SPEED_1000
Definition: skge.h:1807
Definition: skge.h:82
static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
Definition: skge.c:1179
u8 port
Port number.
Definition: CIB_PRM.h:31
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
Definition: skge.h:109
enum pause_status flow_status
Definition: skge.h:2504
#define ETH_HLEN
Definition: if_ether.h:9
static const char * skge_board_name(const struct skge_hw *hw)
Definition: skge.c:2111
static const u16 fiber_pause_map[]
Definition: skge.c:544
#define PHY_M_LED_BLINK_RT(x)
Definition: skge.h:1528
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
Definition: skge.h:2075
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition: netdevice.h:555
Ethernet protocol.
int pci_read_config_dword(struct pci_device *pci, unsigned int where, uint32_t *value)
Read 32-bit dword from PCI configuration space.
#define PHY_B_AS_PAUSE_MSK
Definition: skge.h:1307
#define PCI_PHY_COMA
Definition: skge.h:11
#define PHY_M_EC_M_DSC(x)
Definition: skge.h:1504
#define PCI_STATUS_ERROR_BITS
Definition: skge.h:70
struct skge_element * start
Definition: skge.h:2456
static void xm_outhash(const struct skge_hw *hw, int port, int reg, const u8 *hash)
Definition: skge.h:2576
static void genesis_init(struct skge_hw *hw)
Definition: skge.c:480
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
Definition: skge.c:458
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition: netdevice.h:768
static u32 xm_read32(const struct skge_hw *hw, int port, int reg)
Definition: skge.h:2552
#define GMAC_DEF_MSK
Definition: skge.h:2021
static u16 yukon_speed(const struct skge_hw *hw __unused, u16 aux)
Definition: skge.c:1512
static int xm_check_link(struct net_device *dev)
Definition: skge.c:752
void udelay(unsigned long usecs)
Delay for a fixed number of microseconds.
Definition: timer.c:60
u16 csum2_start
Definition: skge.h:2431
static int netdev_link_ok(struct net_device *netdev)
Check link state of network device.
Definition: netdevice.h:630
#define PHY_M_PS_PAUSE_MSK
Definition: skge.h:1451
#define PHY_M_EC_S_DSC(x)
Definition: skge.h:1505
#define DUPLEX_FULL
Definition: bnx2.h:111
uint64_t u64
Definition: stdint.h:25
u8 autoneg
Definition: skge.h:2505
static u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
Definition: skge.c:127
#define RING_SIZE
Definition: skge.h:32
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition: pci.c:96
static void bcom_phy_init(struct skge_port *skge)
Definition: skge.c:621
#define SUPPORTED_TP
Definition: skge.h:65
static u8 skge_read8(const struct skge_hw *hw, int reg)
Definition: skge.h:2527
#define TX_IPG_JAM_DATA(x)
Definition: skge.h:1842
Definition: skge.h:378
void unregister_netdev(struct net_device *netdev)
Unregister network device.
Definition: netdevice.c:844
Definition: skge.h:707
#define PFX
Definition: sis190.h:34
#define DBGIO(...)
Definition: compiler.h:549
static void skge_write16(const struct skge_hw *hw, int reg, u16 val)
Definition: skge.h:2537
static struct pci_device_id skge_id_table[]
Definition: skge.c:46
static void skge_rx_clean(struct skge_port *skge)
Definition: skge.c:378
#define SK_PKT_TO_MAX
Definition: skge.h:441
Definition: skge.h:782
char unsigned long * num
Definition: xenstore.h:17
struct pci_device * pdev
Definition: skge.h:2462
#define GPC_HWCFG_GMII_COP
Definition: skge.h:1988
led_mode
Definition: skge.c:132
static void skge_tx_done(struct net_device *dev)
Definition: skge.c:1940
Definition: skge.h:557
Definition: skge.h:79
static void skge_link_up(struct skge_port *skge)
Definition: skge.c:394
#define XMR_DEF_MSK
Definition: skge.h:2386
static void(* free)(struct refcnt *refcnt))
Definition: refcnt.h:54
static const int rxqaddr[]
Definition: skge.c:86
void * zalloc(size_t size)
Allocate cleared memory.
Definition: malloc.c:624
Definition: skge.h:367
#define IPG_DATA_VAL(x)
Definition: skge.h:1857
struct skge_element * to_clean
Definition: skge.h:2454
PCI bus.
#define PHY_M_LED_PULS_DUR(x)
Definition: skge.h:1527
Definition: skge.h:2460
A PCI device.
Definition: pci.h:188
int register_netdev(struct net_device *netdev)
Register network device.
Definition: netdevice.c:667
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:151
u32 dma_hi
Definition: skge.h:2426
uint32_t control
Control.
Definition: myson.h:14
#define EOPNOTSUPP
Operation not supported on socket.
Definition: errno.h:604
A network device.
Definition: netdevice.h:348
static void skge_qset(struct skge_port *skge, u16 q, const struct skge_element *e)
Definition: skge.c:1675
static int skge_tx_avail(const struct skge_ring *ring)
Definition: skge.c:1856
static void xm_outaddr(const struct skge_hw *hw, int port, int reg, const u8 *addr)
Definition: skge.h:2585
static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
Definition: skge.c:1199
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition: netdevice.h:511
static void skge_poll(struct net_device *dev)
Definition: skge.c:2051
u16 csum2
Definition: skge.h:2429
#define TX_JAM_LEN_VAL(x)
Definition: skge.h:1840
#define ARRAY_SIZE(x)
Definition: efx_common.h:43
u32 addr
Definition: sky2.h:8
struct skge_ring rx_ring
Definition: skge.h:2501
#define SK_REG(port, reg)
Definition: skge.h:2548
Definition: skge.h:376
Definition: skge.h:2100
Definition: skge.h:262
Definition: skge.h:358
#define PCI_STATUS
PCI status.
Definition: pci.h:35
#define SUPPORTED_10baseT_Full
Definition: skge.h:59
static int skge_reset(struct skge_hw *hw)
Definition: skge.c:2129
#define ETH_ALEN
Definition: if_ether.h:8
A PCI device ID list entry.
Definition: pci.h:152
static void skge_rx_stop(struct skge_hw *hw, int port)
Definition: skge.c:1782
Definition: skge.h:523
#define SUPPORTED_100baseT_Half
Definition: skge.h:60
Definition: skge.h:78
static void genesis_stop(struct skge_port *skge)
Definition: skge.c:998
uint16_t base
Base address.
Definition: edd.h:14
static struct xen_remove_from_physmap * remove
Definition: xenmem.h:39
#define RX_RING_SIZE
Definition: 3c515.c:87
static void skge_rx_setup(struct skge_port *skge __unused, struct skge_element *e, struct io_buffer *iob, unsigned int bufsize)
Definition: skge.c:338
Definition: skge.h:560
void __asmcall int val
Definition: setjmp.h:28
Network device operations.
Definition: netdevice.h:213
uint16_t ext
Extended status.
Definition: ena.h:20
void netdev_rx(struct net_device *netdev, struct io_buffer *iobuf)
Add packet to receive queue.
Definition: netdevice.c:470
struct device * dev
Underlying hardware device.
Definition: netdevice.h:360
Network device management.
struct net_device * netdev
Definition: skge.h:2497
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition: pci.h:351
#define SK_RI_TO_53
Definition: skge.h:442
#define XM_EXM(reg)
Definition: skge.h:2089
static u16 gma_read16(const struct skge_hw *hw, int port, int reg)
Definition: skge.h:2596
static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
Definition: skge.c:1644
static void skge_tx_clean(struct net_device *dev)
Definition: skge.c:1909
u16 csum1
Definition: skge.h:2430
char name[NETDEV_NAME_LEN]
Name of this network device.
Definition: netdevice.h:358
#define PHY_M_LED_MO_1000(x)
Definition: skge.h:1566
Definition: skge.h:542
uint32_t len
Length.
Definition: ena.h:14
#define XM_DEF_MODE
Definition: skge.h:2337
enum pause_control flow_control
Definition: skge.h:2503
u32 status
Definition: skge.h:2427
int(* probe)(struct pci_device *pci)
Probe device.
Definition: pci.h:240
#define TX_COL_DEF
Definition: skge.h:1819
#define GPC_HWCFG_GMII_FIB
Definition: skge.h:1989
static void bcom_phy_intr(struct skge_port *skge)
Definition: skge.c:1129
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base, size_t num)
Definition: skge.c:311
static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
Definition: skge.c:449
void skge_free(struct net_device *dev)
Definition: skge.c:1692
void * data
Start of data.
Definition: iobuf.h:44
const char * name
Definition: skge.c:2103
#define EIO
Input/output error.
Definition: errno.h:433
Definition: skge.h:334
#define DATA_BLIND_DEF
Definition: skge.h:1855
struct pci_driver skge_driver __pci_driver
Definition: skge.c:2459
void * mem
Definition: skge.h:2510
#define SUPPORTED_FIBRE
Definition: skge.h:66
struct net_device * alloc_etherdev(size_t priv_size)
Allocate Ethernet device.
Definition: ethernet.c:264
u32 dma_lo
Definition: skge.h:2425
uint32_t d
Definition: md4.c:31
static void yukon_link_up(struct skge_port *skge)
Definition: skge.c:1524
#define Q_ADDR(reg, offs)
Definition: skge.h:534
u8 ctrl
Definition: sky2.h:10
int pci_write_config_dword(struct pci_device *pci, unsigned int where, uint32_t value)
Write 32-bit dword to PCI configuration space.
Definition: skge.h:719
#define SK_FACT_53
Definition: skge.h:865
Definition: skge.h:128
Definition: skge.h:562
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
Definition: skge.c:1159
uint32_t end
Ending offset.
Definition: netvsc.h:18
uint8_t size
Entry size (in 32-bit words)
Definition: ena.h:16
void iounmap(volatile const void *io_addr)
Unmap I/O address.
static void yukon_init(struct skge_hw *hw, int port)
Definition: skge.c:1210
#define SUPPORTED_1000baseT_Full
Definition: skge.h:63
static int skge_xmit_frame(struct net_device *dev, struct io_buffer *iob)
Definition: skge.c:1863
#define ADVERTISED_10baseT_Full
Definition: bnx2.h:43
uint16_t reason
Rejection reason.
Definition: ib_mad.h:20
static void xm_phy_init(struct skge_port *skge)
Definition: skge.c:718
Definition: skge.h:2094
static void genesis_link_up(struct skge_port *skge)
Definition: skge.c:1045
u32 dma_hi
Definition: skge.h:2439
#define GM_SMI_CT_REG_AD(x)
Definition: skge.h:1870
#define AUTONEG_DISABLE
Definition: bnx2.h:4583
int snprintf(char *buf, size_t size, const char *fmt,...)
Write a formatted string to a buffer.
Definition: vsprintf.c:382
static void skge_down(struct net_device *dev)
Definition: skge.c:1790
static void free_phys(void *ptr, size_t size)
Free memory allocated with malloc_phys()
Definition: malloc.h:77
#define PHY_M_EC_MAC_S(x)
Definition: skge.h:1506
void mb(void)
Memory barrier.
u8 id
Definition: skge.c:2102
#define DATA_BLIND_VAL(x)
Definition: skge.h:1854
static const int txqaddr[]
Definition: skge.c:85
#define PHY_M_LED_MO_10(x)
Definition: skge.h:1564
static void gma_set_addr(struct skge_hw *hw, int port, int reg, const u8 *addr)
Definition: skge.h:2612
#define GM_MIB_CNT_SIZE
Definition: skge.h:1716
uint8_t ll_addr[MAX_LL_ADDR_LEN]
Link-layer address.
Definition: netdevice.h:381
static int bad_phy_status(const struct skge_hw *hw, u32 status)
Definition: skge.c:1930
static void skge_rx_refill(struct net_device *dev)
Definition: skge.c:1962
#define DBG(...)
Print a debugging message.
Definition: compiler.h:498
#define GM_GPCR_AU_ALL_DIS
Definition: skge.h:1808
#define SK_BLK_DUR
Definition: skge.h:857
static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
Definition: skge.c:426
void * pci_ioremap(struct pci_device *pci, unsigned long bus_addr, size_t len)
Map PCI bus address as an I/O address.
#define CSR_CLR_RESET
Definition: skge.h:793
uint8_t bufsize
Size of the packet, in bytes.
Definition: int13.h:12
#define CSR_SET_RESET
Definition: skge.h:790
#define PHY_M_LED_MO_100(x)
Definition: skge.h:1565
static void skge_rx_done(struct net_device *dev)
Definition: skge.c:2004
Definition: skge.h:112
u16 speed
Definition: skge.h:2507
Definition: skge.h:539
static void skge_led(struct skge_port *skge, enum led_mode mode)
Definition: skge.c:133
static void yukon_suspend(struct skge_hw *hw, int port)
Definition: skge.c:1474
#define ADVERTISED_1000baseT_Full
Definition: bnx2.h:47
#define NUM_TX_DESC
Definition: igbvf.h:280
Definition: skge.h:541
#define NULL
NULL pointer (VOID *)
Definition: Base.h:362
struct golan_eqe_cmd cmd
Definition: CIB_PRM.h:29
static void genesis_reset(struct skge_hw *hw, int port)
Definition: skge.c:508
#define ADVERTISED_100baseT_Full
Definition: bnx2.h:45
#define ETIMEDOUT
Connection timed out.
Definition: errno.h:669
#define GPC_ANEG_ADV_ALL_M
Definition: skge.h:1990
static __always_inline int physaddr_t size_t len int flags struct dma_mapping * map
Definition: dma.h:153
#define PCI_ROM(_vendor, _device, _name, _description, _data)
Definition: pci.h:286
static u16 phy_length(const struct skge_hw *hw, u32 status)
Definition: skge.c:1922
struct skge_element * to_use
Definition: skge.h:2455
#define ADVERTISED_10baseT_Half
Definition: bnx2.h:42
Definition: skge.h:2076
uint8_t u8
Definition: stdint.h:19
static void skge_write32(const struct skge_hw *hw, int reg, u32 val)
Definition: skge.h:2532
u32 control
Definition: skge.h:2436
uint32_t u32
Definition: stdint.h:23
Definition: skge.h:101
static u16 skge_read16(const struct skge_hw *hw, int reg)
Definition: skge.h:2522
static const struct @98 skge_chips[]
static void genesis_mac_init(struct skge_hw *hw, int port)
Definition: skge.c:852
static u32 skge_supported_modes(const struct skge_hw *hw)
Definition: skge.c:92
#define DBG2(...)
Definition: compiler.h:515
static const u16 phy_pause_map[]
Definition: skge.c:536
#define DUPLEX_HALF
Definition: bnx2.h:110
void * memset(void *dest, int character, size_t len) __nonnull
#define XMT_DEF_MSK
Definition: skge.h:2420
static const uint8_t r[3][4]
MD4 shift amounts.
Definition: md4.c:53
#define SK_MAC_TO_53
Definition: skge.h:439
A persistent I/O buffer.
Definition: iobuf.h:32
static void skge_write8(const struct skge_hw *hw, int reg, u8 val)
Definition: skge.h:2542