From 527a626601de6ff89859de90883cc546892bf3ca Mon Sep 17 00:00:00 2001
From: Jeff Kirsher
Date: Fri, 20 May 2011 20:18:55 -0700
Subject: skge/sky2/mv643xx/pxa168: Move the Marvell Ethernet drivers

Move the Marvell Ethernet drivers into drivers/net/ethernet/marvell/
and make the necessary Kconfig and Makefile changes.

CC: Sachin Sanap
CC: Zhangfei Gao
CC: Philip Rakity
CC: Mark Brown
CC: Lennert Buytenhek
CC: Stephen Hemminger
Signed-off-by: Jeff Kirsher
---
 drivers/net/ethernet/marvell/Kconfig       |  110 +
 drivers/net/ethernet/marvell/Makefile      |    8 +
 drivers/net/ethernet/marvell/mv643xx_eth.c | 3020 ++++++++++++++++
 drivers/net/ethernet/marvell/pxa168_eth.c  | 1662 +++++++++
 drivers/net/ethernet/marvell/skge.c        | 4133 ++++++++++++++++++++++
 drivers/net/ethernet/marvell/skge.h        | 2584 ++++++++++++++
 drivers/net/ethernet/marvell/sky2.c        | 5130 ++++++++++++++++++++++++++++
 drivers/net/ethernet/marvell/sky2.h        | 2427 +++++++++++++
 8 files changed, 19074 insertions(+)
 create mode 100644 drivers/net/ethernet/marvell/Kconfig
 create mode 100644 drivers/net/ethernet/marvell/Makefile
 create mode 100644 drivers/net/ethernet/marvell/mv643xx_eth.c
 create mode 100644 drivers/net/ethernet/marvell/pxa168_eth.c
 create mode 100644 drivers/net/ethernet/marvell/skge.c
 create mode 100644 drivers/net/ethernet/marvell/skge.h
 create mode 100644 drivers/net/ethernet/marvell/sky2.c
 create mode 100644 drivers/net/ethernet/marvell/sky2.h

diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
new file mode 100644
index 000000000000..e525408367b6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -0,0 +1,110 @@
+#
+# Marvell device configuration
+#
+
+config NET_VENDOR_MARVELL
+	bool "Marvell devices"
+	depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Marvell devices. If you say Y, you will be
+	  asked for your specific card in the following questions.
+
+if NET_VENDOR_MARVELL
+
+config MV643XX_ETH
+	tristate "Marvell Discovery (643XX) and Orion ethernet support"
+	depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
+	select INET_LRO
+	select PHYLIB
+	---help---
+	  This driver supports the gigabit ethernet MACs in the
+	  Marvell Discovery PPC/MIPS chipset family (MV643XX) and
+	  in the Marvell Orion ARM SoC family.
+
+	  Some boards that use the Discovery chipset are the Momenco
+	  Ocelot C and Jaguar ATX and Pegasos II.
+
+config PXA168_ETH
+	tristate "Marvell pxa168 ethernet support"
+	depends on CPU_PXA168
+	select PHYLIB
+	---help---
+	  This driver supports the pxa168 Ethernet ports.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called pxa168_eth.
+
+config SKGE
+	tristate "Marvell Yukon Gigabit Ethernet support"
+	depends on PCI
+	select CRC32
+	---help---
+	  This driver supports the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
+	  and related Gigabit Ethernet adapters. It is a new smaller driver
+	  with better performance and more complete ethtool support.
+
+	  It does not support the link failover and network management
+	  features that the "portable" vendor-supplied sk98lin driver does.
+
+	  This driver supports adapters based on the original Yukon chipset:
+	  Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T,
+	  Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.
+
+	  It does not support the newer Yukon2 chipset: a separate driver,
+	  sky2, is provided for these adapters.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called skge. This is recommended.
+
+config SKGE_DEBUG
+	bool "Debugging interface"
+	depends on SKGE && DEBUG_FS
+	---help---
+	  This option adds the ability to dump driver state for debugging.
+	  The file /sys/kernel/debug/skge/ethX displays the state of the internal
+	  transmit and receive rings.
+
+	  If unsure, say N.
+
+config SKGE_GENESIS
+	bool "Support for older SysKonnect Genesis boards"
+	depends on SKGE
+	---help---
+	  This enables support for the older and uncommon SysKonnect Genesis
+	  chips, which support MII via an external transceiver, instead of
+	  an internal one. Disabling this option will save some memory
+	  by making the code smaller. If unsure, say Y.
+
+config SKY2
+	tristate "Marvell Yukon 2 support"
+	depends on PCI
+	select CRC32
+	---help---
+	  This driver supports Gigabit Ethernet adapters based on the
+	  Marvell Yukon 2 chipset:
+	  Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
+	  88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21.
+
+	  There is a companion driver for the older Marvell Yukon and
+	  SysKonnect Genesis based adapters: skge.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sky2. This is recommended.
+
+config SKY2_DEBUG
+	bool "Debugging interface"
+	depends on SKY2 && DEBUG_FS
+	---help---
+	  This option adds the ability to dump driver state for debugging.
+	  The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+	  transmit and receive rings.
+
+	  If unsure, say N.
+
+endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
new file mode 100644
index 000000000000..57e3234a37ba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Marvell device drivers.
+#
+
+obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
+obj-$(CONFIG_SKGE) += skge.o
+obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
new file mode 100644
index 000000000000..259699983ca5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -0,0 +1,3020 @@
+/*
+ * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
+ * Copyright (C) 2002 Matthew Dharm
+ *
+ * Based on the 64360 driver from:
+ * Copyright (C) 2002 Rabeeh Khoury
+ *		      Rabeeh Khoury
+ *
+ * Copyright (C) 2003 PMC-Sierra, Inc.,
+ *	written by Manish Lachwani
+ *
+ * Copyright (C) 2003 Ralf Baechle
+ *
+ * Copyright (C) 2004-2006 MontaVista Software, Inc.
+ *			   Dale Farnsworth
+ *
+ * Copyright (C) 2004 Steven J. Hill
+ *
+ *
+ * Copyright (C) 2007-2008 Marvell Semiconductor
+ *			   Lennert Buytenhek
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/inet_lro.h>
+#include <linux/slab.h>
+
+static char mv643xx_eth_driver_name[] = "mv643xx_eth";
+static char mv643xx_eth_driver_version[] = "1.4";
+
+
+/*
+ * Registers shared between all ports.
+ */
+#define PHY_ADDR			0x0000
+#define SMI_REG				0x0004
+#define  SMI_BUSY			0x10000000
+#define  SMI_READ_VALID			0x08000000
+#define  SMI_OPCODE_READ		0x04000000
+#define  SMI_OPCODE_WRITE		0x00000000
+#define ERR_INT_CAUSE			0x0080
+#define  ERR_INT_SMI_DONE		0x00000010
+#define ERR_INT_MASK			0x0084
+#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
+#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
+#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
+#define WINDOW_BAR_ENABLE		0x0290
+#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
+
+/*
+ * Main per-port registers.  These live at offset 0x0400 for
+ * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
+ */
+#define PORT_CONFIG			0x0000
+#define  UNICAST_PROMISCUOUS_MODE	0x00000001
+#define PORT_CONFIG_EXT			0x0004
+#define MAC_ADDR_LOW			0x0014
+#define MAC_ADDR_HIGH			0x0018
+#define SDMA_CONFIG			0x001c
+#define  TX_BURST_SIZE_16_64BIT		0x01000000
+#define  TX_BURST_SIZE_4_64BIT		0x00800000
+#define  BLM_TX_NO_SWAP			0x00000020
+#define  BLM_RX_NO_SWAP			0x00000010
+#define  RX_BURST_SIZE_16_64BIT		0x00000008
+#define  RX_BURST_SIZE_4_64BIT		0x00000004
+#define PORT_SERIAL_CONTROL		0x003c
+#define  SET_MII_SPEED_TO_100		0x01000000
+#define  SET_GMII_SPEED_TO_1000		0x00800000
+#define  SET_FULL_DUPLEX_MODE		0x00200000
+#define  MAX_RX_PACKET_9700BYTE		0x000a0000
+#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
+#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
+#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
+#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
+#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
+#define  FORCE_LINK_PASS		0x00000002
+#define  SERIAL_PORT_ENABLE		0x00000001
+#define PORT_STATUS			0x0044
+#define  TX_FIFO_EMPTY			0x00000400
+#define  TX_IN_PROGRESS			0x00000080
+#define  PORT_SPEED_MASK		0x00000030
+#define  PORT_SPEED_1000		0x00000010
+#define  PORT_SPEED_100			0x00000020
+#define  PORT_SPEED_10			0x00000000
+#define  FLOW_CONTROL_ENABLED		0x00000008
+#define  FULL_DUPLEX			0x00000004
+#define  LINK_UP			0x00000002
+#define TXQ_COMMAND			0x0048
+#define TXQ_FIX_PRIO_CONF		0x004c
+#define TX_BW_RATE			0x0050
+#define TX_BW_MTU			0x0058
+#define TX_BW_BURST			0x005c
+#define INT_CAUSE			0x0060
+#define  INT_TX_END			0x07f80000
+#define  INT_TX_END_0			0x00080000
+#define  INT_RX				0x000003fc
+#define  INT_RX_0			0x00000004
+#define  INT_EXT			0x00000002
+#define INT_CAUSE_EXT			0x0064
+#define  INT_EXT_LINK_PHY		0x00110000
+#define  INT_EXT_TX			0x000000ff
+#define INT_MASK			0x0068
+#define INT_MASK_EXT			0x006c
+#define TX_FIFO_URGENT_THRESHOLD	0x0074
+#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
+#define TX_BW_RATE_MOVED		0x00e0
+#define TX_BW_MTU_MOVED			0x00e8
+#define TX_BW_BURST_MOVED		0x00ec
+#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
+#define RXQ_COMMAND			0x0280
+#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
+#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
+#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
+#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) <<
4)) + +/* + * Misc per-port registers. + */ +#define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) +#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) +#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) +#define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) + + +/* + * SDMA configuration register default value. + */ +#if defined(__BIG_ENDIAN) +#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ + (RX_BURST_SIZE_4_64BIT | \ + TX_BURST_SIZE_4_64BIT) +#elif defined(__LITTLE_ENDIAN) +#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ + (RX_BURST_SIZE_4_64BIT | \ + BLM_RX_NO_SWAP | \ + BLM_TX_NO_SWAP | \ + TX_BURST_SIZE_4_64BIT) +#else +#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined +#endif + + +/* + * Misc definitions. + */ +#define DEFAULT_RX_QUEUE_SIZE 128 +#define DEFAULT_TX_QUEUE_SIZE 256 +#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) + + +/* + * RX/TX descriptors. + */ +#if defined(__BIG_ENDIAN) +struct rx_desc { + u16 byte_cnt; /* Descriptor buffer byte count */ + u16 buf_size; /* Buffer size */ + u32 cmd_sts; /* Descriptor command status */ + u32 next_desc_ptr; /* Next descriptor pointer */ + u32 buf_ptr; /* Descriptor buffer pointer */ +}; + +struct tx_desc { + u16 byte_cnt; /* buffer byte count */ + u16 l4i_chk; /* CPU provided TCP checksum */ + u32 cmd_sts; /* Command/status field */ + u32 next_desc_ptr; /* Pointer to next descriptor */ + u32 buf_ptr; /* pointer to buffer for this descriptor*/ +}; +#elif defined(__LITTLE_ENDIAN) +struct rx_desc { + u32 cmd_sts; /* Descriptor command status */ + u16 buf_size; /* Buffer size */ + u16 byte_cnt; /* Descriptor buffer byte count */ + u32 buf_ptr; /* Descriptor buffer pointer */ + u32 next_desc_ptr; /* Next descriptor pointer */ +}; + +struct tx_desc { + u32 cmd_sts; /* Command/status field */ + u16 l4i_chk; /* CPU provided TCP checksum */ + u16 byte_cnt; /* buffer byte count */ + u32 buf_ptr; /* pointer to buffer for this descriptor*/ + u32 next_desc_ptr; /* Pointer to next descriptor */ +}; +#else +#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined +#endif + +/* RX & TX descriptor command */ +#define BUFFER_OWNED_BY_DMA 0x80000000 + +/* RX & TX descriptor status */ +#define ERROR_SUMMARY 0x00000001 + +/* RX descriptor status */ +#define LAYER_4_CHECKSUM_OK 0x40000000 +#define RX_ENABLE_INTERRUPT 0x20000000 +#define RX_FIRST_DESC 0x08000000 +#define RX_LAST_DESC 0x04000000 +#define RX_IP_HDR_OK 0x02000000 +#define RX_PKT_IS_IPV4 0x01000000 +#define RX_PKT_IS_ETHERNETV2 0x00800000 +#define RX_PKT_LAYER4_TYPE_MASK 0x00600000 +#define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 +#define RX_PKT_IS_VLAN_TAGGED 0x00080000 + +/* TX descriptor command */ +#define TX_ENABLE_INTERRUPT 0x00800000 +#define GEN_CRC 0x00400000 +#define TX_FIRST_DESC 0x00200000 +#define TX_LAST_DESC 0x00100000 +#define ZERO_PADDING 0x00080000 +#define GEN_IP_V4_CHECKSUM 0x00040000 +#define GEN_TCP_UDP_CHECKSUM 0x00020000 +#define UDP_FRAME 0x00010000 +#define MAC_HDR_EXTRA_4_BYTES 0x00008000 +#define MAC_HDR_EXTRA_8_BYTES 0x00000200 + +#define TX_IHL_SHIFT 11 + + +/* global *******************************************************************/ +struct mv643xx_eth_shared_private { + /* + * Ethernet controller base address. + */ + void __iomem *base; + + /* + * Points at the right SMI instance to use. + */ + struct mv643xx_eth_shared_private *smi; + + /* + * Provides access to local SMI interface. 
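+	 *
+	 * (On a controller that has no SMI interface of its own,
+	 * the 'smi' pointer above refers to the shared-private of
+	 * the controller that does, and that instance's mii_bus is
+	 * the one used for PHY access.)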
+ */ + struct mii_bus *smi_bus; + + /* + * If we have access to the error interrupt pin (which is + * somewhat misnamed as it not only reflects internal errors + * but also reflects SMI completion), use that to wait for + * SMI access completion instead of polling the SMI busy bit. + */ + int err_interrupt; + wait_queue_head_t smi_busy_wait; + + /* + * Per-port MBUS window access register value. + */ + u32 win_protect; + + /* + * Hardware-specific parameters. + */ + unsigned int t_clk; + int extended_rx_coal_limit; + int tx_bw_control; + int tx_csum_limit; +}; + +#define TX_BW_CONTROL_ABSENT 0 +#define TX_BW_CONTROL_OLD_LAYOUT 1 +#define TX_BW_CONTROL_NEW_LAYOUT 2 + +static int mv643xx_eth_open(struct net_device *dev); +static int mv643xx_eth_stop(struct net_device *dev); + + +/* per-port *****************************************************************/ +struct mib_counters { + u64 good_octets_received; + u32 bad_octets_received; + u32 internal_mac_transmit_err; + u32 good_frames_received; + u32 bad_frames_received; + u32 broadcast_frames_received; + u32 multicast_frames_received; + u32 frames_64_octets; + u32 frames_65_to_127_octets; + u32 frames_128_to_255_octets; + u32 frames_256_to_511_octets; + u32 frames_512_to_1023_octets; + u32 frames_1024_to_max_octets; + u64 good_octets_sent; + u32 good_frames_sent; + u32 excessive_collision; + u32 multicast_frames_sent; + u32 broadcast_frames_sent; + u32 unrec_mac_control_received; + u32 fc_sent; + u32 good_fc_received; + u32 bad_fc_received; + u32 undersize_received; + u32 fragments_received; + u32 oversize_received; + u32 jabber_received; + u32 mac_receive_error; + u32 bad_crc_event; + u32 collision; + u32 late_collision; +}; + +struct lro_counters { + u32 lro_aggregated; + u32 lro_flushed; + u32 lro_no_desc; +}; + +struct rx_queue { + int index; + + int rx_ring_size; + + int rx_desc_count; + int rx_curr_desc; + int rx_used_desc; + + struct rx_desc *rx_desc_area; + dma_addr_t rx_desc_dma; + int rx_desc_area_size; + struct sk_buff **rx_skb; + + struct net_lro_mgr lro_mgr; + struct net_lro_desc lro_arr[8]; +}; + +struct tx_queue { + int index; + + int tx_ring_size; + + int tx_desc_count; + int tx_curr_desc; + int tx_used_desc; + + struct tx_desc *tx_desc_area; + dma_addr_t tx_desc_dma; + int tx_desc_area_size; + + struct sk_buff_head tx_skb; + + unsigned long tx_packets; + unsigned long tx_bytes; + unsigned long tx_dropped; +}; + +struct mv643xx_eth_private { + struct mv643xx_eth_shared_private *shared; + void __iomem *base; + int port_num; + + struct net_device *dev; + + struct phy_device *phy; + + struct timer_list mib_counters_timer; + spinlock_t mib_counters_lock; + struct mib_counters mib_counters; + + struct lro_counters lro_counters; + + struct work_struct tx_timeout_task; + + struct napi_struct napi; + u32 int_mask; + u8 oom; + u8 work_link; + u8 work_tx; + u8 work_tx_end; + u8 work_rx; + u8 work_rx_refill; + + int skb_size; + struct sk_buff_head rx_recycle; + + /* + * RX state. + */ + int rx_ring_size; + unsigned long rx_desc_sram_addr; + int rx_desc_sram_size; + int rxq_count; + struct timer_list rx_oom; + struct rx_queue rxq[8]; + + /* + * TX state. 
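+	 * (Mirrors the RX state above: up to eight queues, with the
+	 * first queue's descriptor ring optionally placed in on-chip
+	 * SRAM; see txq_init().)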
+ */ + int tx_ring_size; + unsigned long tx_desc_sram_addr; + int tx_desc_sram_size; + int txq_count; + struct tx_queue txq[8]; +}; + + +/* port register accessors **************************************************/ +static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) +{ + return readl(mp->shared->base + offset); +} + +static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) +{ + return readl(mp->base + offset); +} + +static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) +{ + writel(data, mp->shared->base + offset); +} + +static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) +{ + writel(data, mp->base + offset); +} + + +/* rxq/txq helper functions *************************************************/ +static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) +{ + return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); +} + +static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) +{ + return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); +} + +static void rxq_enable(struct rx_queue *rxq) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + wrlp(mp, RXQ_COMMAND, 1 << rxq->index); +} + +static void rxq_disable(struct rx_queue *rxq) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + u8 mask = 1 << rxq->index; + + wrlp(mp, RXQ_COMMAND, mask << 8); + while (rdlp(mp, RXQ_COMMAND) & mask) + udelay(10); +} + +static void txq_reset_hw_ptr(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + u32 addr; + + addr = (u32)txq->tx_desc_dma; + addr += txq->tx_curr_desc * sizeof(struct tx_desc); + wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); +} + +static void txq_enable(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + wrlp(mp, TXQ_COMMAND, 1 << txq->index); +} + +static void txq_disable(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + u8 mask = 1 << txq->index; + + wrlp(mp, TXQ_COMMAND, mask << 8); + while (rdlp(mp, TXQ_COMMAND) & mask) + udelay(10); +} + +static void txq_maybe_wake(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); + + if (netif_tx_queue_stopped(nq)) { + __netif_tx_lock(nq, smp_processor_id()); + if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) + netif_tx_wake_queue(nq); + __netif_tx_unlock(nq); + } +} + + +/* rx napi ******************************************************************/ +static int +mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph, + u64 *hdr_flags, void *priv) +{ + unsigned long cmd_sts = (unsigned long)priv; + + /* + * Make sure that this packet is Ethernet II, is not VLAN + * tagged, is IPv4, has a valid IP header, and is TCP. 
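+	 * All five status bits are checked with a single mask and
+	 * compare: the result must have the Ethernet II, IPv4 and
+	 * IP-header-OK bits set, the layer-4 type field equal to
+	 * TCP-over-IPv4 (zero), and the VLAN-tagged bit clear.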
+ */ + if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | + RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK | + RX_PKT_IS_VLAN_TAGGED)) != + (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | + RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4)) + return -1; + + skb_reset_network_header(skb); + skb_set_transport_header(skb, ip_hdrlen(skb)); + *iphdr = ip_hdr(skb); + *tcph = tcp_hdr(skb); + *hdr_flags = LRO_IPV4 | LRO_TCP; + + return 0; +} + +static int rxq_process(struct rx_queue *rxq, int budget) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + struct net_device_stats *stats = &mp->dev->stats; + int lro_flush_needed; + int rx; + + lro_flush_needed = 0; + rx = 0; + while (rx < budget && rxq->rx_desc_count) { + struct rx_desc *rx_desc; + unsigned int cmd_sts; + struct sk_buff *skb; + u16 byte_cnt; + + rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; + + cmd_sts = rx_desc->cmd_sts; + if (cmd_sts & BUFFER_OWNED_BY_DMA) + break; + rmb(); + + skb = rxq->rx_skb[rxq->rx_curr_desc]; + rxq->rx_skb[rxq->rx_curr_desc] = NULL; + + rxq->rx_curr_desc++; + if (rxq->rx_curr_desc == rxq->rx_ring_size) + rxq->rx_curr_desc = 0; + + dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, + rx_desc->buf_size, DMA_FROM_DEVICE); + rxq->rx_desc_count--; + rx++; + + mp->work_rx_refill |= 1 << rxq->index; + + byte_cnt = rx_desc->byte_cnt; + + /* + * Update statistics. + * + * Note that the descriptor byte count includes 2 dummy + * bytes automatically inserted by the hardware at the + * start of the packet (which we don't count), and a 4 + * byte CRC at the end of the packet (which we do count). + */ + stats->rx_packets++; + stats->rx_bytes += byte_cnt - 2; + + /* + * In case we received a packet without first / last bits + * on, or the error summary bit is set, the packet needs + * to be dropped. 
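+	 * (A frame larger than the receive buffer spans several
+	 * descriptors and therefore lacks both bits on any single
+	 * descriptor; such frames are dropped below with a
+	 * rate-limited log message.)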
+ */ + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) + != (RX_FIRST_DESC | RX_LAST_DESC)) + goto err; + + /* + * The -4 is for the CRC in the trailer of the + * received packet + */ + skb_put(skb, byte_cnt - 2 - 4); + + if (cmd_sts & LAYER_4_CHECKSUM_OK) + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->protocol = eth_type_trans(skb, mp->dev); + + if (skb->dev->features & NETIF_F_LRO && + skb->ip_summed == CHECKSUM_UNNECESSARY) { + lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts); + lro_flush_needed = 1; + } else + netif_receive_skb(skb); + + continue; + +err: + stats->rx_dropped++; + + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) { + if (net_ratelimit()) + netdev_err(mp->dev, + "received packet spanning multiple descriptors\n"); + } + + if (cmd_sts & ERROR_SUMMARY) + stats->rx_errors++; + + dev_kfree_skb(skb); + } + + if (lro_flush_needed) + lro_flush_all(&rxq->lro_mgr); + + if (rx < budget) + mp->work_rx &= ~(1 << rxq->index); + + return rx; +} + +static int rxq_refill(struct rx_queue *rxq, int budget) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + int refilled; + + refilled = 0; + while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { + struct sk_buff *skb; + int rx; + struct rx_desc *rx_desc; + int size; + + skb = __skb_dequeue(&mp->rx_recycle); + if (skb == NULL) + skb = dev_alloc_skb(mp->skb_size); + + if (skb == NULL) { + mp->oom = 1; + goto oom; + } + + if (SKB_DMA_REALIGN) + skb_reserve(skb, SKB_DMA_REALIGN); + + refilled++; + rxq->rx_desc_count++; + + rx = rxq->rx_used_desc++; + if (rxq->rx_used_desc == rxq->rx_ring_size) + rxq->rx_used_desc = 0; + + rx_desc = rxq->rx_desc_area + rx; + + size = skb->end - skb->data; + rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, + skb->data, size, + DMA_FROM_DEVICE); + rx_desc->buf_size = size; + rxq->rx_skb[rx] = skb; + wmb(); + rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; + wmb(); + + /* + * The hardware automatically prepends 2 bytes of + * dummy data to each received packet, so that the + * IP header ends up 16-byte aligned. + */ + skb_reserve(skb, 2); + } + + if (refilled < budget) + mp->work_rx_refill &= ~(1 << rxq->index); + +oom: + return refilled; +} + + +/* tx ***********************************************************************/ +static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) +{ + int frag; + + for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { + skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; + if (fragp->size <= 8 && fragp->page_offset & 7) + return 1; + } + + return 0; +} + +static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int nr_frags = skb_shinfo(skb)->nr_frags; + int frag; + + for (frag = 0; frag < nr_frags; frag++) { + skb_frag_t *this_frag; + int tx_index; + struct tx_desc *desc; + + this_frag = &skb_shinfo(skb)->frags[frag]; + tx_index = txq->tx_curr_desc++; + if (txq->tx_curr_desc == txq->tx_ring_size) + txq->tx_curr_desc = 0; + desc = &txq->tx_desc_area[tx_index]; + + /* + * The last fragment will generate an interrupt + * which will free the skb on TX completion. 
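+		 * Intermediate fragments are handed over with only
+		 * BUFFER_OWNED_BY_DMA set; the last one additionally
+		 * carries ZERO_PADDING, TX_LAST_DESC and
+		 * TX_ENABLE_INTERRUPT.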
+ */ + if (frag == nr_frags - 1) { + desc->cmd_sts = BUFFER_OWNED_BY_DMA | + ZERO_PADDING | TX_LAST_DESC | + TX_ENABLE_INTERRUPT; + } else { + desc->cmd_sts = BUFFER_OWNED_BY_DMA; + } + + desc->l4i_chk = 0; + desc->byte_cnt = this_frag->size; + desc->buf_ptr = dma_map_page(mp->dev->dev.parent, + this_frag->page, + this_frag->page_offset, + this_frag->size, DMA_TO_DEVICE); + } +} + +static inline __be16 sum16_as_be(__sum16 sum) +{ + return (__force __be16)sum; +} + +static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int nr_frags = skb_shinfo(skb)->nr_frags; + int tx_index; + struct tx_desc *desc; + u32 cmd_sts; + u16 l4i_chk; + int length; + + cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; + l4i_chk = 0; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + int hdr_len; + int tag_bytes; + + BUG_ON(skb->protocol != htons(ETH_P_IP) && + skb->protocol != htons(ETH_P_8021Q)); + + hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; + tag_bytes = hdr_len - ETH_HLEN; + if (skb->len - hdr_len > mp->shared->tx_csum_limit || + unlikely(tag_bytes & ~12)) { + if (skb_checksum_help(skb) == 0) + goto no_csum; + kfree_skb(skb); + return 1; + } + + if (tag_bytes & 4) + cmd_sts |= MAC_HDR_EXTRA_4_BYTES; + if (tag_bytes & 8) + cmd_sts |= MAC_HDR_EXTRA_8_BYTES; + + cmd_sts |= GEN_TCP_UDP_CHECKSUM | + GEN_IP_V4_CHECKSUM | + ip_hdr(skb)->ihl << TX_IHL_SHIFT; + + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + cmd_sts |= UDP_FRAME; + l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check)); + break; + case IPPROTO_TCP: + l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check)); + break; + default: + BUG(); + } + } else { +no_csum: + /* Errata BTS #50, IHL must be 5 if no HW checksum */ + cmd_sts |= 5 << TX_IHL_SHIFT; + } + + tx_index = txq->tx_curr_desc++; + if (txq->tx_curr_desc == txq->tx_ring_size) + txq->tx_curr_desc = 0; + desc = &txq->tx_desc_area[tx_index]; + + if (nr_frags) { + txq_submit_frag_skb(txq, skb); + length = skb_headlen(skb); + } else { + cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; + length = skb->len; + } + + desc->l4i_chk = l4i_chk; + desc->byte_cnt = length; + desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, + length, DMA_TO_DEVICE); + + __skb_queue_tail(&txq->tx_skb, skb); + + skb_tx_timestamp(skb); + + /* ensure all other descriptors are written before first cmd_sts */ + wmb(); + desc->cmd_sts = cmd_sts; + + /* clear TX_END status */ + mp->work_tx_end &= ~(1 << txq->index); + + /* ensure all descriptors are written before poking hardware */ + wmb(); + txq_enable(txq); + + txq->tx_desc_count += nr_frags + 1; + + return 0; +} + +static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int length, queue; + struct tx_queue *txq; + struct netdev_queue *nq; + + queue = skb_get_queue_mapping(skb); + txq = mp->txq + queue; + nq = netdev_get_tx_queue(dev, queue); + + if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { + txq->tx_dropped++; + netdev_printk(KERN_DEBUG, dev, + "failed to linearize skb with tiny unaligned fragment\n"); + return NETDEV_TX_BUSY; + } + + if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { + if (net_ratelimit()) + netdev_err(dev, "tx queue full?!\n"); + kfree_skb(skb); + return NETDEV_TX_OK; + } + + length = skb->len; + + if (!txq_submit_skb(txq, skb)) { + int entries_left; + + txq->tx_bytes += length; + txq->tx_packets++; + + entries_left = txq->tx_ring_size - 
txq->tx_desc_count; + if (entries_left < MAX_SKB_FRAGS + 1) + netif_tx_stop_queue(nq); + } + + return NETDEV_TX_OK; +} + + +/* tx napi ******************************************************************/ +static void txq_kick(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); + u32 hw_desc_ptr; + u32 expected_ptr; + + __netif_tx_lock(nq, smp_processor_id()); + + if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) + goto out; + + hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); + expected_ptr = (u32)txq->tx_desc_dma + + txq->tx_curr_desc * sizeof(struct tx_desc); + + if (hw_desc_ptr != expected_ptr) + txq_enable(txq); + +out: + __netif_tx_unlock(nq); + + mp->work_tx_end &= ~(1 << txq->index); +} + +static int txq_reclaim(struct tx_queue *txq, int budget, int force) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); + int reclaimed; + + __netif_tx_lock(nq, smp_processor_id()); + + reclaimed = 0; + while (reclaimed < budget && txq->tx_desc_count > 0) { + int tx_index; + struct tx_desc *desc; + u32 cmd_sts; + struct sk_buff *skb; + + tx_index = txq->tx_used_desc; + desc = &txq->tx_desc_area[tx_index]; + cmd_sts = desc->cmd_sts; + + if (cmd_sts & BUFFER_OWNED_BY_DMA) { + if (!force) + break; + desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; + } + + txq->tx_used_desc = tx_index + 1; + if (txq->tx_used_desc == txq->tx_ring_size) + txq->tx_used_desc = 0; + + reclaimed++; + txq->tx_desc_count--; + + skb = NULL; + if (cmd_sts & TX_LAST_DESC) + skb = __skb_dequeue(&txq->tx_skb); + + if (cmd_sts & ERROR_SUMMARY) { + netdev_info(mp->dev, "tx error\n"); + mp->dev->stats.tx_errors++; + } + + if (cmd_sts & TX_FIRST_DESC) { + dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, + desc->byte_cnt, DMA_TO_DEVICE); + } else { + dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr, + desc->byte_cnt, DMA_TO_DEVICE); + } + + if (skb != NULL) { + if (skb_queue_len(&mp->rx_recycle) < + mp->rx_ring_size && + skb_recycle_check(skb, mp->skb_size)) + __skb_queue_head(&mp->rx_recycle, skb); + else + dev_kfree_skb(skb); + } + } + + __netif_tx_unlock(nq); + + if (reclaimed < budget) + mp->work_tx &= ~(1 << txq->index); + + return reclaimed; +} + + +/* tx rate control **********************************************************/ +/* + * Set total maximum TX rate (shared by all TX queues for this port) + * to 'rate' bits per second, with a maximum burst of 'burst' bytes. 
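+ *
+ * Worked example (t_clk is board-specific; assume 133 MHz here):
+ * for rate = 1000000000 (1 Gb/s) and burst = 16777216 bytes,
+ *
+ *   token_rate  = ((1000000000 / 1000) * 64) / (133000000 / 1000)
+ *               = 64000000 / 133000 = 481   (below the 1023 cap)
+ *   bucket_size = (16777216 + 255) >> 8 = 65536, clamped to 65535
+ *
+ * as computed by the code below.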
+ */ +static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) +{ + int token_rate; + int mtu; + int bucket_size; + + token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); + if (token_rate > 1023) + token_rate = 1023; + + mtu = (mp->dev->mtu + 255) >> 8; + if (mtu > 63) + mtu = 63; + + bucket_size = (burst + 255) >> 8; + if (bucket_size > 65535) + bucket_size = 65535; + + switch (mp->shared->tx_bw_control) { + case TX_BW_CONTROL_OLD_LAYOUT: + wrlp(mp, TX_BW_RATE, token_rate); + wrlp(mp, TX_BW_MTU, mtu); + wrlp(mp, TX_BW_BURST, bucket_size); + break; + case TX_BW_CONTROL_NEW_LAYOUT: + wrlp(mp, TX_BW_RATE_MOVED, token_rate); + wrlp(mp, TX_BW_MTU_MOVED, mtu); + wrlp(mp, TX_BW_BURST_MOVED, bucket_size); + break; + } +} + +static void txq_set_rate(struct tx_queue *txq, int rate, int burst) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int token_rate; + int bucket_size; + + token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); + if (token_rate > 1023) + token_rate = 1023; + + bucket_size = (burst + 255) >> 8; + if (bucket_size > 65535) + bucket_size = 65535; + + wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); + wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); +} + +static void txq_set_fixed_prio_mode(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int off; + u32 val; + + /* + * Turn on fixed priority mode. + */ + off = 0; + switch (mp->shared->tx_bw_control) { + case TX_BW_CONTROL_OLD_LAYOUT: + off = TXQ_FIX_PRIO_CONF; + break; + case TX_BW_CONTROL_NEW_LAYOUT: + off = TXQ_FIX_PRIO_CONF_MOVED; + break; + } + + if (off) { + val = rdlp(mp, off); + val |= 1 << txq->index; + wrlp(mp, off, val); + } +} + + +/* mii management interface *************************************************/ +static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) +{ + struct mv643xx_eth_shared_private *msp = dev_id; + + if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) { + writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE); + wake_up(&msp->smi_busy_wait); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int smi_is_done(struct mv643xx_eth_shared_private *msp) +{ + return !(readl(msp->base + SMI_REG) & SMI_BUSY); +} + +static int smi_wait_ready(struct mv643xx_eth_shared_private *msp) +{ + if (msp->err_interrupt == NO_IRQ) { + int i; + + for (i = 0; !smi_is_done(msp); i++) { + if (i == 10) + return -ETIMEDOUT; + msleep(10); + } + + return 0; + } + + if (!smi_is_done(msp)) { + wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), + msecs_to_jiffies(100)); + if (!smi_is_done(msp)) + return -ETIMEDOUT; + } + + return 0; +} + +static int smi_bus_read(struct mii_bus *bus, int addr, int reg) +{ + struct mv643xx_eth_shared_private *msp = bus->priv; + void __iomem *smi_reg = msp->base + SMI_REG; + int ret; + + if (smi_wait_ready(msp)) { + pr_warn("SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); + + if (smi_wait_ready(msp)) { + pr_warn("SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + ret = readl(smi_reg); + if (!(ret & SMI_READ_VALID)) { + pr_warn("SMI bus read not valid\n"); + return -ENODEV; + } + + return ret & 0xffff; +} + +static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val) +{ + struct mv643xx_eth_shared_private *msp = bus->priv; + void __iomem *smi_reg = msp->base + SMI_REG; + + if (smi_wait_ready(msp)) { + pr_warn("SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + writel(SMI_OPCODE_WRITE | 
(reg << 21) | + (addr << 16) | (val & 0xffff), smi_reg); + + if (smi_wait_ready(msp)) { + pr_warn("SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + + +/* statistics ***************************************************************/ +static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + unsigned long tx_packets = 0; + unsigned long tx_bytes = 0; + unsigned long tx_dropped = 0; + int i; + + for (i = 0; i < mp->txq_count; i++) { + struct tx_queue *txq = mp->txq + i; + + tx_packets += txq->tx_packets; + tx_bytes += txq->tx_bytes; + tx_dropped += txq->tx_dropped; + } + + stats->tx_packets = tx_packets; + stats->tx_bytes = tx_bytes; + stats->tx_dropped = tx_dropped; + + return stats; +} + +static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp) +{ + u32 lro_aggregated = 0; + u32 lro_flushed = 0; + u32 lro_no_desc = 0; + int i; + + for (i = 0; i < mp->rxq_count; i++) { + struct rx_queue *rxq = mp->rxq + i; + + lro_aggregated += rxq->lro_mgr.stats.aggregated; + lro_flushed += rxq->lro_mgr.stats.flushed; + lro_no_desc += rxq->lro_mgr.stats.no_desc; + } + + mp->lro_counters.lro_aggregated = lro_aggregated; + mp->lro_counters.lro_flushed = lro_flushed; + mp->lro_counters.lro_no_desc = lro_no_desc; +} + +static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) +{ + return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); +} + +static void mib_counters_clear(struct mv643xx_eth_private *mp) +{ + int i; + + for (i = 0; i < 0x80; i += 4) + mib_read(mp, i); +} + +static void mib_counters_update(struct mv643xx_eth_private *mp) +{ + struct mib_counters *p = &mp->mib_counters; + + spin_lock_bh(&mp->mib_counters_lock); + p->good_octets_received += mib_read(mp, 0x00); + p->bad_octets_received += mib_read(mp, 0x08); + p->internal_mac_transmit_err += mib_read(mp, 0x0c); + p->good_frames_received += mib_read(mp, 0x10); + p->bad_frames_received += mib_read(mp, 0x14); + p->broadcast_frames_received += mib_read(mp, 0x18); + p->multicast_frames_received += mib_read(mp, 0x1c); + p->frames_64_octets += mib_read(mp, 0x20); + p->frames_65_to_127_octets += mib_read(mp, 0x24); + p->frames_128_to_255_octets += mib_read(mp, 0x28); + p->frames_256_to_511_octets += mib_read(mp, 0x2c); + p->frames_512_to_1023_octets += mib_read(mp, 0x30); + p->frames_1024_to_max_octets += mib_read(mp, 0x34); + p->good_octets_sent += mib_read(mp, 0x38); + p->good_frames_sent += mib_read(mp, 0x40); + p->excessive_collision += mib_read(mp, 0x44); + p->multicast_frames_sent += mib_read(mp, 0x48); + p->broadcast_frames_sent += mib_read(mp, 0x4c); + p->unrec_mac_control_received += mib_read(mp, 0x50); + p->fc_sent += mib_read(mp, 0x54); + p->good_fc_received += mib_read(mp, 0x58); + p->bad_fc_received += mib_read(mp, 0x5c); + p->undersize_received += mib_read(mp, 0x60); + p->fragments_received += mib_read(mp, 0x64); + p->oversize_received += mib_read(mp, 0x68); + p->jabber_received += mib_read(mp, 0x6c); + p->mac_receive_error += mib_read(mp, 0x70); + p->bad_crc_event += mib_read(mp, 0x74); + p->collision += mib_read(mp, 0x78); + p->late_collision += mib_read(mp, 0x7c); + spin_unlock_bh(&mp->mib_counters_lock); + + mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); +} + +static void mib_counters_timer_wrapper(unsigned long _mp) +{ + struct mv643xx_eth_private *mp = (void *)_mp; + + mib_counters_update(mp); +} + + +/* interrupt coalescing 
*****************************************************/ +/* + * Hardware coalescing parameters are set in units of 64 t_clk + * cycles. I.e.: + * + * coal_delay_in_usec = 64000000 * register_value / t_clk_rate + * + * register_value = coal_delay_in_usec * t_clk_rate / 64000000 + * + * In the ->set*() methods, we round the computed register value + * to the nearest integer. + */ +static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) +{ + u32 val = rdlp(mp, SDMA_CONFIG); + u64 temp; + + if (mp->shared->extended_rx_coal_limit) + temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); + else + temp = (val & 0x003fff00) >> 8; + + temp *= 64000000; + do_div(temp, mp->shared->t_clk); + + return (unsigned int)temp; +} + +static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) +{ + u64 temp; + u32 val; + + temp = (u64)usec * mp->shared->t_clk; + temp += 31999999; + do_div(temp, 64000000); + + val = rdlp(mp, SDMA_CONFIG); + if (mp->shared->extended_rx_coal_limit) { + if (temp > 0xffff) + temp = 0xffff; + val &= ~0x023fff80; + val |= (temp & 0x8000) << 10; + val |= (temp & 0x7fff) << 7; + } else { + if (temp > 0x3fff) + temp = 0x3fff; + val &= ~0x003fff00; + val |= (temp & 0x3fff) << 8; + } + wrlp(mp, SDMA_CONFIG, val); +} + +static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) +{ + u64 temp; + + temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; + temp *= 64000000; + do_div(temp, mp->shared->t_clk); + + return (unsigned int)temp; +} + +static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) +{ + u64 temp; + + temp = (u64)usec * mp->shared->t_clk; + temp += 31999999; + do_div(temp, 64000000); + + if (temp > 0x3fff) + temp = 0x3fff; + + wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); +} + + +/* ethtool ******************************************************************/ +struct mv643xx_eth_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int netdev_off; + int mp_off; +}; + +#define SSTAT(m) \ + { #m, FIELD_SIZEOF(struct net_device_stats, m), \ + offsetof(struct net_device, stats.m), -1 } + +#define MIBSTAT(m) \ + { #m, FIELD_SIZEOF(struct mib_counters, m), \ + -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } + +#define LROSTAT(m) \ + { #m, FIELD_SIZEOF(struct lro_counters, m), \ + -1, offsetof(struct mv643xx_eth_private, lro_counters.m) } + +static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { + SSTAT(rx_packets), + SSTAT(tx_packets), + SSTAT(rx_bytes), + SSTAT(tx_bytes), + SSTAT(rx_errors), + SSTAT(tx_errors), + SSTAT(rx_dropped), + SSTAT(tx_dropped), + MIBSTAT(good_octets_received), + MIBSTAT(bad_octets_received), + MIBSTAT(internal_mac_transmit_err), + MIBSTAT(good_frames_received), + MIBSTAT(bad_frames_received), + MIBSTAT(broadcast_frames_received), + MIBSTAT(multicast_frames_received), + MIBSTAT(frames_64_octets), + MIBSTAT(frames_65_to_127_octets), + MIBSTAT(frames_128_to_255_octets), + MIBSTAT(frames_256_to_511_octets), + MIBSTAT(frames_512_to_1023_octets), + MIBSTAT(frames_1024_to_max_octets), + MIBSTAT(good_octets_sent), + MIBSTAT(good_frames_sent), + MIBSTAT(excessive_collision), + MIBSTAT(multicast_frames_sent), + MIBSTAT(broadcast_frames_sent), + MIBSTAT(unrec_mac_control_received), + MIBSTAT(fc_sent), + MIBSTAT(good_fc_received), + MIBSTAT(bad_fc_received), + MIBSTAT(undersize_received), + MIBSTAT(fragments_received), + MIBSTAT(oversize_received), + MIBSTAT(jabber_received), + MIBSTAT(mac_receive_error), + MIBSTAT(bad_crc_event), + MIBSTAT(collision), + MIBSTAT(late_collision), + 
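+	/* LRO statistics, gathered by mv643xx_eth_grab_lro_stats() */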
LROSTAT(lro_aggregated), + LROSTAT(lro_flushed), + LROSTAT(lro_no_desc), +}; + +static int +mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp, + struct ethtool_cmd *cmd) +{ + int err; + + err = phy_read_status(mp->phy); + if (err == 0) + err = phy_ethtool_gset(mp->phy, cmd); + + /* + * The MAC does not support 1000baseT_Half. + */ + cmd->supported &= ~SUPPORTED_1000baseT_Half; + cmd->advertising &= ~ADVERTISED_1000baseT_Half; + + return err; +} + +static int +mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, + struct ethtool_cmd *cmd) +{ + u32 port_status; + + port_status = rdlp(mp, PORT_STATUS); + + cmd->supported = SUPPORTED_MII; + cmd->advertising = ADVERTISED_MII; + switch (port_status & PORT_SPEED_MASK) { + case PORT_SPEED_10: + ethtool_cmd_speed_set(cmd, SPEED_10); + break; + case PORT_SPEED_100: + ethtool_cmd_speed_set(cmd, SPEED_100); + break; + case PORT_SPEED_1000: + ethtool_cmd_speed_set(cmd, SPEED_1000); + break; + default: + cmd->speed = -1; + break; + } + cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; + cmd->port = PORT_MII; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_DISABLE; + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + + return 0; +} + +static int +mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (mp->phy != NULL) + return mv643xx_eth_get_settings_phy(mp, cmd); + else + return mv643xx_eth_get_settings_phyless(mp, cmd); +} + +static int +mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (mp->phy == NULL) + return -EINVAL; + + /* + * The MAC does not support 1000baseT_Half. + */ + cmd->advertising &= ~ADVERTISED_1000baseT_Half; + + return phy_ethtool_sset(mp->phy, cmd); +} + +static void mv643xx_eth_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32); + strncpy(drvinfo->version, mv643xx_eth_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, "platform", 32); + drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); +} + +static int mv643xx_eth_nway_reset(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (mp->phy == NULL) + return -EINVAL; + + return genphy_restart_aneg(mp->phy); +} + +static int +mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + ec->rx_coalesce_usecs = get_rx_coal(mp); + ec->tx_coalesce_usecs = get_tx_coal(mp); + + return 0; +} + +static int +mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + set_rx_coal(mp, ec->rx_coalesce_usecs); + set_tx_coal(mp, ec->tx_coalesce_usecs); + + return 0; +} + +static void +mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + er->rx_max_pending = 4096; + er->tx_max_pending = 4096; + er->rx_mini_max_pending = 0; + er->rx_jumbo_max_pending = 0; + + er->rx_pending = mp->rx_ring_size; + er->tx_pending = mp->tx_ring_size; + er->rx_mini_pending = 0; + er->rx_jumbo_pending = 0; +} + +static int +mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (er->rx_mini_pending || 
er->rx_jumbo_pending) + return -EINVAL; + + mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; + mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096; + + if (netif_running(dev)) { + mv643xx_eth_stop(dev); + if (mv643xx_eth_open(dev)) { + netdev_err(dev, + "fatal error on re-opening device after ring param change\n"); + return -ENOMEM; + } + } + + return 0; +} + + +static int +mv643xx_eth_set_features(struct net_device *dev, u32 features) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + u32 rx_csum = features & NETIF_F_RXCSUM; + + wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); + + return 0; +} + +static void mv643xx_eth_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *data) +{ + int i; + + if (stringset == ETH_SS_STATS) { + for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { + memcpy(data + i * ETH_GSTRING_LEN, + mv643xx_eth_stats[i].stat_string, + ETH_GSTRING_LEN); + } + } +} + +static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + uint64_t *data) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int i; + + mv643xx_eth_get_stats(dev); + mib_counters_update(mp); + mv643xx_eth_grab_lro_stats(mp); + + for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { + const struct mv643xx_eth_stats *stat; + void *p; + + stat = mv643xx_eth_stats + i; + + if (stat->netdev_off >= 0) + p = ((void *)mp->dev) + stat->netdev_off; + else + p = ((void *)mp) + stat->mp_off; + + data[i] = (stat->sizeof_stat == 8) ? + *(uint64_t *)p : *(uint32_t *)p; + } +} + +static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(mv643xx_eth_stats); + + return -EOPNOTSUPP; +} + +static const struct ethtool_ops mv643xx_eth_ethtool_ops = { + .get_settings = mv643xx_eth_get_settings, + .set_settings = mv643xx_eth_set_settings, + .get_drvinfo = mv643xx_eth_get_drvinfo, + .nway_reset = mv643xx_eth_nway_reset, + .get_link = ethtool_op_get_link, + .get_coalesce = mv643xx_eth_get_coalesce, + .set_coalesce = mv643xx_eth_set_coalesce, + .get_ringparam = mv643xx_eth_get_ringparam, + .set_ringparam = mv643xx_eth_set_ringparam, + .get_strings = mv643xx_eth_get_strings, + .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, + .get_sset_count = mv643xx_eth_get_sset_count, +}; + + +/* address handling *********************************************************/ +static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) +{ + unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); + unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); + + addr[0] = (mac_h >> 24) & 0xff; + addr[1] = (mac_h >> 16) & 0xff; + addr[2] = (mac_h >> 8) & 0xff; + addr[3] = mac_h & 0xff; + addr[4] = (mac_l >> 8) & 0xff; + addr[5] = mac_l & 0xff; +} + +static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) +{ + wrlp(mp, MAC_ADDR_HIGH, + (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); + wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); +} + +static u32 uc_addr_filter_mask(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + u32 nibbles; + + if (dev->flags & IFF_PROMISC) + return 0; + + nibbles = 1 << (dev->dev_addr[5] & 0x0f); + netdev_for_each_uc_addr(ha, dev) { + if (memcmp(dev->dev_addr, ha->addr, 5)) + return 0; + if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) + return 0; + + nibbles |= 1 << (ha->addr[5] & 0x0f); + } + + return nibbles; +} + +static void mv643xx_eth_program_unicast_filter(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = 
netdev_priv(dev); + u32 port_config; + u32 nibbles; + int i; + + uc_addr_set(mp, dev->dev_addr); + + port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; + + nibbles = uc_addr_filter_mask(dev); + if (!nibbles) { + port_config |= UNICAST_PROMISCUOUS_MODE; + nibbles = 0xffff; + } + + for (i = 0; i < 16; i += 4) { + int off = UNICAST_TABLE(mp->port_num) + i; + u32 v; + + v = 0; + if (nibbles & 1) + v |= 0x00000001; + if (nibbles & 2) + v |= 0x00000100; + if (nibbles & 4) + v |= 0x00010000; + if (nibbles & 8) + v |= 0x01000000; + nibbles >>= 4; + + wrl(mp, off, v); + } + + wrlp(mp, PORT_CONFIG, port_config); +} + +static int addr_crc(unsigned char *addr) +{ + int crc = 0; + int i; + + for (i = 0; i < 6; i++) { + int j; + + crc = (crc ^ addr[i]) << 8; + for (j = 7; j >= 0; j--) { + if (crc & (0x100 << j)) + crc ^= 0x107 << j; + } + } + + return crc; +} + +static void mv643xx_eth_program_multicast_filter(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + u32 *mc_spec; + u32 *mc_other; + struct netdev_hw_addr *ha; + int i; + + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + int port_num; + u32 accept; + +oom: + port_num = mp->port_num; + accept = 0x01010101; + for (i = 0; i < 0x100; i += 4) { + wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); + wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); + } + return; + } + + mc_spec = kmalloc(0x200, GFP_ATOMIC); + if (mc_spec == NULL) + goto oom; + mc_other = mc_spec + (0x100 >> 2); + + memset(mc_spec, 0, 0x100); + memset(mc_other, 0, 0x100); + + netdev_for_each_mc_addr(ha, dev) { + u8 *a = ha->addr; + u32 *table; + int entry; + + if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { + table = mc_spec; + entry = a[5]; + } else { + table = mc_other; + entry = addr_crc(a); + } + + table[entry >> 2] |= 1 << (8 * (entry & 3)); + } + + for (i = 0; i < 0x100; i += 4) { + wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]); + wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]); + } + + kfree(mc_spec); +} + +static void mv643xx_eth_set_rx_mode(struct net_device *dev) +{ + mv643xx_eth_program_unicast_filter(dev); + mv643xx_eth_program_multicast_filter(dev); +} + +static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + + netif_addr_lock_bh(dev); + mv643xx_eth_program_unicast_filter(dev); + netif_addr_unlock_bh(dev); + + return 0; +} + + +/* rx/tx queue initialisation ***********************************************/ +static int rxq_init(struct mv643xx_eth_private *mp, int index) +{ + struct rx_queue *rxq = mp->rxq + index; + struct rx_desc *rx_desc; + int size; + int i; + + rxq->index = index; + + rxq->rx_ring_size = mp->rx_ring_size; + + rxq->rx_desc_count = 0; + rxq->rx_curr_desc = 0; + rxq->rx_used_desc = 0; + + size = rxq->rx_ring_size * sizeof(struct rx_desc); + + if (index == 0 && size <= mp->rx_desc_sram_size) { + rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, + mp->rx_desc_sram_size); + rxq->rx_desc_dma = mp->rx_desc_sram_addr; + } else { + rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, + size, &rxq->rx_desc_dma, + GFP_KERNEL); + } + + if (rxq->rx_desc_area == NULL) { + netdev_err(mp->dev, + "can't allocate rx ring (%d bytes)\n", size); + goto out; + } + memset(rxq->rx_desc_area, 0, size); + + rxq->rx_desc_area_size = size; + rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), + GFP_KERNEL); + if 
(rxq->rx_skb == NULL) { + netdev_err(mp->dev, "can't allocate rx skb ring\n"); + goto out_free; + } + + rx_desc = (struct rx_desc *)rxq->rx_desc_area; + for (i = 0; i < rxq->rx_ring_size; i++) { + int nexti; + + nexti = i + 1; + if (nexti == rxq->rx_ring_size) + nexti = 0; + + rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + + nexti * sizeof(struct rx_desc); + } + + rxq->lro_mgr.dev = mp->dev; + memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats)); + rxq->lro_mgr.features = LRO_F_NAPI; + rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; + rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; + rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr); + rxq->lro_mgr.max_aggr = 32; + rxq->lro_mgr.frag_align_pad = 0; + rxq->lro_mgr.lro_arr = rxq->lro_arr; + rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header; + + memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr)); + + return 0; + + +out_free: + if (index == 0 && size <= mp->rx_desc_sram_size) + iounmap(rxq->rx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, size, + rxq->rx_desc_area, + rxq->rx_desc_dma); + +out: + return -ENOMEM; +} + +static void rxq_deinit(struct rx_queue *rxq) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + int i; + + rxq_disable(rxq); + + for (i = 0; i < rxq->rx_ring_size; i++) { + if (rxq->rx_skb[i]) { + dev_kfree_skb(rxq->rx_skb[i]); + rxq->rx_desc_count--; + } + } + + if (rxq->rx_desc_count) { + netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", + rxq->rx_desc_count); + } + + if (rxq->index == 0 && + rxq->rx_desc_area_size <= mp->rx_desc_sram_size) + iounmap(rxq->rx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, + rxq->rx_desc_area, rxq->rx_desc_dma); + + kfree(rxq->rx_skb); +} + +static int txq_init(struct mv643xx_eth_private *mp, int index) +{ + struct tx_queue *txq = mp->txq + index; + struct tx_desc *tx_desc; + int size; + int i; + + txq->index = index; + + txq->tx_ring_size = mp->tx_ring_size; + + txq->tx_desc_count = 0; + txq->tx_curr_desc = 0; + txq->tx_used_desc = 0; + + size = txq->tx_ring_size * sizeof(struct tx_desc); + + if (index == 0 && size <= mp->tx_desc_sram_size) { + txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, + mp->tx_desc_sram_size); + txq->tx_desc_dma = mp->tx_desc_sram_addr; + } else { + txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, + size, &txq->tx_desc_dma, + GFP_KERNEL); + } + + if (txq->tx_desc_area == NULL) { + netdev_err(mp->dev, + "can't allocate tx ring (%d bytes)\n", size); + return -ENOMEM; + } + memset(txq->tx_desc_area, 0, size); + + txq->tx_desc_area_size = size; + + tx_desc = (struct tx_desc *)txq->tx_desc_area; + for (i = 0; i < txq->tx_ring_size; i++) { + struct tx_desc *txd = tx_desc + i; + int nexti; + + nexti = i + 1; + if (nexti == txq->tx_ring_size) + nexti = 0; + + txd->cmd_sts = 0; + txd->next_desc_ptr = txq->tx_desc_dma + + nexti * sizeof(struct tx_desc); + } + + skb_queue_head_init(&txq->tx_skb); + + return 0; +} + +static void txq_deinit(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + + txq_disable(txq); + txq_reclaim(txq, txq->tx_ring_size, 1); + + BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); + + if (txq->index == 0 && + txq->tx_desc_area_size <= mp->tx_desc_sram_size) + iounmap(txq->tx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, + txq->tx_desc_area, txq->tx_desc_dma); +} + + +/* netdev ops and related ***************************************************/ +static int mv643xx_eth_collect_events(struct mv643xx_eth_private 
*mp) +{ + u32 int_cause; + u32 int_cause_ext; + + int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; + if (int_cause == 0) + return 0; + + int_cause_ext = 0; + if (int_cause & INT_EXT) { + int_cause &= ~INT_EXT; + int_cause_ext = rdlp(mp, INT_CAUSE_EXT); + } + + if (int_cause) { + wrlp(mp, INT_CAUSE, ~int_cause); + mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & + ~(rdlp(mp, TXQ_COMMAND) & 0xff); + mp->work_rx |= (int_cause & INT_RX) >> 2; + } + + int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; + if (int_cause_ext) { + wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); + if (int_cause_ext & INT_EXT_LINK_PHY) + mp->work_link = 1; + mp->work_tx |= int_cause_ext & INT_EXT_TX; + } + + return 1; +} + +static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (unlikely(!mv643xx_eth_collect_events(mp))) + return IRQ_NONE; + + wrlp(mp, INT_MASK, 0); + napi_schedule(&mp->napi); + + return IRQ_HANDLED; +} + +static void handle_link_event(struct mv643xx_eth_private *mp) +{ + struct net_device *dev = mp->dev; + u32 port_status; + int speed; + int duplex; + int fc; + + port_status = rdlp(mp, PORT_STATUS); + if (!(port_status & LINK_UP)) { + if (netif_carrier_ok(dev)) { + int i; + + netdev_info(dev, "link down\n"); + + netif_carrier_off(dev); + + for (i = 0; i < mp->txq_count; i++) { + struct tx_queue *txq = mp->txq + i; + + txq_reclaim(txq, txq->tx_ring_size, 1); + txq_reset_hw_ptr(txq); + } + } + return; + } + + switch (port_status & PORT_SPEED_MASK) { + case PORT_SPEED_10: + speed = 10; + break; + case PORT_SPEED_100: + speed = 100; + break; + case PORT_SPEED_1000: + speed = 1000; + break; + default: + speed = -1; + break; + } + duplex = (port_status & FULL_DUPLEX) ? 1 : 0; + fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; + + netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", + speed, duplex ? "full" : "half", fc ? 
"en" : "dis"); + + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); +} + +static int mv643xx_eth_poll(struct napi_struct *napi, int budget) +{ + struct mv643xx_eth_private *mp; + int work_done; + + mp = container_of(napi, struct mv643xx_eth_private, napi); + + if (unlikely(mp->oom)) { + mp->oom = 0; + del_timer(&mp->rx_oom); + } + + work_done = 0; + while (work_done < budget) { + u8 queue_mask; + int queue; + int work_tbd; + + if (mp->work_link) { + mp->work_link = 0; + handle_link_event(mp); + work_done++; + continue; + } + + queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; + if (likely(!mp->oom)) + queue_mask |= mp->work_rx_refill; + + if (!queue_mask) { + if (mv643xx_eth_collect_events(mp)) + continue; + break; + } + + queue = fls(queue_mask) - 1; + queue_mask = 1 << queue; + + work_tbd = budget - work_done; + if (work_tbd > 16) + work_tbd = 16; + + if (mp->work_tx_end & queue_mask) { + txq_kick(mp->txq + queue); + } else if (mp->work_tx & queue_mask) { + work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); + txq_maybe_wake(mp->txq + queue); + } else if (mp->work_rx & queue_mask) { + work_done += rxq_process(mp->rxq + queue, work_tbd); + } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { + work_done += rxq_refill(mp->rxq + queue, work_tbd); + } else { + BUG(); + } + } + + if (work_done < budget) { + if (mp->oom) + mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); + napi_complete(napi); + wrlp(mp, INT_MASK, mp->int_mask); + } + + return work_done; +} + +static inline void oom_timer_wrapper(unsigned long data) +{ + struct mv643xx_eth_private *mp = (void *)data; + + napi_schedule(&mp->napi); +} + +static void phy_reset(struct mv643xx_eth_private *mp) +{ + int data; + + data = phy_read(mp->phy, MII_BMCR); + if (data < 0) + return; + + data |= BMCR_RESET; + if (phy_write(mp->phy, MII_BMCR, data) < 0) + return; + + do { + data = phy_read(mp->phy, MII_BMCR); + } while (data >= 0 && data & BMCR_RESET); +} + +static void port_start(struct mv643xx_eth_private *mp) +{ + u32 pscr; + int i; + + /* + * Perform PHY reset, if there is a PHY. + */ + if (mp->phy != NULL) { + struct ethtool_cmd cmd; + + mv643xx_eth_get_settings(mp->dev, &cmd); + phy_reset(mp); + mv643xx_eth_set_settings(mp->dev, &cmd); + } + + /* + * Configure basic link parameters. + */ + pscr = rdlp(mp, PORT_SERIAL_CONTROL); + + pscr |= SERIAL_PORT_ENABLE; + wrlp(mp, PORT_SERIAL_CONTROL, pscr); + + pscr |= DO_NOT_FORCE_LINK_FAIL; + if (mp->phy == NULL) + pscr |= FORCE_LINK_PASS; + wrlp(mp, PORT_SERIAL_CONTROL, pscr); + + /* + * Configure TX path and queues. + */ + tx_set_rate(mp, 1000000000, 16777216); + for (i = 0; i < mp->txq_count; i++) { + struct tx_queue *txq = mp->txq + i; + + txq_reset_hw_ptr(txq); + txq_set_rate(txq, 1000000000, 16777216); + txq_set_fixed_prio_mode(txq); + } + + /* + * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast + * frames to RX queue #0, and include the pseudo-header when + * calculating receive checksums. + */ + mv643xx_eth_set_features(mp->dev, mp->dev->features); + + /* + * Treat BPDUs as normal multicasts, and disable partition mode. + */ + wrlp(mp, PORT_CONFIG_EXT, 0x00000000); + + /* + * Add configured unicast addresses to address filter table. + */ + mv643xx_eth_program_unicast_filter(mp->dev); + + /* + * Enable the receive queues. 
+ */ + for (i = 0; i < mp->rxq_count; i++) { + struct rx_queue *rxq = mp->rxq + i; + u32 addr; + + addr = (u32)rxq->rx_desc_dma; + addr += rxq->rx_curr_desc * sizeof(struct rx_desc); + wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); + + rxq_enable(rxq); + } +} + +static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) +{ + int skb_size; + + /* + * Reserve 2+14 bytes for an ethernet header (the hardware + * automatically prepends 2 bytes of dummy data to each + * received packet), 16 bytes for up to four VLAN tags, and + * 4 bytes for the trailing FCS -- 36 bytes total. + */ + skb_size = mp->dev->mtu + 36; + + /* + * Make sure that the skb size is a multiple of 8 bytes, as + * the lower three bits of the receive descriptor's buffer + * size field are ignored by the hardware. + */ + mp->skb_size = (skb_size + 7) & ~7; + + /* + * If NET_SKB_PAD is smaller than a cache line, + * netdev_alloc_skb() will cause skb->data to be misaligned + * to a cache line boundary. If this is the case, include + * some extra space to allow re-aligning the data area. + */ + mp->skb_size += SKB_DMA_REALIGN; +} + +static int mv643xx_eth_open(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int err; + int i; + + wrlp(mp, INT_CAUSE, 0); + wrlp(mp, INT_CAUSE_EXT, 0); + rdlp(mp, INT_CAUSE_EXT); + + err = request_irq(dev->irq, mv643xx_eth_irq, + IRQF_SHARED, dev->name, dev); + if (err) { + netdev_err(dev, "can't assign irq\n"); + return -EAGAIN; + } + + mv643xx_eth_recalc_skb_size(mp); + + napi_enable(&mp->napi); + + skb_queue_head_init(&mp->rx_recycle); + + mp->int_mask = INT_EXT; + + for (i = 0; i < mp->rxq_count; i++) { + err = rxq_init(mp, i); + if (err) { + while (--i >= 0) + rxq_deinit(mp->rxq + i); + goto out; + } + + rxq_refill(mp->rxq + i, INT_MAX); + mp->int_mask |= INT_RX_0 << i; + } + + if (mp->oom) { + mp->rx_oom.expires = jiffies + (HZ / 10); + add_timer(&mp->rx_oom); + } + + for (i = 0; i < mp->txq_count; i++) { + err = txq_init(mp, i); + if (err) { + while (--i >= 0) + txq_deinit(mp->txq + i); + goto out_free; + } + mp->int_mask |= INT_TX_END_0 << i; + } + + port_start(mp); + + wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); + wrlp(mp, INT_MASK, mp->int_mask); + + return 0; + + +out_free: + for (i = 0; i < mp->rxq_count; i++) + rxq_deinit(mp->rxq + i); +out: + free_irq(dev->irq, dev); + + return err; +} + +static void port_reset(struct mv643xx_eth_private *mp) +{ + unsigned int data; + int i; + + for (i = 0; i < mp->rxq_count; i++) + rxq_disable(mp->rxq + i); + for (i = 0; i < mp->txq_count; i++) + txq_disable(mp->txq + i); + + while (1) { + u32 ps = rdlp(mp, PORT_STATUS); + + if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) + break; + udelay(10); + } + + /* Reset the Enable bit in the Configuration Register */ + data = rdlp(mp, PORT_SERIAL_CONTROL); + data &= ~(SERIAL_PORT_ENABLE | + DO_NOT_FORCE_LINK_FAIL | + FORCE_LINK_PASS); + wrlp(mp, PORT_SERIAL_CONTROL, data); +} + +static int mv643xx_eth_stop(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int i; + + wrlp(mp, INT_MASK_EXT, 0x00000000); + wrlp(mp, INT_MASK, 0x00000000); + rdlp(mp, INT_MASK); + + napi_disable(&mp->napi); + + del_timer_sync(&mp->rx_oom); + + netif_carrier_off(dev); + + free_irq(dev->irq, dev); + + port_reset(mp); + mv643xx_eth_get_stats(dev); + mib_counters_update(mp); + del_timer_sync(&mp->mib_counters_timer); + + skb_queue_purge(&mp->rx_recycle); + + for (i = 0; i < mp->rxq_count; i++) + rxq_deinit(mp->rxq + i); + for (i = 0; i < 
mp->txq_count; i++) + txq_deinit(mp->txq + i); + + return 0; +} + +static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (mp->phy != NULL) + return phy_mii_ioctl(mp->phy, ifr, cmd); + + return -EOPNOTSUPP; +} + +static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (new_mtu < 64 || new_mtu > 9500) + return -EINVAL; + + dev->mtu = new_mtu; + mv643xx_eth_recalc_skb_size(mp); + tx_set_rate(mp, 1000000000, 16777216); + + if (!netif_running(dev)) + return 0; + + /* + * Stop and then re-open the interface. This will allocate RX + * skbs of the new MTU. + * There is a possible danger that the open will not succeed, + * due to memory being full. + */ + mv643xx_eth_stop(dev); + if (mv643xx_eth_open(dev)) { + netdev_err(dev, + "fatal error on re-opening device after MTU change\n"); + } + + return 0; +} + +static void tx_timeout_task(struct work_struct *ugly) +{ + struct mv643xx_eth_private *mp; + + mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); + if (netif_running(mp->dev)) { + netif_tx_stop_all_queues(mp->dev); + port_reset(mp); + port_start(mp); + netif_tx_wake_all_queues(mp->dev); + } +} + +static void mv643xx_eth_tx_timeout(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + netdev_info(dev, "tx timeout\n"); + + schedule_work(&mp->tx_timeout_task); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void mv643xx_eth_netpoll(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + wrlp(mp, INT_MASK, 0x00000000); + rdlp(mp, INT_MASK); + + mv643xx_eth_irq(dev->irq, dev); + + wrlp(mp, INT_MASK, mp->int_mask); +} +#endif + + +/* platform glue ************************************************************/ +static void +mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, + struct mbus_dram_target_info *dram) +{ + void __iomem *base = msp->base; + u32 win_enable; + u32 win_protect; + int i; + + for (i = 0; i < 6; i++) { + writel(0, base + WINDOW_BASE(i)); + writel(0, base + WINDOW_SIZE(i)); + if (i < 4) + writel(0, base + WINDOW_REMAP_HIGH(i)); + } + + win_enable = 0x3f; + win_protect = 0; + + for (i = 0; i < dram->num_cs; i++) { + struct mbus_dram_window *cs = dram->cs + i; + + writel((cs->base & 0xffff0000) | + (cs->mbus_attr << 8) | + dram->mbus_dram_target_id, base + WINDOW_BASE(i)); + writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); + + win_enable &= ~(1 << i); + win_protect |= 3 << (2 * i); + } + + writel(win_enable, base + WINDOW_BAR_ENABLE); + msp->win_protect = win_protect; +} + +static void infer_hw_params(struct mv643xx_eth_shared_private *msp) +{ + /* + * Check whether we have a 14-bit coal limit field in bits + * [21:8], or a 16-bit coal limit in bits [25,21:7] of the + * SDMA config register. + */ + writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); + if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) + msp->extended_rx_coal_limit = 1; + else + msp->extended_rx_coal_limit = 0; + + /* + * Check whether the MAC supports TX rate control, and if + * yes, whether its associated registers are in the old or + * the new place. 
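+	 *
+	 * Both checks below use the same write/read-back probe: write a
+	 * known value to the candidate register and read it back.  If the
+	 * value sticks (for instance, writing 1 to TX_BW_MTU_MOVED and
+	 * reading 1 back), the register, and hence the feature, exists;
+	 * otherwise the write was discarded, and either the old register
+	 * layout or no TX rate control at all is assumed.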
+ */
+	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
+	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
+		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
+	} else {
+		writel(7, msp->base + 0x0400 + TX_BW_RATE);
+		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
+			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
+		else
+			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
+	}
+}
+
+static int mv643xx_eth_shared_probe(struct platform_device *pdev)
+{
+	static int mv643xx_eth_version_printed;
+	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
+	struct mv643xx_eth_shared_private *msp;
+	struct resource *res;
+	int ret;
+
+	if (!mv643xx_eth_version_printed++)
+		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
+			  mv643xx_eth_driver_version);
+
+	ret = -EINVAL;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		goto out;
+
+	ret = -ENOMEM;
+	msp = kzalloc(sizeof(*msp), GFP_KERNEL);
+	if (msp == NULL)
+		goto out;
+
+	msp->base = ioremap(res->start, resource_size(res));
+	if (msp->base == NULL)
+		goto out_free;
+
+	/*
+	 * Set up and register SMI bus.
+	 */
+	if (pd == NULL || pd->shared_smi == NULL) {
+		msp->smi_bus = mdiobus_alloc();
+		if (msp->smi_bus == NULL)
+			goto out_unmap;
+
+		msp->smi_bus->priv = msp;
+		msp->smi_bus->name = "mv643xx_eth smi";
+		msp->smi_bus->read = smi_bus_read;
+		msp->smi_bus->write = smi_bus_write;
+		snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+		msp->smi_bus->parent = &pdev->dev;
+		msp->smi_bus->phy_mask = 0xffffffff;
+		if (mdiobus_register(msp->smi_bus) < 0)
+			goto out_free_mii_bus;
+		msp->smi = msp;
+	} else {
+		msp->smi = platform_get_drvdata(pd->shared_smi);
+	}
+
+	msp->err_interrupt = NO_IRQ;
+	init_waitqueue_head(&msp->smi_busy_wait);
+
+	/*
+	 * Check whether the error interrupt is hooked up.
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res != NULL) {
+		int err;
+
+		err = request_irq(res->start, mv643xx_eth_err_irq,
+				  IRQF_SHARED, "mv643xx_eth", msp);
+		if (!err) {
+			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
+			msp->err_interrupt = res->start;
+		}
+	}
+
+	/*
+	 * (Re-)program MBUS remapping windows if we are asked to.
+	 */
+	if (pd != NULL && pd->dram != NULL)
+		mv643xx_eth_conf_mbus_windows(msp, pd->dram);
+
+	/*
+	 * Detect hardware parameters.
+	 */
+	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
+	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
+ pd->tx_csum_limit : 9 * 1024; + infer_hw_params(msp); + + platform_set_drvdata(pdev, msp); + + return 0; + +out_free_mii_bus: + mdiobus_free(msp->smi_bus); +out_unmap: + iounmap(msp->base); +out_free: + kfree(msp); +out: + return ret; +} + +static int mv643xx_eth_shared_remove(struct platform_device *pdev) +{ + struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); + struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; + + if (pd == NULL || pd->shared_smi == NULL) { + mdiobus_unregister(msp->smi_bus); + mdiobus_free(msp->smi_bus); + } + if (msp->err_interrupt != NO_IRQ) + free_irq(msp->err_interrupt, msp); + iounmap(msp->base); + kfree(msp); + + return 0; +} + +static struct platform_driver mv643xx_eth_shared_driver = { + .probe = mv643xx_eth_shared_probe, + .remove = mv643xx_eth_shared_remove, + .driver = { + .name = MV643XX_ETH_SHARED_NAME, + .owner = THIS_MODULE, + }, +}; + +static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) +{ + int addr_shift = 5 * mp->port_num; + u32 data; + + data = rdl(mp, PHY_ADDR); + data &= ~(0x1f << addr_shift); + data |= (phy_addr & 0x1f) << addr_shift; + wrl(mp, PHY_ADDR, data); +} + +static int phy_addr_get(struct mv643xx_eth_private *mp) +{ + unsigned int data; + + data = rdl(mp, PHY_ADDR); + + return (data >> (5 * mp->port_num)) & 0x1f; +} + +static void set_params(struct mv643xx_eth_private *mp, + struct mv643xx_eth_platform_data *pd) +{ + struct net_device *dev = mp->dev; + + if (is_valid_ether_addr(pd->mac_addr)) + memcpy(dev->dev_addr, pd->mac_addr, 6); + else + uc_addr_get(mp, dev->dev_addr); + + mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; + if (pd->rx_queue_size) + mp->rx_ring_size = pd->rx_queue_size; + mp->rx_desc_sram_addr = pd->rx_sram_addr; + mp->rx_desc_sram_size = pd->rx_sram_size; + + mp->rxq_count = pd->rx_queue_count ? : 1; + + mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; + if (pd->tx_queue_size) + mp->tx_ring_size = pd->tx_queue_size; + mp->tx_desc_sram_addr = pd->tx_sram_addr; + mp->tx_desc_sram_size = pd->tx_sram_size; + + mp->txq_count = pd->tx_queue_count ? 
: 1; +} + +static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, + int phy_addr) +{ + struct mii_bus *bus = mp->shared->smi->smi_bus; + struct phy_device *phydev; + int start; + int num; + int i; + + if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { + start = phy_addr_get(mp) & 0x1f; + num = 32; + } else { + start = phy_addr & 0x1f; + num = 1; + } + + phydev = NULL; + for (i = 0; i < num; i++) { + int addr = (start + i) & 0x1f; + + if (bus->phy_map[addr] == NULL) + mdiobus_scan(bus, addr); + + if (phydev == NULL) { + phydev = bus->phy_map[addr]; + if (phydev != NULL) + phy_addr_set(mp, addr); + } + } + + return phydev; +} + +static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) +{ + struct phy_device *phy = mp->phy; + + phy_reset(mp); + + phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII); + + if (speed == 0) { + phy->autoneg = AUTONEG_ENABLE; + phy->speed = 0; + phy->duplex = 0; + phy->advertising = phy->supported | ADVERTISED_Autoneg; + } else { + phy->autoneg = AUTONEG_DISABLE; + phy->advertising = 0; + phy->speed = speed; + phy->duplex = duplex; + } + phy_start_aneg(phy); +} + +static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) +{ + u32 pscr; + + pscr = rdlp(mp, PORT_SERIAL_CONTROL); + if (pscr & SERIAL_PORT_ENABLE) { + pscr &= ~SERIAL_PORT_ENABLE; + wrlp(mp, PORT_SERIAL_CONTROL, pscr); + } + + pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; + if (mp->phy == NULL) { + pscr |= DISABLE_AUTO_NEG_SPEED_GMII; + if (speed == SPEED_1000) + pscr |= SET_GMII_SPEED_TO_1000; + else if (speed == SPEED_100) + pscr |= SET_MII_SPEED_TO_100; + + pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; + + pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; + if (duplex == DUPLEX_FULL) + pscr |= SET_FULL_DUPLEX_MODE; + } + + wrlp(mp, PORT_SERIAL_CONTROL, pscr); +} + +static const struct net_device_ops mv643xx_eth_netdev_ops = { + .ndo_open = mv643xx_eth_open, + .ndo_stop = mv643xx_eth_stop, + .ndo_start_xmit = mv643xx_eth_xmit, + .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, + .ndo_set_mac_address = mv643xx_eth_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = mv643xx_eth_ioctl, + .ndo_change_mtu = mv643xx_eth_change_mtu, + .ndo_set_features = mv643xx_eth_set_features, + .ndo_tx_timeout = mv643xx_eth_tx_timeout, + .ndo_get_stats = mv643xx_eth_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mv643xx_eth_netpoll, +#endif +}; + +static int mv643xx_eth_probe(struct platform_device *pdev) +{ + struct mv643xx_eth_platform_data *pd; + struct mv643xx_eth_private *mp; + struct net_device *dev; + struct resource *res; + int err; + + pd = pdev->dev.platform_data; + if (pd == NULL) { + dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); + return -ENODEV; + } + + if (pd->shared == NULL) { + dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n"); + return -ENODEV; + } + + dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); + if (!dev) + return -ENOMEM; + + mp = netdev_priv(dev); + platform_set_drvdata(pdev, mp); + + mp->shared = platform_get_drvdata(pd->shared); + mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); + mp->port_num = pd->port_number; + + mp->dev = dev; + + set_params(mp, pd); + netif_set_real_num_tx_queues(dev, mp->txq_count); + netif_set_real_num_rx_queues(dev, mp->rxq_count); + + if (pd->phy_addr != MV643XX_ETH_PHY_NONE) + mp->phy = phy_scan(mp, pd->phy_addr); + + if (mp->phy != NULL) + phy_init(mp, pd->speed, pd->duplex); + + SET_ETHTOOL_OPS(dev, 
&mv643xx_eth_ethtool_ops); + + init_pscr(mp, pd->speed, pd->duplex); + + + mib_counters_clear(mp); + + init_timer(&mp->mib_counters_timer); + mp->mib_counters_timer.data = (unsigned long)mp; + mp->mib_counters_timer.function = mib_counters_timer_wrapper; + mp->mib_counters_timer.expires = jiffies + 30 * HZ; + add_timer(&mp->mib_counters_timer); + + spin_lock_init(&mp->mib_counters_lock); + + INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); + + netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); + + init_timer(&mp->rx_oom); + mp->rx_oom.data = (unsigned long)mp; + mp->rx_oom.function = oom_timer_wrapper; + + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + BUG_ON(!res); + dev->irq = res->start; + + dev->netdev_ops = &mv643xx_eth_netdev_ops; + + dev->watchdog_timeo = 2 * HZ; + dev->base_addr = 0; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_RXCSUM | NETIF_F_LRO; + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; + + SET_NETDEV_DEV(dev, &pdev->dev); + + if (mp->shared->win_protect) + wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); + + netif_carrier_off(dev); + + wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); + + set_rx_coal(mp, 250); + set_tx_coal(mp, 0); + + err = register_netdev(dev); + if (err) + goto out; + + netdev_notice(dev, "port %d with MAC address %pM\n", + mp->port_num, dev->dev_addr); + + if (mp->tx_desc_sram_size > 0) + netdev_notice(dev, "configured with sram\n"); + + return 0; + +out: + free_netdev(dev); + + return err; +} + +static int mv643xx_eth_remove(struct platform_device *pdev) +{ + struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); + + unregister_netdev(mp->dev); + if (mp->phy != NULL) + phy_detach(mp->phy); + cancel_work_sync(&mp->tx_timeout_task); + free_netdev(mp->dev); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static void mv643xx_eth_shutdown(struct platform_device *pdev) +{ + struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); + + /* Mask all interrupts on ethernet port */ + wrlp(mp, INT_MASK, 0); + rdlp(mp, INT_MASK); + + if (netif_running(mp->dev)) + port_reset(mp); +} + +static struct platform_driver mv643xx_eth_driver = { + .probe = mv643xx_eth_probe, + .remove = mv643xx_eth_remove, + .shutdown = mv643xx_eth_shutdown, + .driver = { + .name = MV643XX_ETH_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init mv643xx_eth_init_module(void) +{ + int rc; + + rc = platform_driver_register(&mv643xx_eth_shared_driver); + if (!rc) { + rc = platform_driver_register(&mv643xx_eth_driver); + if (rc) + platform_driver_unregister(&mv643xx_eth_shared_driver); + } + + return rc; +} +module_init(mv643xx_eth_init_module); + +static void __exit mv643xx_eth_cleanup_module(void) +{ + platform_driver_unregister(&mv643xx_eth_driver); + platform_driver_unregister(&mv643xx_eth_shared_driver); +} +module_exit(mv643xx_eth_cleanup_module); + +MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " + "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); +MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); +MODULE_ALIAS("platform:" MV643XX_ETH_NAME); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c new file mode 100644 index 000000000000..1a3033d8e7ed --- /dev/null +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -0,0 +1,1662 @@ +/* + * PXA168 ethernet driver. 
+ * Most of the code is derived from mv643xx ethernet driver. + * + * Copyright (C) 2010 Marvell International Ltd. + * Sachin Sanap + * Zhangfei Gao + * Philip Rakity + * Mark Brown + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "pxa168-eth" +#define DRIVER_VERSION "0.3" + +/* + * Registers + */ + +#define PHY_ADDRESS 0x0000 +#define SMI 0x0010 +#define PORT_CONFIG 0x0400 +#define PORT_CONFIG_EXT 0x0408 +#define PORT_COMMAND 0x0410 +#define PORT_STATUS 0x0418 +#define HTPR 0x0428 +#define SDMA_CONFIG 0x0440 +#define SDMA_CMD 0x0448 +#define INT_CAUSE 0x0450 +#define INT_W_CLEAR 0x0454 +#define INT_MASK 0x0458 +#define ETH_F_RX_DESC_0 0x0480 +#define ETH_C_RX_DESC_0 0x04A0 +#define ETH_C_TX_DESC_1 0x04E4 + +/* smi register */ +#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */ +#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */ +#define SMI_OP_W (0 << 26) /* Write operation */ +#define SMI_OP_R (1 << 26) /* Read operation */ + +#define PHY_WAIT_ITERATIONS 10 + +#define PXA168_ETH_PHY_ADDR_DEFAULT 0 +/* RX & TX descriptor command */ +#define BUF_OWNED_BY_DMA (1 << 31) + +/* RX descriptor status */ +#define RX_EN_INT (1 << 23) +#define RX_FIRST_DESC (1 << 17) +#define RX_LAST_DESC (1 << 16) +#define RX_ERROR (1 << 15) + +/* TX descriptor command */ +#define TX_EN_INT (1 << 23) +#define TX_GEN_CRC (1 << 22) +#define TX_ZERO_PADDING (1 << 18) +#define TX_FIRST_DESC (1 << 17) +#define TX_LAST_DESC (1 << 16) +#define TX_ERROR (1 << 15) + +/* SDMA_CMD */ +#define SDMA_CMD_AT (1 << 31) +#define SDMA_CMD_TXDL (1 << 24) +#define SDMA_CMD_TXDH (1 << 23) +#define SDMA_CMD_AR (1 << 15) +#define SDMA_CMD_ERD (1 << 7) + +/* Bit definitions of the Port Config Reg */ +#define PCR_HS (1 << 12) +#define PCR_EN (1 << 7) +#define PCR_PM (1 << 0) + +/* Bit definitions of the Port Config Extend Reg */ +#define PCXR_2BSM (1 << 28) +#define PCXR_DSCP_EN (1 << 21) +#define PCXR_MFL_1518 (0 << 14) +#define PCXR_MFL_1536 (1 << 14) +#define PCXR_MFL_2048 (2 << 14) +#define PCXR_MFL_64K (3 << 14) +#define PCXR_FLP (1 << 11) +#define PCXR_PRIO_TX_OFF 3 +#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) + +/* Bit definitions of the SDMA Config Reg */ +#define SDCR_BSZ_OFF 12 +#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF) +#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF) +#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF) +#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF) +#define SDCR_BLMR (1 << 6) +#define SDCR_BLMT (1 << 7) +#define SDCR_RIFB (1 << 9) +#define SDCR_RC_OFF 2 +#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF) + +/* + * Bit definitions of the Interrupt Cause Reg + * and Interrupt MASK Reg is the same + */ +#define ICR_RXBUF (1 
<< 0) +#define ICR_TXBUF_H (1 << 2) +#define ICR_TXBUF_L (1 << 3) +#define ICR_TXEND_H (1 << 6) +#define ICR_TXEND_L (1 << 7) +#define ICR_RXERR (1 << 8) +#define ICR_TXERR_H (1 << 10) +#define ICR_TXERR_L (1 << 11) +#define ICR_TX_UDR (1 << 13) +#define ICR_MII_CH (1 << 28) + +#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\ + ICR_TXERR_H | ICR_TXERR_L |\ + ICR_TXEND_H | ICR_TXEND_L |\ + ICR_RXBUF | ICR_RXERR | ICR_MII_CH) + +#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ + +#define NUM_RX_DESCS 64 +#define NUM_TX_DESCS 64 + +#define HASH_ADD 0 +#define HASH_DELETE 1 +#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */ +#define HOP_NUMBER 12 + +/* Bit definitions for Port status */ +#define PORT_SPEED_100 (1 << 0) +#define FULL_DUPLEX (1 << 1) +#define FLOW_CONTROL_ENABLED (1 << 2) +#define LINK_UP (1 << 3) + +/* Bit definitions for work to be done */ +#define WORK_LINK (1 << 0) +#define WORK_TX_DONE (1 << 1) + +/* + * Misc definitions. + */ +#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) + +struct rx_desc { + u32 cmd_sts; /* Descriptor command status */ + u16 byte_cnt; /* Descriptor buffer byte count */ + u16 buf_size; /* Buffer size */ + u32 buf_ptr; /* Descriptor buffer pointer */ + u32 next_desc_ptr; /* Next descriptor pointer */ +}; + +struct tx_desc { + u32 cmd_sts; /* Command/status field */ + u16 reserved; + u16 byte_cnt; /* buffer byte count */ + u32 buf_ptr; /* pointer to buffer for this descriptor */ + u32 next_desc_ptr; /* Pointer to next descriptor */ +}; + +struct pxa168_eth_private { + int port_num; /* User Ethernet port number */ + + int rx_resource_err; /* Rx ring resource error flag */ + + /* Next available and first returning Rx resource */ + int rx_curr_desc_q, rx_used_desc_q; + + /* Next available and first returning Tx resource */ + int tx_curr_desc_q, tx_used_desc_q; + + struct rx_desc *p_rx_desc_area; + dma_addr_t rx_desc_dma; + int rx_desc_area_size; + struct sk_buff **rx_skb; + + struct tx_desc *p_tx_desc_area; + dma_addr_t tx_desc_dma; + int tx_desc_area_size; + struct sk_buff **tx_skb; + + struct work_struct tx_timeout_task; + + struct net_device *dev; + struct napi_struct napi; + u8 work_todo; + int skb_size; + + struct net_device_stats stats; + /* Size of Tx Ring per queue */ + int tx_ring_size; + /* Number of tx descriptors in use */ + int tx_desc_count; + /* Size of Rx Ring per queue */ + int rx_ring_size; + /* Number of rx descriptors in use */ + int rx_desc_count; + + /* + * Used in case RX Ring is empty, which can occur when + * system does not have resources (skb's) + */ + struct timer_list timeout; + struct mii_bus *smi_bus; + struct phy_device *phy; + + /* clock */ + struct clk *clk; + struct pxa168_eth_platform_data *pd; + /* + * Ethernet controller base address. 
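+	 * (ioremapped at probe time; the rdl()/wrl() helpers below
+	 * access 32-bit registers at fixed offsets from this base.)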
+ */ + void __iomem *base; + + /* Pointer to the hardware address filter table */ + void *htpr; + dma_addr_t htpr_dma; +}; + +struct addr_table_entry { + __le32 lo; + __le32 hi; +}; + +/* Bit fields of a Hash Table Entry */ +enum hash_table_entry { + HASH_ENTRY_VALID = 1, + SKIP = 2, + HASH_ENTRY_RECEIVE_DISCARD = 4, + HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 +}; + +static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); +static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); +static int pxa168_init_hw(struct pxa168_eth_private *pep); +static void eth_port_reset(struct net_device *dev); +static void eth_port_start(struct net_device *dev); +static int pxa168_eth_open(struct net_device *dev); +static int pxa168_eth_stop(struct net_device *dev); +static int ethernet_phy_setup(struct net_device *dev); + +static inline u32 rdl(struct pxa168_eth_private *pep, int offset) +{ + return readl(pep->base + offset); +} + +static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data) +{ + writel(data, pep->base + offset); +} + +static void abort_dma(struct pxa168_eth_private *pep) +{ + int delay; + int max_retries = 40; + + do { + wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT); + udelay(100); + + delay = 10; + while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT)) + && delay-- > 0) { + udelay(10); + } + } while (max_retries-- > 0 && delay <= 0); + + if (max_retries <= 0) + printk(KERN_ERR "%s : DMA Stuck\n", __func__); +} + +static int ethernet_phy_get(struct pxa168_eth_private *pep) +{ + unsigned int reg_data; + + reg_data = rdl(pep, PHY_ADDRESS); + + return (reg_data >> (5 * pep->port_num)) & 0x1f; +} + +static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr) +{ + u32 reg_data; + int addr_shift = 5 * pep->port_num; + + reg_data = rdl(pep, PHY_ADDRESS); + reg_data &= ~(0x1f << addr_shift); + reg_data |= (phy_addr & 0x1f) << addr_shift; + wrl(pep, PHY_ADDRESS, reg_data); +} + +static void ethernet_phy_reset(struct pxa168_eth_private *pep) +{ + int data; + + data = phy_read(pep->phy, MII_BMCR); + if (data < 0) + return; + + data |= BMCR_RESET; + if (phy_write(pep->phy, MII_BMCR, data) < 0) + return; + + do { + data = phy_read(pep->phy, MII_BMCR); + } while (data >= 0 && data & BMCR_RESET); +} + +static void rxq_refill(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct sk_buff *skb; + struct rx_desc *p_used_rx_desc; + int used_rx_desc; + + while (pep->rx_desc_count < pep->rx_ring_size) { + int size; + + skb = dev_alloc_skb(pep->skb_size); + if (!skb) + break; + if (SKB_DMA_REALIGN) + skb_reserve(skb, SKB_DMA_REALIGN); + pep->rx_desc_count++; + /* Get 'used' Rx descriptor */ + used_rx_desc = pep->rx_used_desc_q; + p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; + size = skb->end - skb->data; + p_used_rx_desc->buf_ptr = dma_map_single(NULL, + skb->data, + size, + DMA_FROM_DEVICE); + p_used_rx_desc->buf_size = size; + pep->rx_skb[used_rx_desc] = skb; + + /* Return the descriptor to DMA ownership */ + wmb(); + p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT; + wmb(); + + /* Move the used descriptor pointer to the next descriptor */ + pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size; + + /* Any Rx return cancels the Rx resource error status */ + pep->rx_resource_err = 0; + + skb_reserve(skb, ETH_HW_IP_ALIGN); + } + + /* + * If RX ring is empty of SKB, set a timer to try allocating + * again at a later time. 
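+	 *
+	 * The timer handler (rxq_refill_timer_wrapper below) simply
+	 * reschedules NAPI, whose poll function ends up calling
+	 * rxq_refill() again; with an expiry of HZ / 10 the retry
+	 * happens roughly every 100 ms.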
+ */
+	if (pep->rx_desc_count == 0) {
+		pep->timeout.expires = jiffies + (HZ / 10);
+		add_timer(&pep->timeout);
+	}
+}
+
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+	struct pxa168_eth_private *pep = (void *)data;
+	napi_schedule(&pep->napi);
+}
+
+static inline u8 flip_8_bits(u8 x)
+{
+	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
+
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+	int i;
+	for (i = 0; i < ETH_ALEN; i++) {
+		mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+				((mac_addr[i] & 0xf0) >> 4);
+	}
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+	int i;
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function will calculate the hash function of the address.
+ * Inputs
+ * mac_addr_orig - MAC address.
+ * Outputs
+ * return the calculated entry.
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+	u32 hash_result;
+	u32 addr0;
+	u32 addr1;
+	u32 addr2;
+	u32 addr3;
+	unsigned char mac_addr[ETH_ALEN];
+
+	/* Make a copy of the MAC address, since we are going to perform
+	 * bit operations on it
+	 */
+	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+	nibble_swap_every_byte(mac_addr);
+	inverse_every_nibble(mac_addr);
+
+	addr0 = (mac_addr[5] >> 2) & 0x3f;
+	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+	hash_result = hash_result & 0x07ff;
+	return hash_result;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function will add/delete an entry in the address table.
+ * Inputs
+ * pep - ETHERNET port private data.
+ * mac_addr - MAC address.
+ * skip - if 1, skip this address. Used when deleting an entry which is part
+ *	  of a chain in the hash table. We can't just delete the entry, since
+ *	  that would break the chain; instead, the tables need to be
+ *	  defragmented from time to time.
+ * rd - 0 Discard packet upon match.
+ *    - 1 Receive packet upon match.
+ * Outputs
+ * address table entry is added/deleted.
+ * 0 if success.
+ * -ENOSPC if table full
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+			      unsigned char *mac_addr,
+			      u32 rd, u32 skip, int del)
+{
+	struct addr_table_entry *entry, *start;
+	u32 new_high;
+	u32 new_low;
+	u32 i;
+
+	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+	    | (((mac_addr[1] >> 0) & 0xf) << 11)
+	    | (((mac_addr[0] >> 4) & 0xf) << 7)
+	    | (((mac_addr[0] >> 0) & 0xf) << 3)
+	    | (((mac_addr[3] >> 4) & 0x1) << 31)
+	    | (((mac_addr[3] >> 0) & 0xf) << 27)
+	    | (((mac_addr[2] >> 4) & 0xf) << 23)
+	    | (((mac_addr[2] >> 0) & 0xf) << 19)
+	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+	    | HASH_ENTRY_VALID;
+
+	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+	    | (((mac_addr[5] >> 0) & 0xf) << 11)
+	    | (((mac_addr[4] >> 4) & 0xf) << 7)
+	    | (((mac_addr[4] >> 0) & 0xf) << 3)
+	    | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+	/*
+	 * Pick the appropriate table and start scanning for free/reusable
+	 * entries at the index obtained by hashing the specified MAC address.
+	 */
+	start = pep->htpr;
+	entry = start + hash_function(mac_addr);
+	for (i = 0; i < HOP_NUMBER; i++) {
+		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+			break;
+		} else {
+			/* if same address put in same position */
+			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+			     (new_low & 0xfffffff8)) &&
+			    (le32_to_cpu(entry->hi) == new_high)) {
+				break;
+			}
+		}
+		if (entry == start + 0x7ff)
+			entry = start;
+		else
+			entry++;
+	}
+
+	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+	    (le32_to_cpu(entry->hi) != new_high) && del)
+		return 0;
+
+	if (i == HOP_NUMBER) {
+		if (!del) {
+			printk(KERN_INFO "%s: table section is full, need to "
+					"move to 16kB implementation?\n",
+					 __FILE__);
+			return -ENOSPC;
+		} else
+			return 0;
+	}
+
+	/*
+	 * Update the selected entry
+	 */
+	if (del) {
+		entry->hi = 0;
+		entry->lo = 0;
+	} else {
+		entry->hi = cpu_to_le32(new_high);
+		entry->lo = cpu_to_le32(new_low);
+	}
+
+	return 0;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Create an address table entry from the MAC address info
+ * found in the specified net_device struct
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+					  unsigned char *oaddr,
+					  unsigned char *addr)
+{
+	/* Delete old entry */
+	if (oaddr)
+		add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+	/* Add new entry */
+	add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+	/*
+	 * The hardware expects the CPU to build a hash table based on a
+	 * predefined hash function and to populate it based on the hardware
+	 * address. The location of the hash table is identified by a 32-bit
+	 * pointer stored in the HTPR internal register. Two possible sizes
+	 * exist for the hash table: 8kB (256kB of DRAM required (4 x 64 kB
+	 * banks)) and 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We
+	 * currently only support 1/2kB.
+	 */
+	/* TODO: Add support for the 8kB hash table and an alternative hash
+	 * function. The driver can dynamically switch to them if the 1/2kB
+	 * hash table is full.
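+	 *
+	 * For reference, each addr_table_entry is 8 bytes (two __le32
+	 * words), so the 16kB table holds 0x4000 / 8 = 2048 entries;
+	 * this matches add_del_hash_entry() above, which wraps its scan
+	 * at start + 0x7ff (entry 2047).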
+ */ + if (pep->htpr == NULL) { + pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, + HASH_ADDR_TABLE_SIZE, + &pep->htpr_dma, GFP_KERNEL); + if (pep->htpr == NULL) + return -ENOMEM; + } + memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); + wrl(pep, HTPR, pep->htpr_dma); + return 0; +} + +static void pxa168_eth_set_rx_mode(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct netdev_hw_addr *ha; + u32 val; + + val = rdl(pep, PORT_CONFIG); + if (dev->flags & IFF_PROMISC) + val |= PCR_PM; + else + val &= ~PCR_PM; + wrl(pep, PORT_CONFIG, val); + + /* + * Remove the old list of MAC address and add dev->addr + * and multicast address. + */ + memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); + update_hash_table_mac_address(pep, NULL, dev->dev_addr); + + netdev_for_each_mc_addr(ha, dev) + update_hash_table_mac_address(pep, NULL, ha->addr); +} + +static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned char oldMac[ETH_ALEN]; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EINVAL; + memcpy(oldMac, dev->dev_addr, ETH_ALEN); + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + netif_addr_lock_bh(dev); + update_hash_table_mac_address(pep, oldMac, dev->dev_addr); + netif_addr_unlock_bh(dev); + return 0; +} + +static void eth_port_start(struct net_device *dev) +{ + unsigned int val = 0; + struct pxa168_eth_private *pep = netdev_priv(dev); + int tx_curr_desc, rx_curr_desc; + + /* Perform PHY reset, if there is a PHY. */ + if (pep->phy != NULL) { + struct ethtool_cmd cmd; + + pxa168_get_settings(pep->dev, &cmd); + ethernet_phy_reset(pep); + pxa168_set_settings(pep->dev, &cmd); + } + + /* Assignment of Tx CTRP of given queue */ + tx_curr_desc = pep->tx_curr_desc_q; + wrl(pep, ETH_C_TX_DESC_1, + (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc))); + + /* Assignment of Rx CRDP of given queue */ + rx_curr_desc = pep->rx_curr_desc_q; + wrl(pep, ETH_C_RX_DESC_0, + (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); + + wrl(pep, ETH_F_RX_DESC_0, + (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); + + /* Clear all interrupts */ + wrl(pep, INT_CAUSE, 0); + + /* Enable all interrupts for receive, transmit and error. */ + wrl(pep, INT_MASK, ALL_INTS); + + val = rdl(pep, PORT_CONFIG); + val |= PCR_EN; + wrl(pep, PORT_CONFIG, val); + + /* Start RX DMA engine */ + val = rdl(pep, SDMA_CMD); + val |= SDMA_CMD_ERD; + wrl(pep, SDMA_CMD, val); +} + +static void eth_port_reset(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned int val = 0; + + /* Stop all interrupts for receive, transmit and error. */ + wrl(pep, INT_MASK, 0); + + /* Clear all interrupts */ + wrl(pep, INT_CAUSE, 0); + + /* Stop RX DMA */ + val = rdl(pep, SDMA_CMD); + val &= ~SDMA_CMD_ERD; /* abort dma command */ + + /* Abort any transmit and receive operations and put DMA + * in idle state. 
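+	 *
+	 * abort_dma() issues SDMA_CMD_AR | SDMA_CMD_AT and then polls
+	 * SDMA_CMD until both bits clear, retrying up to 40 times before
+	 * giving up with a "DMA Stuck" error.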
+ */
+	abort_dma(pep);
+
+	/* Disable port */
+	val = rdl(pep, PORT_CONFIG);
+	val &= ~PCR_EN;
+	wrl(pep, PORT_CONFIG, val);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors
+ * If force is non-zero, frees uncompleted descriptors as well
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct tx_desc *desc;
+	u32 cmd_sts;
+	struct sk_buff *skb;
+	int tx_index;
+	dma_addr_t addr;
+	int count;
+	int released = 0;
+
+	netif_tx_lock(dev);
+
+	pep->work_todo &= ~WORK_TX_DONE;
+	while (pep->tx_desc_count > 0) {
+		tx_index = pep->tx_used_desc_q;
+		desc = &pep->p_tx_desc_area[tx_index];
+		cmd_sts = desc->cmd_sts;
+		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+			if (released > 0) {
+				goto txq_reclaim_end;
+			} else {
+				released = -1;
+				goto txq_reclaim_end;
+			}
+		}
+		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+		pep->tx_desc_count--;
+		addr = desc->buf_ptr;
+		count = desc->byte_cnt;
+		skb = pep->tx_skb[tx_index];
+		if (skb)
+			pep->tx_skb[tx_index] = NULL;
+
+		if (cmd_sts & TX_ERROR) {
+			if (net_ratelimit())
+				printk(KERN_ERR "%s: Error in TX\n", dev->name);
+			dev->stats.tx_errors++;
+		}
+		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+		if (skb)
+			dev_kfree_skb_irq(skb);
+		released++;
+	}
+txq_reclaim_end:
+	netif_tx_unlock(dev);
+	return released;
+}
+
+static void pxa168_eth_tx_timeout(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+
+	printk(KERN_INFO "%s: TX timeout desc_count %d\n",
+	       dev->name, pep->tx_desc_count);
+
+	schedule_work(&pep->tx_timeout_task);
+}
+
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+	struct pxa168_eth_private *pep = container_of(work,
+						      struct pxa168_eth_private,
+						      tx_timeout_task);
+	struct net_device *dev = pep->dev;
+	pxa168_eth_stop(dev);
+	pxa168_eth_open(dev);
+}
+
+static int rxq_process(struct net_device *dev, int budget)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	unsigned int received_packets = 0;
+	struct sk_buff *skb;
+
+	while (budget-- > 0) {
+		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+		struct rx_desc *rx_desc;
+		unsigned int cmd_sts;
+
+		/* Do not process Rx ring in case of Rx ring resource error */
+		if (pep->rx_resource_err)
+			break;
+		rx_curr_desc = pep->rx_curr_desc_q;
+		rx_used_desc = pep->rx_used_desc_q;
+		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+		cmd_sts = rx_desc->cmd_sts;
+		rmb();
+		if (cmd_sts & (BUF_OWNED_BY_DMA))
+			break;
+		skb = pep->rx_skb[rx_curr_desc];
+		pep->rx_skb[rx_curr_desc] = NULL;
+
+		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+		pep->rx_curr_desc_q = rx_next_curr_desc;
+
+		/* Rx descriptors exhausted. */
+		/* Set the Rx ring resource error flag */
+		if (rx_next_curr_desc == rx_used_desc)
+			pep->rx_resource_err = 1;
+		pep->rx_desc_count--;
+		dma_unmap_single(NULL, rx_desc->buf_ptr,
+				 rx_desc->buf_size,
+				 DMA_FROM_DEVICE);
+		received_packets++;
+		/*
+		 * Update statistics.
+		 * Note byte count includes 4 byte CRC count
+		 */
+		stats->rx_packets++;
+		stats->rx_bytes += rx_desc->byte_cnt;
+		/*
+		 * If a packet was received without both the first and last
+		 * descriptor bits set, or with the error summary bit set,
+		 * the packet needs to be dropped.
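+		 *
+		 * A frame too large for a single buffer would be spread over
+		 * several descriptors, with RX_FIRST_DESC set only on the
+		 * first and RX_LAST_DESC only on the last; since the receive
+		 * buffers are sized for a full frame (see
+		 * pxa168_eth_recalc_skb_size()), such frames are counted and
+		 * dropped here rather than reassembled.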
+ */ + if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) + || (cmd_sts & RX_ERROR)) { + + stats->rx_dropped++; + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) { + if (net_ratelimit()) + printk(KERN_ERR + "%s: Rx pkt on multiple desc\n", + dev->name); + } + if (cmd_sts & RX_ERROR) + stats->rx_errors++; + dev_kfree_skb_irq(skb); + } else { + /* + * The -4 is for the CRC in the trailer of the + * received packet + */ + skb_put(skb, rx_desc->byte_cnt - 4); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + } + } + /* Fill RX ring with skb's */ + rxq_refill(dev); + return received_packets; +} + +static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, + struct net_device *dev) +{ + u32 icr; + int ret = 0; + + icr = rdl(pep, INT_CAUSE); + if (icr == 0) + return IRQ_NONE; + + wrl(pep, INT_CAUSE, ~icr); + if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) { + pep->work_todo |= WORK_TX_DONE; + ret = 1; + } + if (icr & ICR_RXBUF) + ret = 1; + if (icr & ICR_MII_CH) { + pep->work_todo |= WORK_LINK; + ret = 1; + } + return ret; +} + +static void handle_link_event(struct pxa168_eth_private *pep) +{ + struct net_device *dev = pep->dev; + u32 port_status; + int speed; + int duplex; + int fc; + + port_status = rdl(pep, PORT_STATUS); + if (!(port_status & LINK_UP)) { + if (netif_carrier_ok(dev)) { + printk(KERN_INFO "%s: link down\n", dev->name); + netif_carrier_off(dev); + txq_reclaim(dev, 1); + } + return; + } + if (port_status & PORT_SPEED_100) + speed = 100; + else + speed = 10; + + duplex = (port_status & FULL_DUPLEX) ? 1 : 0; + fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; + printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " + "flow control %sabled\n", dev->name, + speed, duplex ? "full" : "half", fc ? "en" : "dis"); + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); +} + +static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct pxa168_eth_private *pep = netdev_priv(dev); + + if (unlikely(!pxa168_eth_collect_events(pep, dev))) + return IRQ_NONE; + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + napi_schedule(&pep->napi); + return IRQ_HANDLED; +} + +static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep) +{ + int skb_size; + + /* + * Reserve 2+14 bytes for an ethernet header (the hardware + * automatically prepends 2 bytes of dummy data to each + * received packet), 16 bytes for up to four VLAN tags, and + * 4 bytes for the trailing FCS -- 36 bytes total. + */ + skb_size = pep->dev->mtu + 36; + + /* + * Make sure that the skb size is a multiple of 8 bytes, as + * the lower three bits of the receive descriptor's buffer + * size field are ignored by the hardware. + */ + pep->skb_size = (skb_size + 7) & ~7; + + /* + * If NET_SKB_PAD is smaller than a cache line, + * netdev_alloc_skb() will cause skb->data to be misaligned + * to a cache line boundary. If this is the case, include + * some extra space to allow re-aligning the data area. 
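+	 *
+	 * As a worked example, with 4096-byte pages, NET_SKB_PAD == 32
+	 * and 64-byte cache lines (typical but configuration-dependent
+	 * values), SKB_DMA_REALIGN = (4096 - 32) % 64 = 32 bytes of
+	 * extra headroom.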
+ */ + pep->skb_size += SKB_DMA_REALIGN; + +} + +static int set_port_config_ext(struct pxa168_eth_private *pep) +{ + int skb_size; + + pxa168_eth_recalc_skb_size(pep); + if (pep->skb_size <= 1518) + skb_size = PCXR_MFL_1518; + else if (pep->skb_size <= 1536) + skb_size = PCXR_MFL_1536; + else if (pep->skb_size <= 2048) + skb_size = PCXR_MFL_2048; + else + skb_size = PCXR_MFL_64K; + + /* Extended Port Configuration */ + wrl(pep, + PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */ + PCXR_DSCP_EN | /* Enable DSCP in IP */ + skb_size | PCXR_FLP | /* do not force link pass */ + PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ + + return 0; +} + +static int pxa168_init_hw(struct pxa168_eth_private *pep) +{ + int err = 0; + + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + wrl(pep, INT_CAUSE, 0); + /* Write to ICR to clear interrupts. */ + wrl(pep, INT_W_CLEAR, 0); + /* Abort any transmit and receive operations and put DMA + * in idle state. + */ + abort_dma(pep); + /* Initialize address hash table */ + err = init_hash_table(pep); + if (err) + return err; + /* SDMA configuration */ + wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */ + SDCR_RIFB | /* Rx interrupt on frame */ + SDCR_BLMT | /* Little endian transmit */ + SDCR_BLMR | /* Little endian receive */ + SDCR_RC_MAX_RETRANS); /* Max retransmit count */ + /* Port Configuration */ + wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */ + set_port_config_ext(pep); + + return err; +} + +static int rxq_init(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct rx_desc *p_rx_desc; + int size = 0, i = 0; + int rx_desc_num = pep->rx_ring_size; + + /* Allocate RX skb rings */ + pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, + GFP_KERNEL); + if (!pep->rx_skb) { + printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name); + return -ENOMEM; + } + /* Allocate RX ring */ + pep->rx_desc_count = 0; + size = pep->rx_ring_size * sizeof(struct rx_desc); + pep->rx_desc_area_size = size; + pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, + &pep->rx_desc_dma, GFP_KERNEL); + if (!pep->p_rx_desc_area) { + printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n", + dev->name, size); + goto out; + } + memset((void *)pep->p_rx_desc_area, 0, size); + /* initialize the next_desc_ptr links in the Rx descriptors ring */ + p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area; + for (i = 0; i < rx_desc_num; i++) { + p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + + ((i + 1) % rx_desc_num) * sizeof(struct rx_desc); + } + /* Save Rx desc pointer to driver struct. */ + pep->rx_curr_desc_q = 0; + pep->rx_used_desc_q = 0; + pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc); + return 0; +out: + kfree(pep->rx_skb); + return -ENOMEM; +} + +static void rxq_deinit(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int curr; + + /* Free preallocated skb's on RX rings */ + for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) { + if (pep->rx_skb[curr]) { + dev_kfree_skb(pep->rx_skb[curr]); + pep->rx_desc_count--; + } + } + if (pep->rx_desc_count) + printk(KERN_ERR + "Error in freeing Rx Ring. 
%d skb's still\n", + pep->rx_desc_count); + /* Free RX ring */ + if (pep->p_rx_desc_area) + dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size, + pep->p_rx_desc_area, pep->rx_desc_dma); + kfree(pep->rx_skb); +} + +static int txq_init(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct tx_desc *p_tx_desc; + int size = 0, i = 0; + int tx_desc_num = pep->tx_ring_size; + + pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, + GFP_KERNEL); + if (!pep->tx_skb) { + printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name); + return -ENOMEM; + } + /* Allocate TX ring */ + pep->tx_desc_count = 0; + size = pep->tx_ring_size * sizeof(struct tx_desc); + pep->tx_desc_area_size = size; + pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, + &pep->tx_desc_dma, GFP_KERNEL); + if (!pep->p_tx_desc_area) { + printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", + dev->name, size); + goto out; + } + memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size); + /* Initialize the next_desc_ptr links in the Tx descriptors ring */ + p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area; + for (i = 0; i < tx_desc_num; i++) { + p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + + ((i + 1) % tx_desc_num) * sizeof(struct tx_desc); + } + pep->tx_curr_desc_q = 0; + pep->tx_used_desc_q = 0; + pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc); + return 0; +out: + kfree(pep->tx_skb); + return -ENOMEM; +} + +static void txq_deinit(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + /* Free outstanding skb's on TX ring */ + txq_reclaim(dev, 1); + BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q); + /* Free TX ring */ + if (pep->p_tx_desc_area) + dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size, + pep->p_tx_desc_area, pep->tx_desc_dma); + kfree(pep->tx_skb); +} + +static int pxa168_eth_open(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int err; + + err = request_irq(dev->irq, pxa168_eth_int_handler, + IRQF_DISABLED, dev->name, dev); + if (err) { + dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); + return -EAGAIN; + } + pep->rx_resource_err = 0; + err = rxq_init(dev); + if (err != 0) + goto out_free_irq; + err = txq_init(dev); + if (err != 0) + goto out_free_rx_skb; + pep->rx_used_desc_q = 0; + pep->rx_curr_desc_q = 0; + + /* Fill RX ring with skb's */ + rxq_refill(dev); + pep->rx_used_desc_q = 0; + pep->rx_curr_desc_q = 0; + netif_carrier_off(dev); + eth_port_start(dev); + napi_enable(&pep->napi); + return 0; +out_free_rx_skb: + rxq_deinit(dev); +out_free_irq: + free_irq(dev->irq, dev); + return err; +} + +static int pxa168_eth_stop(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + eth_port_reset(dev); + + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + wrl(pep, INT_CAUSE, 0); + /* Write to ICR to clear interrupts. */ + wrl(pep, INT_W_CLEAR, 0); + napi_disable(&pep->napi); + del_timer_sync(&pep->timeout); + netif_carrier_off(dev); + free_irq(dev->irq, dev); + rxq_deinit(dev); + txq_deinit(dev); + + return 0; +} + +static int pxa168_eth_change_mtu(struct net_device *dev, int mtu) +{ + int retval; + struct pxa168_eth_private *pep = netdev_priv(dev); + + if ((mtu > 9500) || (mtu < 68)) + return -EINVAL; + + dev->mtu = mtu; + retval = set_port_config_ext(pep); + + if (!netif_running(dev)) + return 0; + + /* + * Stop and then re-open the interface. This will allocate RX + * skbs of the new MTU. 
+	 * There is a possible danger that the open will not succeed,
+	 * due to memory being full.
+	 */
+	pxa168_eth_stop(dev);
+	if (pxa168_eth_open(dev)) {
+		dev_printk(KERN_ERR, &dev->dev,
+			   "fatal error on re-opening device after "
+			   "MTU change\n");
+	}
+
+	return 0;
+}
+
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+	int tx_desc_curr;
+
+	tx_desc_curr = pep->tx_curr_desc_q;
+	pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+	BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+	pep->tx_desc_count++;
+
+	return tx_desc_curr;
+}
+
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct pxa168_eth_private *pep =
+	    container_of(napi, struct pxa168_eth_private, napi);
+	struct net_device *dev = pep->dev;
+	int work_done = 0;
+
+	if (unlikely(pep->work_todo & WORK_LINK)) {
+		pep->work_todo &= ~(WORK_LINK);
+		handle_link_event(pep);
+	}
+	/*
+	 * We call txq_reclaim on every poll, since interrupts are disabled
+	 * while in NAPI and we would otherwise miss the TX_DONE interrupt,
+	 * which is not updated in the interrupt status register.
+	 */
+	txq_reclaim(dev, 0);
+	if (netif_queue_stopped(dev)
+	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
+		netif_wake_queue(dev);
+	}
+	work_done = rxq_process(dev, budget);
+	if (work_done < budget) {
+		napi_complete(napi);
+		wrl(pep, INT_MASK, ALL_INTS);
+	}
+
+	return work_done;
+}
+
+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct tx_desc *desc;
+	int tx_index;
+	int length;
+
+	tx_index = eth_alloc_tx_desc_index(pep);
+	desc = &pep->p_tx_desc_area[tx_index];
+	length = skb->len;
+	pep->tx_skb[tx_index] = skb;
+	desc->byte_cnt = length;
+	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+
+	skb_tx_timestamp(skb);
+
+	wmb();
+	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+	wmb();
+	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+	stats->tx_bytes += length;
+	stats->tx_packets++;
+	dev->trans_start = jiffies;
+	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+		/* We handled the current skb, but now we are out of space. */
+		netif_stop_queue(dev);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+	int i = 0;
+
+	/* wait for the SMI register to become available */
+	for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+		if (i == PHY_WAIT_ITERATIONS)
+			return -ETIMEDOUT;
+		msleep(10);
+	}
+
+	return 0;
+}
+
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+	struct pxa168_eth_private *pep = bus->priv;
+	int i = 0;
+	int val;
+
+	if (smi_wait_ready(pep)) {
+		printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
+	}
+	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+	/* now wait for the data to be valid */
+	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+		if (i == PHY_WAIT_ITERATIONS) {
+			printk(KERN_WARNING
+			       "pxa168_eth: SMI bus read not valid\n");
+			return -ENODEV;
+		}
+		msleep(10);
+	}
+
+	return val & 0xffff;
+}
+
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+			    u16 value)
+{
+	struct pxa168_eth_private *pep = bus->priv;
+
+	if (smi_wait_ready(pep)) {
+		printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+	    SMI_OP_W | (value & 0xffff));
+
+	if
(smi_wait_ready(pep)) { + printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + if (pep->phy != NULL) + return phy_mii_ioctl(pep->phy, ifr, cmd); + + return -EOPNOTSUPP; +} + +static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr) +{ + struct mii_bus *bus = pep->smi_bus; + struct phy_device *phydev; + int start; + int num; + int i; + + if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) { + /* Scan entire range */ + start = ethernet_phy_get(pep); + num = 32; + } else { + /* Use phy addr specific to platform */ + start = phy_addr & 0x1f; + num = 1; + } + phydev = NULL; + for (i = 0; i < num; i++) { + int addr = (start + i) & 0x1f; + if (bus->phy_map[addr] == NULL) + mdiobus_scan(bus, addr); + + if (phydev == NULL) { + phydev = bus->phy_map[addr]; + if (phydev != NULL) + ethernet_phy_set_addr(pep, addr); + } + } + + return phydev; +} + +static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex) +{ + struct phy_device *phy = pep->phy; + ethernet_phy_reset(pep); + + phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII); + + if (speed == 0) { + phy->autoneg = AUTONEG_ENABLE; + phy->speed = 0; + phy->duplex = 0; + phy->supported &= PHY_BASIC_FEATURES; + phy->advertising = phy->supported | ADVERTISED_Autoneg; + } else { + phy->autoneg = AUTONEG_DISABLE; + phy->advertising = 0; + phy->speed = speed; + phy->duplex = duplex; + } + phy_start_aneg(phy); +} + +static int ethernet_phy_setup(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + if (pep->pd->init) + pep->pd->init(); + pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f); + if (pep->phy != NULL) + phy_init(pep, pep->pd->speed, pep->pd->duplex); + update_hash_table_mac_address(pep, NULL, dev->dev_addr); + + return 0; +} + +static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int err; + + err = phy_read_status(pep->phy); + if (err == 0) + err = phy_ethtool_gset(pep->phy, cmd); + + return err; +} + +static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + return phy_ethtool_sset(pep->phy, cmd); +} + +static void pxa168_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strncpy(info->driver, DRIVER_NAME, 32); + strncpy(info->version, DRIVER_VERSION, 32); + strncpy(info->fw_version, "N/A", 32); + strncpy(info->bus_info, "N/A", 32); +} + +static const struct ethtool_ops pxa168_ethtool_ops = { + .get_settings = pxa168_get_settings, + .set_settings = pxa168_set_settings, + .get_drvinfo = pxa168_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops pxa168_eth_netdev_ops = { + .ndo_open = pxa168_eth_open, + .ndo_stop = pxa168_eth_stop, + .ndo_start_xmit = pxa168_eth_start_xmit, + .ndo_set_rx_mode = pxa168_eth_set_rx_mode, + .ndo_set_mac_address = pxa168_eth_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = pxa168_eth_do_ioctl, + .ndo_change_mtu = pxa168_eth_change_mtu, + .ndo_tx_timeout = pxa168_eth_tx_timeout, +}; + +static int pxa168_eth_probe(struct platform_device *pdev) +{ + struct pxa168_eth_private *pep = NULL; + struct net_device *dev = NULL; + struct resource *res; + struct clk *clk; + int err; + + printk(KERN_NOTICE "PXA168 
10/100 Ethernet Driver\n"); + + clk = clk_get(&pdev->dev, "MFUCLK"); + if (IS_ERR(clk)) { + printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n", + DRIVER_NAME); + return -ENODEV; + } + clk_enable(clk); + + dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); + if (!dev) { + err = -ENOMEM; + goto err_clk; + } + + platform_set_drvdata(pdev, dev); + pep = netdev_priv(dev); + pep->dev = dev; + pep->clk = clk; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + err = -ENODEV; + goto err_netdev; + } + pep->base = ioremap(res->start, resource_size(res)); + if (pep->base == NULL) { + err = -ENOMEM; + goto err_netdev; + } + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + BUG_ON(!res); + dev->irq = res->start; + dev->netdev_ops = &pxa168_eth_netdev_ops; + dev->watchdog_timeo = 2 * HZ; + dev->base_addr = 0; + SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops); + + INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); + + printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME); + random_ether_addr(dev->dev_addr); + + pep->pd = pdev->dev.platform_data; + pep->rx_ring_size = NUM_RX_DESCS; + if (pep->pd->rx_queue_size) + pep->rx_ring_size = pep->pd->rx_queue_size; + + pep->tx_ring_size = NUM_TX_DESCS; + if (pep->pd->tx_queue_size) + pep->tx_ring_size = pep->pd->tx_queue_size; + + pep->port_num = pep->pd->port_number; + /* Hardware supports only 3 ports */ + BUG_ON(pep->port_num > 2); + netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size); + + memset(&pep->timeout, 0, sizeof(struct timer_list)); + init_timer(&pep->timeout); + pep->timeout.function = rxq_refill_timer_wrapper; + pep->timeout.data = (unsigned long)pep; + + pep->smi_bus = mdiobus_alloc(); + if (pep->smi_bus == NULL) { + err = -ENOMEM; + goto err_base; + } + pep->smi_bus->priv = pep; + pep->smi_bus->name = "pxa168_eth smi"; + pep->smi_bus->read = pxa168_smi_read; + pep->smi_bus->write = pxa168_smi_write; + snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); + pep->smi_bus->parent = &pdev->dev; + pep->smi_bus->phy_mask = 0xffffffff; + err = mdiobus_register(pep->smi_bus); + if (err) + goto err_free_mdio; + + pxa168_init_hw(pep); + err = ethernet_phy_setup(dev); + if (err) + goto err_mdiobus; + SET_NETDEV_DEV(dev, &pdev->dev); + err = register_netdev(dev); + if (err) + goto err_mdiobus; + return 0; + +err_mdiobus: + mdiobus_unregister(pep->smi_bus); +err_free_mdio: + mdiobus_free(pep->smi_bus); +err_base: + iounmap(pep->base); +err_netdev: + free_netdev(dev); +err_clk: + clk_disable(clk); + clk_put(clk); + return err; +} + +static int pxa168_eth_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct pxa168_eth_private *pep = netdev_priv(dev); + + if (pep->htpr) { + dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, + pep->htpr, pep->htpr_dma); + pep->htpr = NULL; + } + if (pep->clk) { + clk_disable(pep->clk); + clk_put(pep->clk); + pep->clk = NULL; + } + if (pep->phy != NULL) + phy_detach(pep->phy); + + iounmap(pep->base); + pep->base = NULL; + mdiobus_unregister(pep->smi_bus); + mdiobus_free(pep->smi_bus); + unregister_netdev(dev); + cancel_work_sync(&pep->tx_timeout_task); + free_netdev(dev); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static void pxa168_eth_shutdown(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + eth_port_reset(dev); +} + +#ifdef CONFIG_PM +static int pxa168_eth_resume(struct platform_device *pdev) +{ + return -ENOSYS; +} + +static int 
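/* suspend/resume are not implemented; -ENOSYS tells the PM core so */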
pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state) +{ + return -ENOSYS; +} + +#else +#define pxa168_eth_resume NULL +#define pxa168_eth_suspend NULL +#endif + +static struct platform_driver pxa168_eth_driver = { + .probe = pxa168_eth_probe, + .remove = pxa168_eth_remove, + .shutdown = pxa168_eth_shutdown, + .resume = pxa168_eth_resume, + .suspend = pxa168_eth_suspend, + .driver = { + .name = DRIVER_NAME, + }, +}; + +static int __init pxa168_init_module(void) +{ + return platform_driver_register(&pxa168_eth_driver); +} + +static void __exit pxa168_cleanup_module(void) +{ + platform_driver_unregister(&pxa168_eth_driver); +} + +module_init(pxa168_init_module); +module_exit(pxa168_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168"); +MODULE_ALIAS("platform:pxa168_eth"); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c new file mode 100644 index 000000000000..98ec614c5690 --- /dev/null +++ b/drivers/net/ethernet/marvell/skge.c @@ -0,0 +1,4133 @@ +/* + * New driver for Marvell Yukon chipset and SysKonnect Gigabit + * Ethernet adapters. Based on earlier sk98lin, e100 and + * FreeBSD if_sk drivers. + * + * This driver intentionally does not support all the features + * of the original driver such as link fail-over and link management because + * those should be done at higher levels. + * + * Copyright (C) 2004, 2005 Stephen Hemminger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "skge.h" + +#define DRV_NAME "skge" +#define DRV_VERSION "1.14" + +#define DEFAULT_TX_RING_SIZE 128 +#define DEFAULT_RX_RING_SIZE 512 +#define MAX_TX_RING_SIZE 1024 +#define TX_LOW_WATER (MAX_SKB_FRAGS + 1) +#define MAX_RX_RING_SIZE 4096 +#define RX_COPY_THRESHOLD 128 +#define RX_BUF_SIZE 1536 +#define PHY_RETRIES 1000 +#define ETH_JUMBO_MTU 9000 +#define TX_WATCHDOG (5 * HZ) +#define NAPI_WEIGHT 64 +#define BLINK_MS 250 +#define LINK_HZ HZ + +#define SKGE_EEPROM_MAGIC 0x9933aabb + + +MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); +MODULE_AUTHOR("Stephen Hemminger "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN); + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = { + { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */ + { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */ +#ifdef CONFIG_SKGE_GENESIS + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */ +#endif + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ + { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */ + { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */ + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, skge_id_table); + +static int skge_up(struct net_device *dev); +static int skge_down(struct net_device *dev); +static void skge_phy_reset(struct skge_port *skge); +static void skge_tx_clean(struct net_device *dev); +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static void genesis_get_stats(struct skge_port *skge, u64 *data); +static void yukon_get_stats(struct skge_port *skge, u64 *data); +static void yukon_init(struct skge_hw *hw, int port); +static void genesis_mac_init(struct skge_hw *hw, int port); +static void genesis_link_up(struct skge_port *skge); +static void skge_set_multicast(struct net_device *dev); + +/* Avoid conditionals by using array */ +static const int txqaddr[] = { Q_XA1, Q_XA2 }; +static const int rxqaddr[] = { Q_R1, Q_R2 }; +static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; +static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; +static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F }; +static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 }; + +static inline bool is_genesis(const struct skge_hw *hw) +{ +#ifdef CONFIG_SKGE_GENESIS + return hw->chip_id == CHIP_ID_GENESIS; +#else + return false; +#endif +} + +static int skge_get_regs_len(struct net_device *dev) +{ + return 0x4000; +} + +/* + * Returns copy of whole control register 
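(0x4000 bytes, matching skge_get_regs_len above) 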
region + * Note: skip RAM address register because accessing it will + * cause bus hangs! + */ +static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + const struct skge_port *skge = netdev_priv(dev); + const void __iomem *io = skge->hw->regs; + + regs->version = 1; + memset(p, 0, regs->len); + memcpy_fromio(p, io, B3_RAM_ADDR); + + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); +} + +/* Wake on LAN is only supported on Yukon chips with rev 1 or above */ +static u32 wol_supported(const struct skge_hw *hw) +{ + if (is_genesis(hw)) + return 0; + + if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + return 0; + + return WAKE_MAGIC | WAKE_PHY; +} + +static void skge_wol_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl; + + skge_write16(hw, B0_CTST, CS_RST_CLR); + skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); + + /* Turn on Vaux */ + skge_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); + + /* WA code for COMA mode -- clear PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + u32 reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9; + reg &= ~GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + skge_write32(hw, SK_REG(port, GPHY_CTRL), + GPC_DIS_SLEEP | + GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | + GPC_ANEG_1 | GPC_RST_SET); + + skge_write32(hw, SK_REG(port, GPHY_CTRL), + GPC_DIS_SLEEP | + GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | + GPC_ANEG_1 | GPC_RST_CLR); + + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + /* Force to 10/100; skge_reset will re-enable on resume */ + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, + (PHY_AN_100FULL | PHY_AN_100HALF | + PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA)); + /* no 1000 HD/FD */ + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_CTRL, + PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE | + PHY_CT_RE_CFG | PHY_CT_DUP_MD); + + + /* Set GMAC to no flow control and auto update for speed/duplex */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| + GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); + + /* Set WOL address */ + memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), + skge->netdev->dev_addr, ETH_ALEN); + + /* Turn on appropriate WOL control bits */ + skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); + ctrl = 0; + if (skge->wol & WAKE_PHY) + ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; + + if (skge->wol & WAKE_MAGIC) + ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; + + ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; + skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); + + /* block receiver */ + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); +} + +static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct skge_port *skge = netdev_priv(dev); + + wol->supported = wol_supported(skge->hw); + wol->wolopts = skge->wol; +} + +static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + + if ((wol->wolopts & ~wol_supported(hw)) || + !device_can_wakeup(&hw->pdev->dev)) + 
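/* reject modes the chip cannot wake on */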
return -EOPNOTSUPP; + + skge->wol = wol->wolopts; + + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + + return 0; +} + +/* Determine supported/advertised modes based on hardware. + * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx + */ +static u32 skge_supported_modes(const struct skge_hw *hw) +{ + u32 supported; + + if (hw->copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + + if (is_genesis(hw)) + supported &= ~(SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); + + else if (hw->chip_id == CHIP_ID_YUKON) + supported &= ~SUPPORTED_1000baseT_Half; + } else + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + return supported; +} + +static int skge_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = skge_supported_modes(hw); + + if (hw->copper) { + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy_addr; + } else + ecmd->port = PORT_FIBRE; + + ecmd->advertising = skge->advertising; + ecmd->autoneg = skge->autoneg; + ethtool_cmd_speed_set(ecmd, skge->speed); + ecmd->duplex = skge->duplex; + return 0; +} + +static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + u32 supported = skge_supported_modes(hw); + int err = 0; + + if (ecmd->autoneg == AUTONEG_ENABLE) { + ecmd->advertising = supported; + skge->duplex = -1; + skge->speed = -1; + } else { + u32 setting; + u32 speed = ethtool_cmd_speed(ecmd); + + switch (speed) { + case SPEED_1000: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_1000baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_1000baseT_Half; + else + return -EINVAL; + break; + case SPEED_100: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_100baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_100baseT_Half; + else + return -EINVAL; + break; + + case SPEED_10: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_10baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_10baseT_Half; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((setting & supported) == 0) + return -EINVAL; + + skge->speed = speed; + skge->duplex = ecmd->duplex; + } + + skge->autoneg = ecmd->autoneg; + skge->advertising = ecmd->advertising; + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } + + return 0; +} + +static void skge_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct skge_port *skge = netdev_priv(dev); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); + strcpy(info->bus_info, pci_name(skge->hw->pdev)); +} + +static const struct skge_stat { + char name[ETH_GSTRING_LEN]; + u16 xmac_offset; + u16 gma_offset; +} skge_stats[] = { + { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI }, + { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI }, + + { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK }, + { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK }, + { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK }, + { 
"rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK }, + { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK }, + { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK }, + { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE }, + { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE }, + + { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL }, + { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL }, + { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL }, + { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL }, + { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR }, + { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV }, + + { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, + { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT }, + { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG }, + { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, + { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR }, +}; + +static int skge_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(skge_stats); + default: + return -EOPNOTSUPP; + } +} + +static void skge_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct skge_port *skge = netdev_priv(dev); + + if (is_genesis(skge->hw)) + genesis_get_stats(skge, data); + else + yukon_get_stats(skge, data); +} + +/* Use hardware MIB variables for critical path statistics and + * transmit feedback not reported at interrupt. + * Other errors are accounted for in interrupt handler. + */ +static struct net_device_stats *skge_get_stats(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u64 data[ARRAY_SIZE(skge_stats)]; + + if (is_genesis(skge->hw)) + genesis_get_stats(skge, data); + else + yukon_get_stats(skge, data); + + dev->stats.tx_bytes = data[0]; + dev->stats.rx_bytes = data[1]; + dev->stats.tx_packets = data[2] + data[4] + data[6]; + dev->stats.rx_packets = data[3] + data[5] + data[7]; + dev->stats.multicast = data[3] + data[5]; + dev->stats.collisions = data[10]; + dev->stats.tx_aborted_errors = data[12]; + + return &dev->stats; +} + +static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(skge_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + skge_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static void skge_get_ring_param(struct net_device *dev, + struct ethtool_ringparam *p) +{ + struct skge_port *skge = netdev_priv(dev); + + p->rx_max_pending = MAX_RX_RING_SIZE; + p->tx_max_pending = MAX_TX_RING_SIZE; + p->rx_mini_max_pending = 0; + p->rx_jumbo_max_pending = 0; + + p->rx_pending = skge->rx_ring.count; + p->tx_pending = skge->tx_ring.count; + p->rx_mini_pending = 0; + p->rx_jumbo_pending = 0; +} + +static int skge_set_ring_param(struct net_device *dev, + struct ethtool_ringparam *p) +{ + struct skge_port *skge = netdev_priv(dev); + int err = 0; + + if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || + p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE) + return -EINVAL; + + skge->rx_ring.count = p->rx_pending; + skge->tx_ring.count = p->tx_pending; + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) + dev_close(dev); + } + + return err; +} + +static u32 skge_get_msglevel(struct net_device *netdev) +{ + struct skge_port *skge = netdev_priv(netdev); + return skge->msg_enable; +} + +static void skge_set_msglevel(struct net_device *netdev, u32 value) +{ + struct skge_port *skge = netdev_priv(netdev); + skge->msg_enable = value; +} + +static int 
skge_nway_reset(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev)) + return -EINVAL; + + skge_phy_reset(skge); + return 0; +} + +static void skge_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + + ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) || + (skge->flow_control == FLOW_MODE_SYM_OR_REM)); + ecmd->tx_pause = (ecmd->rx_pause || + (skge->flow_control == FLOW_MODE_LOC_SEND)); + + ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause; +} + +static int skge_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct ethtool_pauseparam old; + int err = 0; + + skge_get_pauseparam(dev, &old); + + if (ecmd->autoneg != old.autoneg) + skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC; + else { + if (ecmd->rx_pause && ecmd->tx_pause) + skge->flow_control = FLOW_MODE_SYMMETRIC; + else if (ecmd->rx_pause && !ecmd->tx_pause) + skge->flow_control = FLOW_MODE_SYM_OR_REM; + else if (!ecmd->rx_pause && ecmd->tx_pause) + skge->flow_control = FLOW_MODE_LOC_SEND; + else + skge->flow_control = FLOW_MODE_NONE; + } + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } + + return 0; +} + +/* Chip internal frequency for clock calculations */ +static inline u32 hwkhz(const struct skge_hw *hw) +{ + return is_genesis(hw) ? 53125 : 78125; +} + +/* Chip HZ to microseconds */ +static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks) +{ + return (ticks * 1000) / hwkhz(hw); +} + +/* Microseconds to chip HZ */ +static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) +{ + return hwkhz(hw) * usec / 1000; +} + +static int skge_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + + ecmd->rx_coalesce_usecs = 0; + ecmd->tx_coalesce_usecs = 0; + + if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) { + u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI)); + u32 msk = skge_read32(hw, B2_IRQM_MSK); + + if (msk & rxirqmask[port]) + ecmd->rx_coalesce_usecs = delay; + if (msk & txirqmask[port]) + ecmd->tx_coalesce_usecs = delay; + } + + return 0; +} + +/* Note: interrupt timer is per board, but can turn on/off per port */ +static int skge_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u32 msk = skge_read32(hw, B2_IRQM_MSK); + u32 delay = 25; + + if (ecmd->rx_coalesce_usecs == 0) + msk &= ~rxirqmask[port]; + else if (ecmd->rx_coalesce_usecs < 25 || + ecmd->rx_coalesce_usecs > 33333) + return -EINVAL; + else { + msk |= rxirqmask[port]; + delay = ecmd->rx_coalesce_usecs; + } + + if (ecmd->tx_coalesce_usecs == 0) + msk &= ~txirqmask[port]; + else if (ecmd->tx_coalesce_usecs < 25 || + ecmd->tx_coalesce_usecs > 33333) + return -EINVAL; + else { + msk |= txirqmask[port]; + delay = min(delay, ecmd->rx_coalesce_usecs); + } + + skge_write32(hw, B2_IRQM_MSK, msk); + if (msk == 0) + skge_write32(hw, B2_IRQM_CTRL, TIM_STOP); + else { + skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay)); + skge_write32(hw, B2_IRQM_CTRL, TIM_START); + } + return 0; +} + +enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST }; +static void 
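/* PHY access is serialized by phy_lock, taken with BH disabled */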
skge_led(struct skge_port *skge, enum led_mode mode) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) { + switch (mode) { + case LED_MODE_OFF: + if (hw->phy_type == SK_PHY_BCOM) + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF); + else { + skge_write32(hw, SK_REG(port, TX_LED_VAL), 0); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF); + } + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); + skge_write32(hw, SK_REG(port, RX_LED_VAL), 0); + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF); + break; + + case LED_MODE_ON: + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON); + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); + + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + + break; + + case LED_MODE_TST: + skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON); + skge_write32(hw, SK_REG(port, RX_LED_VAL), 100); + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + + if (hw->phy_type == SK_PHY_BCOM) + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON); + else { + skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON); + skge_write32(hw, SK_REG(port, TX_LED_VAL), 100); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + } + + } + } else { + switch (mode) { + case LED_MODE_OFF: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(MO_LED_OFF) | + PHY_M_LED_MO_10(MO_LED_OFF) | + PHY_M_LED_MO_100(MO_LED_OFF) | + PHY_M_LED_MO_1000(MO_LED_OFF) | + PHY_M_LED_MO_RX(MO_LED_OFF)); + break; + case LED_MODE_ON: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, + PHY_M_LED_PULS_DUR(PULS_170MS) | + PHY_M_LED_BLINK_RT(BLINK_84MS) | + PHY_M_LEDC_TX_CTRL | + PHY_M_LEDC_DP_CTRL); + + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_RX(MO_LED_OFF) | + (skge->speed == SPEED_100 ? + PHY_M_LED_MO_100(MO_LED_ON) : 0)); + break; + case LED_MODE_TST: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(MO_LED_ON) | + PHY_M_LED_MO_10(MO_LED_ON) | + PHY_M_LED_MO_100(MO_LED_ON) | + PHY_M_LED_MO_1000(MO_LED_ON) | + PHY_M_LED_MO_RX(MO_LED_ON)); + } + } + spin_unlock_bh(&hw->phy_lock); +} + +/* blink LEDs for finding the board */ +static int skge_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct skge_port *skge = netdev_priv(dev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; /* cycle on/off twice per second */ + + case ETHTOOL_ID_ON: + skge_led(skge, LED_MODE_TST); + break; + + case ETHTOOL_ID_OFF: + skge_led(skge, LED_MODE_OFF); + break; + + case ETHTOOL_ID_INACTIVE: + /* back to regular LED state */ + skge_led(skge, netif_running(dev) ? 
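/* restore normal indication for a running port */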
LED_MODE_ON : LED_MODE_OFF); + } + + return 0; +} + +static int skge_get_eeprom_len(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u32 reg2; + + pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2); + return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); +} + +static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) +{ + u32 val; + + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (!(offset & PCI_VPD_ADDR_F)); + + pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); + return val; +} + +static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) +{ + pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val); + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, + offset | PCI_VPD_ADDR_F); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (offset & PCI_VPD_ADDR_F); +} + +static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + eeprom->magic = SKGE_EEPROM_MAGIC; + + while (length > 0) { + u32 val = skge_vpd_read(pdev, cap, offset); + int n = min_t(int, length, sizeof(val)); + + memcpy(data, &val, n); + length -= n; + data += n; + offset += n; + } + return 0; +} + +static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + if (eeprom->magic != SKGE_EEPROM_MAGIC) + return -EINVAL; + + while (length > 0) { + u32 val; + int n = min_t(int, length, sizeof(val)); + + if (n < sizeof(val)) + val = skge_vpd_read(pdev, cap, offset); + memcpy(&val, data, n); + + skge_vpd_write(pdev, cap, offset, val); + + length -= n; + data += n; + offset += n; + } + return 0; +} + +static const struct ethtool_ops skge_ethtool_ops = { + .get_settings = skge_get_settings, + .set_settings = skge_set_settings, + .get_drvinfo = skge_get_drvinfo, + .get_regs_len = skge_get_regs_len, + .get_regs = skge_get_regs, + .get_wol = skge_get_wol, + .set_wol = skge_set_wol, + .get_msglevel = skge_get_msglevel, + .set_msglevel = skge_set_msglevel, + .nway_reset = skge_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = skge_get_eeprom_len, + .get_eeprom = skge_get_eeprom, + .set_eeprom = skge_set_eeprom, + .get_ringparam = skge_get_ring_param, + .set_ringparam = skge_set_ring_param, + .get_pauseparam = skge_get_pauseparam, + .set_pauseparam = skge_set_pauseparam, + .get_coalesce = skge_get_coalesce, + .set_coalesce = skge_set_coalesce, + .get_strings = skge_get_strings, + .set_phys_id = skge_set_phys_id, + .get_sset_count = skge_get_sset_count, + .get_ethtool_stats = skge_get_ethtool_stats, +}; + +/* + * Allocate ring elements and chain them together + * One-to-one association of board descriptors with ring elements + */ +static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) +{ + struct skge_tx_desc *d; + struct skge_element *e; + int i; + + ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL); + if (!ring->start) + return -ENOMEM; + + for (i = 0, e = ring->start, d = vaddr; i < ring->count; 
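/* software element and hardware descriptor advance in lockstep */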
i++, e++, d++) { + e->desc = d; + if (i == ring->count - 1) { + e->next = ring->start; + d->next_offset = base; + } else { + e->next = e + 1; + d->next_offset = base + (i+1) * sizeof(*d); + } + } + ring->to_use = ring->to_clean = ring->start; + + return 0; +} + +/* Allocate and setup a new buffer for receiving */ +static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, + struct sk_buff *skb, unsigned int bufsize) +{ + struct skge_rx_desc *rd = e->desc; + u64 map; + + map = pci_map_single(skge->hw->pdev, skb->data, bufsize, + PCI_DMA_FROMDEVICE); + + rd->dma_lo = map; + rd->dma_hi = map >> 32; + e->skb = skb; + rd->csum1_start = ETH_HLEN; + rd->csum2_start = ETH_HLEN; + rd->csum1 = 0; + rd->csum2 = 0; + + wmb(); + + rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, bufsize); +} + +/* Resume receiving using existing skb, + * Note: DMA address is not changed by chip. + * MTU not changed while receiver active. + */ +static inline void skge_rx_reuse(struct skge_element *e, unsigned int size) +{ + struct skge_rx_desc *rd = e->desc; + + rd->csum2 = 0; + rd->csum2_start = ETH_HLEN; + + wmb(); + + rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size; +} + + +/* Free all buffers in receive ring, assumes receiver stopped */ +static void skge_rx_clean(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + + e = ring->start; + do { + struct skge_rx_desc *rd = e->desc; + rd->control = 0; + if (e->skb) { + pci_unmap_single(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(e->skb); + e->skb = NULL; + } + } while ((e = e->next) != ring->start); +} + + +/* Allocate buffers for receive ring + * For receive: to_clean is next received frame. + */ +static int skge_rx_fill(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + + e = ring->start; + do { + struct sk_buff *skb; + + skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN, + GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, NET_IP_ALIGN); + skge_rx_setup(skge, e, skb, skge->rx_buf_size); + } while ((e = e->next) != ring->start); + + ring->to_clean = ring->start; + return 0; +} + +static const char *skge_pause(enum pause_status status) +{ + switch (status) { + case FLOW_STAT_NONE: + return "none"; + case FLOW_STAT_REM_SEND: + return "rx only"; + case FLOW_STAT_LOC_SEND: + return "tx only"; + case FLOW_STAT_SYMMETRIC: /* Both stations may send PAUSE */ + return "both"; + default: + return "indeterminate"; + } +} + + +static void skge_link_up(struct skge_port *skge) +{ + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), + LED_BLK_OFF|LED_SYNC_OFF|LED_ON); + + netif_carrier_on(skge->netdev); + netif_wake_queue(skge->netdev); + + netif_info(skge, link, skge->netdev, + "Link is up at %d Mbps, %s duplex, flow control %s\n", + skge->speed, + skge->duplex == DUPLEX_FULL ? 
"full" : "half", + skge_pause(skge->flow_status)); +} + +static void skge_link_down(struct skge_port *skge) +{ + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); + netif_carrier_off(skge->netdev); + netif_stop_queue(skge->netdev); + + netif_info(skge, link, skge->netdev, "Link is down\n"); +} + +static void xm_link_down(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); + + if (netif_carrier_ok(dev)) + skge_link_down(skge); +} + +static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + *val = xm_read16(hw, port, XM_PHY_DATA); + + if (hw->phy_type == SK_PHY_XMAC) + goto ready; + + for (i = 0; i < PHY_RETRIES; i++) { + if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) + goto ready; + udelay(1); + } + + return -ETIMEDOUT; + ready: + *val = xm_read16(hw, port, XM_PHY_DATA); + + return 0; +} + +static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__xm_phy_read(hw, port, reg, &v)) + pr_warning("%s: phy read timed out\n", hw->dev[port]->name); + return v; +} + +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + goto ready; + udelay(1); + } + return -EIO; + + ready: + xm_write16(hw, port, XM_PHY_DATA, val); + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void genesis_init(struct skge_hw *hw) +{ + /* set blink source counter */ + skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100); + skge_write8(hw, B2_BSC_CTRL, BSC_START); + + /* configure mac arbiter */ + skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure mac arbiter timeout values */ + skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53); + + skge_write8(hw, B3_MA_RCINI_RX1, 0); + skge_write8(hw, B3_MA_RCINI_RX2, 0); + skge_write8(hw, B3_MA_RCINI_TX1, 0); + skge_write8(hw, B3_MA_RCINI_TX2, 0); + + /* configure packet arbiter timeout */ + skge_write16(hw, B3_PA_CTRL, PA_RST_CLR); + skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX); +} + +static void genesis_reset(struct skge_hw *hw, int port) +{ + static const u8 zero[8] = { 0 }; + u32 reg; + + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + + /* reset the statistics module */ + xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); + xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ + xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ + xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ + + /* disable Broadcom PHY IRQ */ + if (hw->phy_type == SK_PHY_BCOM) + xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); + + xm_outhash(hw, port, XM_HSM, zero); + + /* Flush TX and RX fifo */ + reg = xm_read32(hw, port, XM_MODE); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); +} + +/* Convert mode to MII values */ +static 
const u16 phy_pause_map[] = { + [FLOW_MODE_NONE] = 0, + [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM, + [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP, + [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, +}; + +/* special defines for FIBER (88E1011S only) */ +static const u16 fiber_pause_map[] = { + [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE, + [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD, + [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD, + [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD, +}; + + +/* Check status of Broadcom phy link */ +static void bcom_check_link(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u16 status; + + /* read twice because of latch */ + xm_phy_read(hw, port, PHY_BCOM_STAT); + status = xm_phy_read(hw, port, PHY_BCOM_STAT); + + if ((status & PHY_ST_LSYNC) == 0) { + xm_link_down(hw, port); + return; + } + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 lpa, aux; + + if (!(status & PHY_ST_AN_OVER)) + return; + + lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); + if (lpa & PHY_B_AN_RF) { + netdev_notice(dev, "remote fault\n"); + return; + } + + aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT); + + /* Check Duplex mismatch */ + switch (aux & PHY_B_AS_AN_RES_MSK) { + case PHY_B_RES_1000FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_B_RES_1000HD: + skge->duplex = DUPLEX_HALF; + break; + default: + netdev_notice(dev, "duplex mismatch\n"); + return; + } + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + switch (aux & PHY_B_AS_PAUSE_MSK) { + case PHY_B_AS_PAUSE_MSK: + skge->flow_status = FLOW_STAT_SYMMETRIC; + break; + case PHY_B_AS_PRR: + skge->flow_status = FLOW_STAT_REM_SEND; + break; + case PHY_B_AS_PRT: + skge->flow_status = FLOW_STAT_LOC_SEND; + break; + default: + skge->flow_status = FLOW_STAT_NONE; + } + skge->speed = SPEED_1000; + } + + if (!netif_carrier_ok(dev)) + genesis_link_up(skge); +} + +/* Broadcom 5400 only supports gigabit! SysKonnect did not add an additional + * PHY for 100 or 10Mbit operation + */ +static void bcom_phy_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + u16 id1, r, ext, ctl; + + /* magic workaround patterns for Broadcom */ + static const struct { + u16 reg; + u16 val; + } A1hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, + { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, + { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, + { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, + }, C0hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, + { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, + }; + + /* read Id from external PHY (all have the same address) */ + id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); + + /* Optimize MDIO transfer by suppressing preamble. */ + r = xm_read16(hw, port, XM_MMU_CMD); + r |= XM_MMU_NO_PRE; + xm_write16(hw, port, XM_MMU_CMD, r); + + switch (id1) { + case PHY_BCOM_ID1_C0: + /* + * Workaround BCOM Errata for the C0 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(C0hack); i++) + xm_phy_write(hw, port, + C0hack[i].reg, C0hack[i].val); + + break; + case PHY_BCOM_ID1_A1: + /* + * Workaround BCOM Errata for the A1 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(A1hack); i++) + xm_phy_write(hw, port, + A1hack[i].reg, A1hack[i].val); + break; + } + + /* + * Workaround BCOM Errata (#10523) for all BCom PHYs. + * Disable Power Management after reset. 
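+ * It is enabled again in genesis_link_up() once the link comes up.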
+ */ + r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL); + r |= PHY_B_AC_DIS_PM; + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r); + + /* Dummy read */ + xm_read16(hw, port, XM_ISRC); + + ext = PHY_B_PEC_EN_LTR; /* enable tx led */ + ctl = PHY_CT_SP1000; /* always 1000mbit */ + + if (skge->autoneg == AUTONEG_ENABLE) { + /* + * Workaround BCOM Errata #1 for the C5 type. + * 1000Base-T Link Acquisition Failure in Slave Mode + * Set Repeater/DTE bit 10 of the 1000Base-T Control Register + */ + u16 adv = PHY_B_1000C_RD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_B_1000C_AHD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_B_1000C_AFD; + xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv); + + ctl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + if (skge->duplex == DUPLEX_FULL) + ctl |= PHY_CT_DUP_MD; + /* Force to slave */ + xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE); + } + + /* Set autonegotiation pause parameters */ + xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, + phy_pause_map[skge->flow_control] | PHY_AN_CSMA); + + /* Handle Jumbo frames */ + if (hw->dev[port]->mtu > ETH_DATA_LEN) { + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK); + + ext |= PHY_B_PEC_HIGH_LA; + + } + + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext); + xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl); + + /* Use link status change interrupt */ + xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); +} + +static void xm_phy_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl = 0; + + if (skge->autoneg == AUTONEG_ENABLE) { + if (skge->advertising & ADVERTISED_1000baseT_Half) + ctrl |= PHY_X_AN_HD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + ctrl |= PHY_X_AN_FD; + + ctrl |= fiber_pause_map[skge->flow_control]; + + xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl); + + /* Restart Auto-negotiation */ + ctrl = PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* Set DuplexMode in Config register */ + if (skge->duplex == DUPLEX_FULL) + ctrl |= PHY_CT_DUP_MD; + /* + * Do NOT enable Auto-negotiation here. 
This would hold + * the link down because no IDLEs are transmitted + */ + } + + xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl); + + /* Poll PHY for status changes */ + mod_timer(&skge->link_timer, jiffies + LINK_HZ); +} + +static int xm_check_link(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 status; + + /* read twice because of latch */ + xm_phy_read(hw, port, PHY_XMAC_STAT); + status = xm_phy_read(hw, port, PHY_XMAC_STAT); + + if ((status & PHY_ST_LSYNC) == 0) { + xm_link_down(hw, port); + return 0; + } + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 lpa, res; + + if (!(status & PHY_ST_AN_OVER)) + return 0; + + lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); + if (lpa & PHY_B_AN_RF) { + netdev_notice(dev, "remote fault\n"); + return 0; + } + + res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); + + /* Check Duplex mismatch */ + switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) { + case PHY_X_RS_FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_X_RS_HD: + skge->duplex = DUPLEX_HALF; + break; + default: + netdev_notice(dev, "duplex mismatch\n"); + return 0; + } + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + if ((skge->flow_control == FLOW_MODE_SYMMETRIC || + skge->flow_control == FLOW_MODE_SYM_OR_REM) && + (lpa & PHY_X_P_SYM_MD)) + skge->flow_status = FLOW_STAT_SYMMETRIC; + else if (skge->flow_control == FLOW_MODE_SYM_OR_REM && + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) + /* Enable PAUSE receive, disable PAUSE transmit */ + skge->flow_status = FLOW_STAT_REM_SEND; + else if (skge->flow_control == FLOW_MODE_LOC_SEND && + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) + /* Disable PAUSE receive, enable PAUSE transmit */ + skge->flow_status = FLOW_STAT_LOC_SEND; + else + skge->flow_status = FLOW_STAT_NONE; + + skge->speed = SPEED_1000; + } + + if (!netif_carrier_ok(dev)) + genesis_link_up(skge); + return 1; +} + +/* Poll to check for link coming up. + * + * Since internal PHY is wired to a level triggered pin, can't + * get an interrupt when carrier is detected, need to poll for + * link coming up. + */ +static void xm_link_timer(unsigned long arg) +{ + struct skge_port *skge = (struct skge_port *) arg; + struct net_device *dev = skge->netdev; + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + unsigned long flags; + + if (!netif_running(dev)) + return; + + spin_lock_irqsave(&hw->phy_lock, flags); + + /* + * Verify the link by checking the GPIO register three times. + * This pin has the signal from the link_sync pin connected to it. 
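+ * Sampling it three times filters out transient glitches.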
+ */ + for (i = 0; i < 3; i++) { + if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) + goto link_down; + } + + /* Re-enable interrupt to detect link down */ + if (xm_check_link(dev)) { + u16 msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_INP_ASS; + xm_write16(hw, port, XM_IMSK, msk); + xm_read16(hw, port, XM_ISRC); + } else { +link_down: + mod_timer(&skge->link_timer, + round_jiffies(jiffies + LINK_HZ)); + } + spin_unlock_irqrestore(&hw->phy_lock, flags); +} + +static void genesis_mac_init(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN; + int i; + u32 r; + static const u8 zero[6] = { 0 }; + + for (i = 0; i < 10; i++) { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_SET_MAC_RST); + if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST) + goto reset_ok; + udelay(1); + } + + netdev_warn(dev, "genesis reset failed\n"); + + reset_ok: + /* Unreset the XMAC. */ + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + + /* + * Perform additional initialization for external PHYs, + * namely for the 1000baseTX cards that use the XMAC's + * GMII mode. + */ + if (hw->phy_type != SK_PHY_XMAC) { + /* Take external Phy out of reset */ + r = skge_read32(hw, B2_GP_IO); + if (port == 0) + r |= GP_DIR_0|GP_IO_0; + else + r |= GP_DIR_2|GP_IO_2; + + skge_write32(hw, B2_GP_IO, r); + + /* Enable GMII interface */ + xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); + } + + + switch (hw->phy_type) { + case SK_PHY_XMAC: + xm_phy_init(skge); + break; + case SK_PHY_BCOM: + bcom_phy_init(skge); + bcom_check_link(hw, port); + } + + /* Set Station Address */ + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + + /* We don't use match addresses so clear */ + for (i = 1; i < 16; i++) + xm_outaddr(hw, port, XM_EXM(i), zero); + + /* Clear MIB counters */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* Clear two times according to Errata #3 */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + + /* configure Rx High Water Mark (XM_RX_HI_WM) */ + xm_write16(hw, port, XM_RX_HI_WM, 1450); + + /* We don't need the FCS appended to the packet. */ + r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS; + if (jumbo) + r |= XM_RX_BIG_PK_OK; + + if (skge->duplex == DUPLEX_HALF) { + /* + * If in manual half duplex mode the other side might be in + * full duplex mode, so ignore if a carrier extension is not seen + * on frames received + */ + r |= XM_RX_DIS_CEXT; + } + xm_write16(hw, port, XM_RX_CMD, r); + + /* We want short frames padded to 60 bytes. */ + xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); + + /* Increase threshold for jumbo frames on dual port */ + if (hw->ports > 1 && jumbo) + xm_write16(hw, port, XM_TX_THR, 1020); + else + xm_write16(hw, port, XM_TX_THR, 512); + + /* + * Enable the reception of all error frames. This is + * a necessary evil due to the design of the XMAC. The + * XMAC's receive FIFO is only 8K in size, however jumbo + * frames can be up to 9000 bytes in length. When bad + * frame filtering is enabled, the XMAC's RX FIFO operates + * in 'store and forward' mode. For this to work, the + * entire frame has to fit into the FIFO, but that means + * that jumbo frames larger than 8192 bytes will be + * truncated. Disabling all bad frame filtering causes + * the RX FIFO to operate in streaming mode, in which + * case the XMAC will start transferring frames out of the + * RX FIFO as soon as the FIFO threshold is reached. 
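+ * Bad frames must then be dropped by the driver's receive path instead, + * based on the per-frame status the hardware reports.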
+ */ + xm_write32(hw, port, XM_MODE, XM_DEF_MODE); + + + /* + * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK) + * - Enable all bits excepting 'Octets Rx OK Low CntOv' + * and 'Octets Rx OK Hi Cnt Ov'. + */ + xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK); + + /* + * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK) + * - Enable all bits excepting 'Octets Tx OK Low CntOv' + * and 'Octets Tx OK Hi Cnt Ov'. + */ + xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK); + + /* Configure MAC arbiter */ + skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure timeout values */ + skge_write8(hw, B3_MA_TOINI_RX1, 72); + skge_write8(hw, B3_MA_TOINI_RX2, 72); + skge_write8(hw, B3_MA_TOINI_TX1, 72); + skge_write8(hw, B3_MA_TOINI_TX2, 72); + + skge_write8(hw, B3_MA_RCINI_RX1, 0); + skge_write8(hw, B3_MA_RCINI_RX2, 0); + skge_write8(hw, B3_MA_RCINI_TX1, 0); + skge_write8(hw, B3_MA_RCINI_TX2, 0); + + /* Configure Rx MAC FIFO */ + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); + + /* Configure Tx MAC FIFO */ + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); + + if (jumbo) { + /* Enable frame flushing if jumbo frames used */ + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH); + } else { + /* enable timeout timers if normal frames */ + skge_write16(hw, B3_PA_CTRL, + (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); + } +} + +static void genesis_stop(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + unsigned retries = 1000; + u16 cmd; + + /* Disable Tx and Rx */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); + xm_write16(hw, port, XM_MMU_CMD, cmd); + + genesis_reset(hw, port); + + /* Clear Tx packet arbiter timeout IRQ */ + skge_write16(hw, B3_PA_CTRL, + port == 0 ? 
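/* per-port timeout bit */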
PA_CLR_TO_TX1 : PA_CLR_TO_TX2); + + /* Reset the MAC */ + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + do { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) + break; + } while (--retries > 0); + + /* For external PHYs there must be special handling */ + if (hw->phy_type != SK_PHY_XMAC) { + u32 reg = skge_read32(hw, B2_GP_IO); + if (port == 0) { + reg |= GP_DIR_0; + reg &= ~GP_IO_0; + } else { + reg |= GP_DIR_2; + reg &= ~GP_IO_2; + } + skge_write32(hw, B2_GP_IO, reg); + skge_read32(hw, B2_GP_IO); + } + + xm_write16(hw, port, XM_MMU_CMD, + xm_read16(hw, port, XM_MMU_CMD) + & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); + + xm_read16(hw, port, XM_MMU_CMD); +} + + +static void genesis_get_stats(struct skge_port *skge, u64 *data) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + unsigned long timeout = jiffies + HZ; + + xm_write16(hw, port, + XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); + + /* wait for update to complete */ + while (xm_read16(hw, port, XM_STAT_CMD) + & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { + if (time_after(jiffies, timeout)) + break; + udelay(10); + } + + /* special case for 64 bit octet counter */ + data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32 + | xm_read32(hw, port, XM_TXO_OK_LO); + data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32 + | xm_read32(hw, port, XM_RXO_OK_LO); + + for (i = 2; i < ARRAY_SIZE(skge_stats); i++) + data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset); +} + +static void genesis_mac_intr(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u16 status = xm_read16(hw, port, XM_ISRC); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "mac interrupt status 0x%x\n", status); + + if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { + xm_link_down(hw, port); + mod_timer(&skge->link_timer, jiffies + 1); + } + + if (status & XM_IS_TXF_UR) { + xm_write32(hw, port, XM_MODE, XM_MD_FTF); + ++dev->stats.tx_fifo_errors; + } +} + +static void genesis_link_up(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 cmd, msk; + u32 mode; + + cmd = xm_read16(hw, port, XM_MMU_CMD); + + /* + * enabling pause frame reception is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + if (skge->flow_status == FLOW_STAT_NONE || + skge->flow_status == FLOW_STAT_LOC_SEND) + /* Disable Pause Frame Reception */ + cmd |= XM_MMU_IGN_PF; + else + /* Enable Pause Frame Reception */ + cmd &= ~XM_MMU_IGN_PF; + + xm_write16(hw, port, XM_MMU_CMD, cmd); + + mode = xm_read32(hw, port, XM_MODE); + if (skge->flow_status == FLOW_STAT_SYMMETRIC || + skge->flow_status == FLOW_STAT_LOC_SEND) { + /* + * Configure Pause Frame Generation + * Use internal and external Pause Frame Generation. + * Sending pause frames is edge triggered. + * Send a Pause frame with the maximum pause time if + * an internal or external FIFO full condition occurs. + * Send a zero pause time frame to re-start transmission. + */ + /* XM_PAUSE_DA = '010000C28001' (default) */ + /* XM_MAC_PTIME = 0xffff (maximum) */ + /* remember this value is defined in big endian (!) 
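, so the all-ones maximum needs no byte swapping 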
*/ + xm_write16(hw, port, XM_MAC_PTIME, 0xffff); + + mode |= XM_PAUSE_MODE; + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); + } else { + /* + * disabling pause frame generation is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + /* Disable Pause Mode in Mode Register */ + mode &= ~XM_PAUSE_MODE; + + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); + } + + xm_write32(hw, port, XM_MODE, mode); + + /* Turn on detection of Tx underrun */ + msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_TXF_UR; + xm_write16(hw, port, XM_IMSK, msk); + + xm_read16(hw, port, XM_ISRC); + + /* get MMU Command Reg. */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) + cmd |= XM_MMU_GMII_FD; + + /* + * Workaround BCOM Errata (#10523) for all BCom Phys + * Enable Power Management after link up + */ + if (hw->phy_type == SK_PHY_BCOM) { + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) + & ~PHY_B_AC_DIS_PM); + xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); + } + + /* enable Rx/Tx */ + xm_write16(hw, port, XM_MMU_CMD, + cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); + skge_link_up(skge); +} + + +static inline void bcom_phy_intr(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 isrc; + + isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "phy interrupt status 0x%x\n", isrc); + + if (isrc & PHY_B_IS_PSE) + pr_err("%s: uncorrectable pair swap error\n", + hw->dev[port]->name); + + /* Workaround BCom Errata: + * enable and disable loopback mode if "NO HCD" occurs. + */ + if (isrc & PHY_B_IS_NO_HDCL) { + u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL); + xm_phy_write(hw, port, PHY_BCOM_CTRL, + ctrl | PHY_CT_LOOP); + xm_phy_write(hw, port, PHY_BCOM_CTRL, + ctrl & ~PHY_CT_LOOP); + } + + if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) + bcom_check_link(hw, port); + +} + +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + gma_write16(hw, port, GM_SMI_DATA, val); + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + + if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) + return 0; + } + + pr_warning("%s: phy write timeout\n", hw->dev[port]->name); + return -EIO; +} + +static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) + | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); + + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) + goto ready; + } + + return -ETIMEDOUT; + ready: + *val = gma_read16(hw, port, GM_SMI_DATA); + return 0; +} + +static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__gm_phy_read(hw, port, reg, &v)) + pr_warning("%s: phy read timeout\n", hw->dev[port]->name); + return v; +} + +/* Marvell Phy Initialization */ +static void yukon_init(struct skge_hw *hw, int port) +{ + struct skge_port *skge = netdev_priv(hw->dev[port]); + u16 ctrl, ct1000, adv; + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + + ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | + PHY_M_EC_MAC_S_MSK); + ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + + ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); + + 
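/* write the updated extended control word back to the PHY */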
gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); + } + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + if (skge->autoneg == AUTONEG_DISABLE) + ctrl &= ~PHY_CT_ANE; + + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + ctrl = 0; + ct1000 = 0; + adv = PHY_AN_CSMA; + + if (skge->autoneg == AUTONEG_ENABLE) { + if (hw->copper) { + if (skge->advertising & ADVERTISED_1000baseT_Full) + ct1000 |= PHY_M_1000C_AFD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + ct1000 |= PHY_M_1000C_AHD; + if (skge->advertising & ADVERTISED_100baseT_Full) + adv |= PHY_M_AN_100_FD; + if (skge->advertising & ADVERTISED_100baseT_Half) + adv |= PHY_M_AN_100_HD; + if (skge->advertising & ADVERTISED_10baseT_Full) + adv |= PHY_M_AN_10_FD; + if (skge->advertising & ADVERTISED_10baseT_Half) + adv |= PHY_M_AN_10_HD; + + /* Set Flow-control capabilities */ + adv |= phy_pause_map[skge->flow_control]; + } else { + if (skge->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_M_AN_1000X_AFD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_M_AN_1000X_AHD; + + adv |= fiber_pause_map[skge->flow_control]; + } + + /* Restart Auto-negotiation */ + ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* forced speed/duplex settings */ + ct1000 = PHY_M_1000C_MSE; + + if (skge->duplex == DUPLEX_FULL) + ctrl |= PHY_CT_DUP_MD; + + switch (skge->speed) { + case SPEED_1000: + ctrl |= PHY_CT_SP1000; + break; + case SPEED_100: + ctrl |= PHY_CT_SP100; + break; + } + + ctrl |= PHY_CT_RESET; + } + + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); + + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* Enable phy interrupt on autonegotiation complete (or link up) */ + if (skge->autoneg == AUTONEG_ENABLE) + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK); + else + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); +} + +static void yukon_reset(struct skge_hw *hw, int port) +{ + gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ + gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ + gma_write16(hw, port, GM_MC_ADDR_H2, 0); + gma_write16(hw, port, GM_MC_ADDR_H3, 0); + gma_write16(hw, port, GM_MC_ADDR_H4, 0); + + gma_write16(hw, port, GM_RX_CTRL, + gma_read16(hw, port, GM_RX_CTRL) + | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); +} + +/* Apparently, early versions of Yukon-Lite had wrong chip_id? 
*/ +static int is_yukon_lite_a0(struct skge_hw *hw) +{ + u32 reg; + int ret; + + if (hw->chip_id != CHIP_ID_YUKON) + return 0; + + reg = skge_read32(hw, B2_FAR); + skge_write8(hw, B2_FAR + 3, 0xff); + ret = (skge_read8(hw, B2_FAR + 3) != 0); + skge_write32(hw, B2_FAR, reg); + return ret; +} + +static void yukon_mac_init(struct skge_hw *hw, int port) +{ + struct skge_port *skge = netdev_priv(hw->dev[port]); + int i; + u32 reg; + const u8 *addr = hw->dev[port]->dev_addr; + + /* WA code for COMA mode -- set PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9 | GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + /* hard reset */ + skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); + + /* WA code for COMA mode -- clear PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9; + reg &= ~GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + /* Set hardware config mode */ + reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | + GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; + reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; + + /* Clear GMC reset */ + skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); + skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); + + if (skge->autoneg == AUTONEG_DISABLE) { + reg = GM_GPCR_AU_ALL_DIS; + gma_write16(hw, port, GM_GP_CTRL, + gma_read16(hw, port, GM_GP_CTRL) | reg); + + switch (skge->speed) { + case SPEED_1000: + reg &= ~GM_GPCR_SPEED_100; + reg |= GM_GPCR_SPEED_1000; + break; + case SPEED_100: + reg &= ~GM_GPCR_SPEED_1000; + reg |= GM_GPCR_SPEED_100; + break; + case SPEED_10: + reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); + break; + } + + if (skge->duplex == DUPLEX_FULL) + reg |= GM_GPCR_DUP_FULL; + } else + reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; + + switch (skge->flow_control) { + case FLOW_MODE_NONE: + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case FLOW_MODE_LOC_SEND: + /* disable Rx flow-control */ + reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case FLOW_MODE_SYMMETRIC: + case FLOW_MODE_SYM_OR_REM: + /* enable Tx & Rx flow-control */ + break; + } + + gma_write16(hw, port, GM_GP_CTRL, reg); + skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); + + yukon_init(hw, port); + + /* MIB clear */ + reg = gma_read16(hw, port, GM_PHY_ADDR); + gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); + + for (i = 0; i < GM_MIB_CNT_SIZE; i++) + gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); + gma_write16(hw, port, GM_PHY_ADDR, reg); + + /* transmit control */ + gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); + + /* receive control reg: unicast + multicast + no FCS */ + gma_write16(hw, port, GM_RX_CTRL, + GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); + + /* transmit flow control */ + gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); + + /* transmit parameter */ + gma_write16(hw, port, GM_TX_PARAM, + TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | + TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | + TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); + + /* configure the Serial Mode Register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) + | GM_SMOD_VLAN_ENA + | IPG_DATA_VAL(IPG_DATA_DEF); + + if (hw->dev[port]->mtu > 
ETH_DATA_LEN) + reg |= GM_SMOD_JUMBO_ENA; + + gma_write16(hw, port, GM_SERIAL_MODE, reg); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); + + /* enable interrupt mask for counter overflows */ + gma_write16(hw, port, GM_TX_IRQ_MSK, 0); + gma_write16(hw, port, GM_RX_IRQ_MSK, 0); + gma_write16(hw, port, GM_TR_IRQ_MSK, 0); + + /* Initialize Mac Fifo */ + + /* Configure Rx MAC FIFO */ + skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); + reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + + /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */ + if (is_yukon_lite_a0(hw)) + reg &= ~GMF_RX_F_FL_ON; + + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); + /* + * because Pause Packet Truncation in GMAC is not working + * we have to increase the Flush Threshold to 64 bytes + * in order to flush pause packets in Rx FIFO on Yukon-1 + */ + skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); + + /* Configure Tx MAC FIFO */ + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); +} + +/* Go into power down mode */ +static void yukon_suspend(struct skge_hw *hw, int port) +{ + u16 ctrl; + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_PC_POL_R_DIS; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* switch IEEE compatible power down mode on */ + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_PDOWN; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); +} + +static void yukon_stop(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + yukon_reset(hw, port); + + gma_write16(hw, port, GM_GP_CTRL, + gma_read16(hw, port, GM_GP_CTRL) + & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); + gma_read16(hw, port, GM_GP_CTRL); + + yukon_suspend(hw, port); + + /* set GPHY Control reset */ + skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); +} + +static void yukon_get_stats(struct skge_port *skge, u64 *data) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + + data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 + | gma_read32(hw, port, GM_TXO_OK_LO); + data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 + | gma_read32(hw, port, GM_RXO_OK_LO); + + for (i = 2; i < ARRAY_SIZE(skge_stats); i++) + data[i] = gma_read32(hw, port, + skge_stats[i].gma_offset); +} + +static void yukon_mac_intr(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "mac interrupt status 0x%x\n", status); + + if (status & GM_IS_RX_FF_OR) { + ++dev->stats.rx_fifo_errors; + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); + } + + if (status & GM_IS_TX_FF_UR) { + ++dev->stats.tx_fifo_errors; + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); + } + +} + +static u16 yukon_speed(const struct skge_hw *hw, u16 aux) +{ + switch (aux & PHY_M_PS_SPEED_MSK) { + case PHY_M_PS_SPEED_1000: + return SPEED_1000; + case PHY_M_PS_SPEED_100: + return SPEED_100; + default: + 
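/* The PHY_M_PS_SPEED_MSK field only has the 1000 and 100 + * encodings matched above; any other value in the PHY status + * speed field is reported as 10 Mbit. + */ +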
return SPEED_10; + } +} + +static void yukon_link_up(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 reg; + + /* Enable Transmit FIFO Underrun */ + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); + + reg = gma_read16(hw, port, GM_GP_CTRL); + if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) + reg |= GM_GPCR_DUP_FULL; + + /* enable Rx/Tx */ + reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; + gma_write16(hw, port, GM_GP_CTRL, reg); + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); + skge_link_up(skge); +} + +static void yukon_link_down(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl; + + ctrl = gma_read16(hw, port, GM_GP_CTRL); + ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + gma_write16(hw, port, GM_GP_CTRL, ctrl); + + if (skge->flow_status == FLOW_STAT_REM_SEND) { + ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); + ctrl |= PHY_M_AN_ASP; + /* restore Asymmetric Pause bit */ + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); + } + + skge_link_down(skge); + + yukon_init(hw, port); +} + +static void yukon_phy_intr(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + const char *reason = NULL; + u16 istatus, phystat; + + istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); + phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "phy interrupt status 0x%x 0x%x\n", istatus, phystat); + + if (istatus & PHY_M_IS_AN_COMPL) { + if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) + & PHY_M_AN_RF) { + reason = "remote fault"; + goto failed; + } + + if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { + reason = "master/slave fault"; + goto failed; + } + + if (!(phystat & PHY_M_PS_SPDUP_RES)) { + reason = "speed/duplex"; + goto failed; + } + + skge->duplex = (phystat & PHY_M_PS_FULL_DUP) + ? DUPLEX_FULL : DUPLEX_HALF; + skge->speed = yukon_speed(hw, phystat); + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + switch (phystat & PHY_M_PS_PAUSE_MSK) { + case PHY_M_PS_PAUSE_MSK: + skge->flow_status = FLOW_STAT_SYMMETRIC; + break; + case PHY_M_PS_RX_P_EN: + skge->flow_status = FLOW_STAT_REM_SEND; + break; + case PHY_M_PS_TX_P_EN: + skge->flow_status = FLOW_STAT_LOC_SEND; + break; + default: + skge->flow_status = FLOW_STAT_NONE; + } + + if (skge->flow_status == FLOW_STAT_NONE || + (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + else + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + yukon_link_up(skge); + return; + } + + if (istatus & PHY_M_IS_LSP_CHANGE) + skge->speed = yukon_speed(hw, phystat); + + if (istatus & PHY_M_IS_DUP_CHANGE) + skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; + if (istatus & PHY_M_IS_LST_CHANGE) { + if (phystat & PHY_M_PS_LINK_UP) + yukon_link_up(skge); + else + yukon_link_down(skge); + } + return; + failed: + pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason); + + /* XXX restart autonegotiation? 
*/ +} + +static void skge_phy_reset(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct net_device *dev = hw->dev[port]; + + netif_stop_queue(skge->netdev); + netif_carrier_off(skge->netdev); + + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) { + genesis_reset(hw, port); + genesis_mac_init(hw, port); + } else { + yukon_reset(hw, port); + yukon_init(hw, port); + } + spin_unlock_bh(&hw->phy_lock); + + skge_set_multicast(dev); +} + +/* Basic MII support */ +static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int err = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -ENODEV; /* Phy still in reset */ + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u16 val = 0; + spin_lock_bh(&hw->phy_lock); + + if (is_genesis(hw)) + err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + else + err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + spin_unlock_bh(&hw->phy_lock); + data->val_out = val; + break; + } + + case SIOCSMIIREG: + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) + err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + else + err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + spin_unlock_bh(&hw->phy_lock); + break; + } + return err; +} + +static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) +{ + u32 end; + + start /= 8; + len /= 8; + end = start + len - 1; + + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); + skge_write32(hw, RB_ADDR(q, RB_START), start); + skge_write32(hw, RB_ADDR(q, RB_WP), start); + skge_write32(hw, RB_ADDR(q, RB_RP), start); + skge_write32(hw, RB_ADDR(q, RB_END), end); + + if (q == Q_R1 || q == Q_R2) { + /* Set thresholds on receive queues */ + skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), + start + (2*len)/3); + skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), + start + (len/3)); + } else { + /* Enable store & forward on Tx queues because + * Tx FIFO is only 4K on Genesis and 1K on Yukon + */ + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); + } + + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); +} + +/* Setup Bus Memory Interface */ +static void skge_qset(struct skge_port *skge, u16 q, + const struct skge_element *e) +{ + struct skge_hw *hw = skge->hw; + u32 watermark = 0x600; + u64 base = skge->dma + (e->desc - skge->mem); + + /* optimization to reduce window on 32-bit/33 MHz */ + if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0) + watermark /= 2; + + skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET); + skge_write32(hw, Q_ADDR(q, Q_F), watermark); + skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32)); + skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base); +} + +static int skge_up(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u32 chunk, ram_addr; + size_t rx_size, tx_size; + int err; + + if (!is_valid_ether_addr(dev->dev_addr)) + return -EINVAL; + + netif_info(skge, ifup, skge->netdev, "enabling interface\n"); + + if (dev->mtu > RX_BUF_SIZE) + skge->rx_buf_size = dev->mtu + ETH_HLEN; + else + skge->rx_buf_size = RX_BUF_SIZE; + + + rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); + tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); + skge->mem_size = tx_size + rx_size; + skge->mem = 
pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma); + if (!skge->mem) + return -ENOMEM; + + BUG_ON(skge->dma & 7); + + if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { + dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); + err = -EINVAL; + goto free_pci_mem; + } + + memset(skge->mem, 0, skge->mem_size); + + err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); + if (err) + goto free_pci_mem; + + err = skge_rx_fill(dev); + if (err) + goto free_rx_ring; + + err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, + skge->dma + rx_size); + if (err) + goto free_rx_ring; + + /* Initialize MAC */ + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) + genesis_mac_init(hw, port); + else + yukon_mac_init(hw, port); + spin_unlock_bh(&hw->phy_lock); + + /* Configure RAMbuffers - equally between ports and tx/rx */ + chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); + ram_addr = hw->ram_offset + 2 * chunk * port; + + skge_ramset(hw, rxqaddr[port], ram_addr, chunk); + skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); + + BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); + skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); + skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); + + /* Start receiver BMU */ + wmb(); + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); + skge_led(skge, LED_MODE_ON); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask |= portmask[port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + spin_unlock_irq(&hw->hw_lock); + + napi_enable(&skge->napi); + return 0; + + free_rx_ring: + skge_rx_clean(skge); + kfree(skge->rx_ring.start); + free_pci_mem: + pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); + skge->mem = NULL; + + return err; +} + +/* stop receiver */ +static void skge_rx_stop(struct skge_hw *hw, int port) +{ + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); +} + +static int skge_down(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + + if (skge->mem == NULL) + return 0; + + netif_info(skge, ifdown, skge->netdev, "disabling interface\n"); + + netif_tx_disable(dev); + + if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC) + del_timer_sync(&skge->link_timer); + + napi_disable(&skge->napi); + netif_carrier_off(dev); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask &= ~portmask[port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + spin_unlock_irq(&hw->hw_lock); + + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); + if (is_genesis(hw)) + genesis_stop(skge); + else + yukon_stop(skge); + + /* Stop transmitter */ + skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + + + /* Disable Force Sync bit and Enable Alloc bit */ + skge_write8(hw, SK_REG(port, TXA_CTRL), + TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + /* Stop Interval Timer and Limit Counter of Tx Arbiter */ + skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); + skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); + + /* Reset PCI FIFO */ + skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); + skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); + + /* Reset the RAM Buffer async Tx queue */ + skge_write8(hw, RB_ADDR(port == 0 ? 
Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); + + skge_rx_stop(hw, port); + + if (is_genesis(hw)) { + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); + } else { + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + } + + skge_led(skge, LED_MODE_OFF); + + netif_tx_lock_bh(dev); + skge_tx_clean(dev); + netif_tx_unlock_bh(dev); + + skge_rx_clean(skge); + + kfree(skge->rx_ring.start); + kfree(skge->tx_ring.start); + pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); + skge->mem = NULL; + return 0; +} + +static inline int skge_avail(const struct skge_ring *ring) +{ + smp_mb(); + return ((ring->to_clean > ring->to_use) ? 0 : ring->count) + + (ring->to_clean - ring->to_use) - 1; +} + +static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, + struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + struct skge_element *e; + struct skge_tx_desc *td; + int i; + u32 control, len; + u64 map; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) + return NETDEV_TX_BUSY; + + e = skge->tx_ring.to_use; + td = e->desc; + BUG_ON(td->control & BMU_OWN); + e->skb = skb; + len = skb_headlen(skb); + map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, len); + + td->dma_lo = map; + td->dma_hi = map >> 32; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const int offset = skb_checksum_start_offset(skb); + + /* This seems backwards, but it is what the sk98lin + * does. Looks like hardware is wrong? + */ + if (ipip_hdr(skb)->protocol == IPPROTO_UDP && + hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) + control = BMU_TCP_CHECK; + else + control = BMU_UDP_CHECK; + + td->csum_offs = 0; + td->csum_start = offset; + td->csum_write = offset + skb->csum_offset; + } else + control = BMU_CHECK; + + if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */ + control |= BMU_EOF | BMU_IRQ_EOF; + else { + struct skge_tx_desc *tf = td; + + control |= BMU_STFWD; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + map = pci_map_page(hw->pdev, frag->page, frag->page_offset, + frag->size, PCI_DMA_TODEVICE); + + e = e->next; + e->skb = skb; + tf = e->desc; + BUG_ON(tf->control & BMU_OWN); + + tf->dma_lo = map; + tf->dma_hi = (u64) map >> 32; + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, frag->size); + + tf->control = BMU_OWN | BMU_SW | control | frag->size; + } + tf->control |= BMU_EOF | BMU_IRQ_EOF; + } + /* Make sure all the descriptors are written */ + wmb(); + td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; + wmb(); + + skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); + + netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, + "tx queued, slot %td, len %d\n", + e - skge->tx_ring.start, skb->len); + + skge->tx_ring.to_use = e->next; + smp_wmb(); + + if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { + netdev_dbg(dev, "transmit queue full\n"); + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; +} + + +/* Free resources associated with this ring element */ +static void skge_tx_free(struct skge_port *skge, struct skge_element *e, + u32 control) +{ + struct pci_dev *pdev = skge->hw->pdev; + + /* skb header vs. 
fragment */ + if (control & BMU_STF) + pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + else + pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + + if (control & BMU_EOF) { + netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, + "tx done slot %td\n", e - skge->tx_ring.start); + + dev_kfree_skb(e->skb); + } +} + +/* Free all buffers in transmit ring */ +static void skge_tx_clean(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_element *e; + + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + struct skge_tx_desc *td = e->desc; + skge_tx_free(skge, e, td->control); + td->control = 0; + } + + skge->tx_ring.to_clean = e; +} + +static void skge_tx_timeout(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n"); + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); + skge_tx_clean(dev); + netif_wake_queue(dev); +} + +static int skge_change_mtu(struct net_device *dev, int new_mtu) +{ + int err; + + if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) + return -EINVAL; + + if (!netif_running(dev)) { + dev->mtu = new_mtu; + return 0; + } + + skge_down(dev); + + dev->mtu = new_mtu; + + err = skge_up(dev); + if (err) + dev_close(dev); + + return err; +} + +static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; + +static void genesis_add_filter(u8 filter[8], const u8 *addr) +{ + u32 crc, bit; + + crc = ether_crc_le(ETH_ALEN, addr); + bit = ~crc & 0x3f; + filter[bit/8] |= 1 << (bit%8); +} + +static void genesis_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct netdev_hw_addr *ha; + u32 mode; + u8 filter[8]; + + mode = xm_read32(hw, port, XM_MODE); + mode |= XM_MD_ENA_HASH; + if (dev->flags & IFF_PROMISC) + mode |= XM_MD_ENA_PROM; + else + mode &= ~XM_MD_ENA_PROM; + + if (dev->flags & IFF_ALLMULTI) + memset(filter, 0xff, sizeof(filter)); + else { + memset(filter, 0, sizeof(filter)); + + if (skge->flow_status == FLOW_STAT_REM_SEND || + skge->flow_status == FLOW_STAT_SYMMETRIC) + genesis_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + genesis_add_filter(filter, ha->addr); + } + + xm_write32(hw, port, XM_MODE, mode); + xm_outhash(hw, port, XM_HSM, filter); +} + +static void yukon_add_filter(u8 filter[8], const u8 *addr) +{ + u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; + filter[bit/8] |= 1 << (bit%8); +} + +static void yukon_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct netdev_hw_addr *ha; + int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || + skge->flow_status == FLOW_STAT_SYMMETRIC); + u16 reg; + u8 filter[8]; + + memset(filter, 0, sizeof(filter)); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA; + + if (dev->flags & IFF_PROMISC) /* promiscuous */ + reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + else if (dev->flags & IFF_ALLMULTI) /* all multicast */ + memset(filter, 0xff, sizeof(filter)); + else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */ + reg &= ~GM_RXCR_MCF_ENA; + else { + reg |= GM_RXCR_MCF_ENA; + + if (rx_pause) + yukon_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + yukon_add_filter(filter, ha->addr); + } + + + 
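/* + * The 64-bit hash built above is written to the GMAC as four + * 16-bit registers, low byte in the low half. Worked example: + * a CRC that selects bit 10 sets filter[1] |= 1 << 2, which + * ends up in the high byte of GM_MC_ADDR_H1 below. + */ +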
gma_write16(hw, port, GM_MC_ADDR_H1, + (u16)filter[0] | ((u16)filter[1] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H2, + (u16)filter[2] | ((u16)filter[3] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H3, + (u16)filter[4] | ((u16)filter[5] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H4, + (u16)filter[6] | ((u16)filter[7] << 8)); + + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +static inline u16 phy_length(const struct skge_hw *hw, u32 status) +{ + if (is_genesis(hw)) + return status >> XMR_FS_LEN_SHIFT; + else + return status >> GMR_FS_LEN_SHIFT; +} + +static inline int bad_phy_status(const struct skge_hw *hw, u32 status) +{ + if (is_genesis(hw)) + return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; + else + return (status & GMR_FS_ANY_ERR) || + (status & GMR_FS_RX_OK) == 0; +} + +static void skge_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + if (is_genesis(skge->hw)) + genesis_set_multicast(dev); + else + yukon_set_multicast(dev); + +} + + +/* Get receive buffer from descriptor. + * Handles copy of small buffers and reallocation failures + */ +static struct sk_buff *skge_rx_get(struct net_device *dev, + struct skge_element *e, + u32 control, u32 status, u16 csum) +{ + struct skge_port *skge = netdev_priv(dev); + struct sk_buff *skb; + u16 len = control & BMU_BBC; + + netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev, + "rx slot %td status 0x%x len %d\n", + e - skge->rx_ring.start, status, len); + + if (len > skge->rx_buf_size) + goto error; + + if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) + goto error; + + if (bad_phy_status(skge->hw, status)) + goto error; + + if (phy_length(skge->hw, status) != len) + goto error; + + if (len < RX_COPY_THRESHOLD) { + skb = netdev_alloc_skb_ip_align(dev, len); + if (!skb) + goto resubmit; + + pci_dma_sync_single_for_cpu(skge->hw->pdev, + dma_unmap_addr(e, mapaddr), + len, PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(e->skb, skb->data, len); + pci_dma_sync_single_for_device(skge->hw->pdev, + dma_unmap_addr(e, mapaddr), + len, PCI_DMA_FROMDEVICE); + skge_rx_reuse(e, skge->rx_buf_size); + } else { + struct sk_buff *nskb; + + nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); + if (!nskb) + goto resubmit; + + pci_unmap_single(skge->hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + skb = e->skb; + prefetch(skb->data); + skge_rx_setup(skge, e, nskb, skge->rx_buf_size); + } + + skb_put(skb, len); + + if (dev->features & NETIF_F_RXCSUM) { + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } + + skb->protocol = eth_type_trans(skb, dev); + + return skb; +error: + + netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev, + "rx err, slot %td control 0x%x status 0x%x\n", + e - skge->rx_ring.start, control, status); + + if (is_genesis(skge->hw)) { + if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) + dev->stats.rx_length_errors++; + if (status & XMR_FS_FRA_ERR) + dev->stats.rx_frame_errors++; + if (status & XMR_FS_FCS_ERR) + dev->stats.rx_crc_errors++; + } else { + if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) + dev->stats.rx_length_errors++; + if (status & GMR_FS_FRAGMENT) + dev->stats.rx_frame_errors++; + if (status & GMR_FS_CRC_ERR) + dev->stats.rx_crc_errors++; + } + +resubmit: + skge_rx_reuse(e, skge->rx_buf_size); + return NULL; +} + +/* Free all buffers in Tx ring which are no longer owned by device */ +static void skge_tx_done(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_ring *ring = &skge->tx_ring; + struct 
skge_element *e; + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + for (e = ring->to_clean; e != ring->to_use; e = e->next) { + u32 control = ((const struct skge_tx_desc *) e->desc)->control; + + if (control & BMU_OWN) + break; + + skge_tx_free(skge, e, control); + } + skge->tx_ring.to_clean = e; + + /* Can run lockless until we need to synchronize to restart queue. */ + smp_mb(); + + if (unlikely(netif_queue_stopped(dev) && + skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { + netif_tx_lock(dev); + if (unlikely(netif_queue_stopped(dev) && + skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { + netif_wake_queue(dev); + + } + netif_tx_unlock(dev); + } +} + +static int skge_poll(struct napi_struct *napi, int to_do) +{ + struct skge_port *skge = container_of(napi, struct skge_port, napi); + struct net_device *dev = skge->netdev; + struct skge_hw *hw = skge->hw; + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + int work_done = 0; + + skge_tx_done(dev); + + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { + struct skge_rx_desc *rd = e->desc; + struct sk_buff *skb; + u32 control; + + rmb(); + control = rd->control; + if (control & BMU_OWN) + break; + + skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); + if (likely(skb)) { + napi_gro_receive(napi, skb); + ++work_done; + } + } + ring->to_clean = e; + + /* restart receiver */ + wmb(); + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); + + if (work_done < to_do) { + unsigned long flags; + + napi_gro_flush(napi); + spin_lock_irqsave(&hw->hw_lock, flags); + __napi_complete(napi); + hw->intr_mask |= napimask[skge->port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irqrestore(&hw->hw_lock, flags); + } + + return work_done; +} + +/* Parity errors seem to happen when Genesis is connected to a switch + * with no other ports present. Heartbeat error?? + */ +static void skge_mac_parity(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + + ++dev->stats.tx_heartbeat_errors; + + if (is_genesis(hw)) + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_CLR_PERR); + else + /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), + (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE); +} + +static void skge_mac_intr(struct skge_hw *hw, int port) +{ + if (is_genesis(hw)) + genesis_mac_intr(hw, port); + else + yukon_mac_intr(hw, port); +} + +/* Handle device specific framing and timeout interrupts */ +static void skge_error_irq(struct skge_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); + + if (is_genesis(hw)) { + /* clear xmac errors */ + if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) + skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT); + if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) + skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT); + } else { + /* Timestamp (unused) overflow */ + if (hwstatus & IS_IRQ_TIST_OV) + skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); + } + + if (hwstatus & IS_RAM_RD_PAR) { + dev_err(&pdev->dev, "Ram read data parity error\n"); + skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); + } + + if (hwstatus & IS_RAM_WR_PAR) { + dev_err(&pdev->dev, "Ram write data parity error\n"); + skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); + } + + if (hwstatus & IS_M1_PAR_ERR) + skge_mac_parity(hw, 0); + + if (hwstatus & IS_M2_PAR_ERR) + skge_mac_parity(hw, 1); + + if (hwstatus & IS_R1_PAR_ERR) { + dev_err(&pdev->dev, "%s: receive queue parity error\n", + hw->dev[0]->name); + skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); + } + + if (hwstatus & IS_R2_PAR_ERR) { + dev_err(&pdev->dev, "%s: receive queue parity error\n", + hw->dev[1]->name); + skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); + } + + if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { + u16 pci_status, pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(pdev, PCI_STATUS, &pci_status); + + dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n", + pci_cmd, pci_status); + + /* Write the error bits back to clear them. */ + pci_status &= PCI_STATUS_ERROR_BITS; + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_write_config_word(pdev, PCI_COMMAND, + pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); + pci_write_config_word(pdev, PCI_STATUS, pci_status); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + /* if error still set then just ignore it */ + hwstatus = skge_read32(hw, B0_HWE_ISRC); + if (hwstatus & IS_IRQ_STAT) { + dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n"); + hw->intr_mask &= ~IS_HW_ERR; + } + } +} + +/* + * Interrupts from the PHY are handled in a tasklet (softirq) + * because accessing PHY registers requires a spin wait, which + * might cause excess interrupt latency. 
+ */ +static void skge_extirq(unsigned long arg) +{ + struct skge_hw *hw = (struct skge_hw *) arg; + int port; + + for (port = 0; port < hw->ports; port++) { + struct net_device *dev = hw->dev[port]; + + if (netif_running(dev)) { + struct skge_port *skge = netdev_priv(dev); + + spin_lock(&hw->phy_lock); + if (!is_genesis(hw)) + yukon_phy_intr(skge); + else if (hw->phy_type == SK_PHY_BCOM) + bcom_phy_intr(skge); + spin_unlock(&hw->phy_lock); + } + } + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask |= IS_EXT_REG; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); +} + +static irqreturn_t skge_intr(int irq, void *dev_id) +{ + struct skge_hw *hw = dev_id; + u32 status; + int handled = 0; + + spin_lock(&hw->hw_lock); + /* Reading this register masks IRQ */ + status = skge_read32(hw, B0_SP_ISRC); + if (status == 0 || status == ~0) + goto out; + + handled = 1; + status &= hw->intr_mask; + if (status & IS_EXT_REG) { + hw->intr_mask &= ~IS_EXT_REG; + tasklet_schedule(&hw->phy_task); + } + + if (status & (IS_XA1_F|IS_R1_F)) { + struct skge_port *skge = netdev_priv(hw->dev[0]); + hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); + napi_schedule(&skge->napi); + } + + if (status & IS_PA_TO_TX1) + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); + + if (status & IS_PA_TO_RX1) { + ++hw->dev[0]->stats.rx_over_errors; + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); + } + + + if (status & IS_MAC1) + skge_mac_intr(hw, 0); + + if (hw->dev[1]) { + struct skge_port *skge = netdev_priv(hw->dev[1]); + + if (status & (IS_XA2_F|IS_R2_F)) { + hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); + napi_schedule(&skge->napi); + } + + if (status & IS_PA_TO_RX2) { + ++hw->dev[1]->stats.rx_over_errors; + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); + } + + if (status & IS_PA_TO_TX2) + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2); + + if (status & IS_MAC2) + skge_mac_intr(hw, 1); + } + + if (status & IS_HW_ERR) + skge_error_irq(hw); + + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); +out: + spin_unlock(&hw->hw_lock); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void skge_netpoll(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + disable_irq(dev->irq); + skge_intr(dev->irq, skge->hw); + enable_irq(dev->irq); +} +#endif + +static int skge_set_mac_address(struct net_device *dev, void *p) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + unsigned port = skge->port; + const struct sockaddr *addr = p; + u16 ctrl; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + if (!netif_running(dev)) { + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); + } else { + /* disable Rx */ + spin_lock_bh(&hw->phy_lock); + ctrl = gma_read16(hw, port, GM_GP_CTRL); + gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA); + + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); + + if (is_genesis(hw)) + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + else { + gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); + gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); + } + + gma_write16(hw, port, GM_GP_CTRL, ctrl); + spin_unlock_bh(&hw->phy_lock); + } + + return 0; +} + +static const struct { + u8 id; + const char *name; +} skge_chips[] = { + { CHIP_ID_GENESIS, 
"Genesis" }, + { CHIP_ID_YUKON, "Yukon" }, + { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, + { CHIP_ID_YUKON_LP, "Yukon-LP"}, +}; + +static const char *skge_board_name(const struct skge_hw *hw) +{ + int i; + static char buf[16]; + + for (i = 0; i < ARRAY_SIZE(skge_chips); i++) + if (skge_chips[i].id == hw->chip_id) + return skge_chips[i].name; + + snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id); + return buf; +} + + +/* + * Setup the board data structure, but don't bring up + * the port(s) + */ +static int skge_reset(struct skge_hw *hw) +{ + u32 reg; + u16 ctst, pci_status; + u8 t8, mac_cfg, pmd_type; + int i; + + ctst = skge_read16(hw, B0_CTST); + + /* do a SW reset */ + skge_write8(hw, B0_CTST, CS_RST_SET); + skge_write8(hw, B0_CTST, CS_RST_CLR); + + /* clear PCI errors, if any */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + skge_write8(hw, B2_TST_CTRL2, 0); + + pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); + pci_write_config_word(hw->pdev, PCI_STATUS, + pci_status | PCI_STATUS_ERROR_BITS); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + skge_write8(hw, B0_CTST, CS_MRST_CLR); + + /* restore CLK_RUN bits (for Yukon-Lite) */ + skge_write16(hw, B0_CTST, + ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); + + hw->chip_id = skge_read8(hw, B2_CHIP_ID); + hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; + pmd_type = skge_read8(hw, B2_PMD_TYP); + hw->copper = (pmd_type == 'T' || pmd_type == '1'); + + switch (hw->chip_id) { + case CHIP_ID_GENESIS: +#ifdef CONFIG_SKGE_GENESIS + switch (hw->phy_type) { + case SK_PHY_XMAC: + hw->phy_addr = PHY_ADDR_XMAC; + break; + case SK_PHY_BCOM: + hw->phy_addr = PHY_ADDR_BCOM; + break; + default: + dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n", + hw->phy_type); + return -EOPNOTSUPP; + } + break; +#else + dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n"); + return -EOPNOTSUPP; +#endif + + case CHIP_ID_YUKON: + case CHIP_ID_YUKON_LITE: + case CHIP_ID_YUKON_LP: + if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S') + hw->copper = 1; + + hw->phy_addr = PHY_ADDR_MARV; + break; + + default: + dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", + hw->chip_id); + return -EOPNOTSUPP; + } + + mac_cfg = skge_read8(hw, B2_MAC_CFG); + hw->ports = (mac_cfg & CFG_SNG_MAC) ? 
1 : 2; + hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4; + + /* read the adapter's RAM size */ + t8 = skge_read8(hw, B2_E_0); + if (is_genesis(hw)) { + if (t8 == 3) { + /* special case: 4 x 64k x 36, offset = 0x80000 */ + hw->ram_size = 0x100000; + hw->ram_offset = 0x80000; + } else + hw->ram_size = t8 * 512; + } else if (t8 == 0) + hw->ram_size = 0x20000; + else + hw->ram_size = t8 * 4096; + + hw->intr_mask = IS_HW_ERR; + + /* Use PHY IRQ for all but fiber based Genesis board */ + if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)) + hw->intr_mask |= IS_EXT_REG; + + if (is_genesis(hw)) + genesis_init(hw); + else { + /* switch power to VCC (WA for VAUX problem) */ + skge_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); + + /* avoid boards with stuck Hardware error bits */ + if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && + (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { + dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n"); + hw->intr_mask &= ~IS_HW_ERR; + } + + /* Clear PHY COMA */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg); + reg &= ~PCI_PHY_COMA; + pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + + for (i = 0; i < hw->ports; i++) { + skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); + skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); + } + } + + /* turn off hardware timer (unused) */ + skge_write8(hw, B2_TI_CTRL, TIM_STOP); + skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); + skge_write8(hw, B0_LED, LED_STAT_ON); + + /* enable the Tx Arbiters */ + for (i = 0; i < hw->ports; i++) + skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); + + /* Initialize ram interface */ + skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); + + skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53); + + skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK); + + /* Set interrupt moderation for Transmit only + * Receive interrupts avoided by NAPI + */ + skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F); + skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); + skge_write32(hw, B2_IRQM_CTRL, TIM_START); + + skge_write32(hw, B0_IMSK, hw->intr_mask); + + for (i = 0; i < hw->ports; i++) { + if (is_genesis(hw)) + genesis_reset(hw, i); + else + yukon_reset(hw, i); + } + + return 0; +} + + +#ifdef CONFIG_SKGE_DEBUG + +static struct dentry *skge_debug; + +static int skge_debug_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + const struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + const struct skge_element *e; + + if (!netif_running(dev)) + return -ENETDOWN; + + seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), + skge_read32(hw, B0_IMSK)); + + seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + const struct skge_tx_desc *t = e->desc; + seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", + t->control, 
t->dma_hi, t->dma_lo, t->status, + t->csum_offs, t->csum_write, t->csum_start); + } + + seq_printf(seq, "\nRx Ring:\n"); + for (e = skge->rx_ring.to_clean; ; e = e->next) { + const struct skge_rx_desc *r = e->desc; + + if (r->control & BMU_OWN) + break; + + seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", + r->control, r->dma_hi, r->dma_lo, r->status, + r->timestamp, r->csum1, r->csum1_start); + } + + return 0; +} + +static int skge_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, skge_debug_show, inode->i_private); +} + +static const struct file_operations skge_debug_fops = { + .owner = THIS_MODULE, + .open = skge_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Use network device events to create/remove/rename + * debugfs file entries + */ +static int skge_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = ptr; + struct skge_port *skge; + struct dentry *d; + + if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug) + goto done; + + skge = netdev_priv(dev); + switch (event) { + case NETDEV_CHANGENAME: + if (skge->debugfs) { + d = debugfs_rename(skge_debug, skge->debugfs, + skge_debug, dev->name); + if (d) + skge->debugfs = d; + else { + netdev_info(dev, "rename failed\n"); + debugfs_remove(skge->debugfs); + } + } + break; + + case NETDEV_GOING_DOWN: + if (skge->debugfs) { + debugfs_remove(skge->debugfs); + skge->debugfs = NULL; + } + break; + + case NETDEV_UP: + d = debugfs_create_file(dev->name, S_IRUGO, + skge_debug, dev, + &skge_debug_fops); + if (!d || IS_ERR(d)) + netdev_info(dev, "debugfs create failed\n"); + else + skge->debugfs = d; + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block skge_notifier = { + .notifier_call = skge_device_event, +}; + + +static __init void skge_debug_init(void) +{ + struct dentry *ent; + + ent = debugfs_create_dir("skge", NULL); + if (!ent || IS_ERR(ent)) { + pr_info("debugfs create directory failed\n"); + return; + } + + skge_debug = ent; + register_netdevice_notifier(&skge_notifier); +} + +static __exit void skge_debug_cleanup(void) +{ + if (skge_debug) { + unregister_netdevice_notifier(&skge_notifier); + debugfs_remove(skge_debug); + skge_debug = NULL; + } +} + +#else +#define skge_debug_init() +#define skge_debug_cleanup() +#endif + +static const struct net_device_ops skge_netdev_ops = { + .ndo_open = skge_up, + .ndo_stop = skge_down, + .ndo_start_xmit = skge_xmit_frame, + .ndo_do_ioctl = skge_ioctl, + .ndo_get_stats = skge_get_stats, + .ndo_tx_timeout = skge_tx_timeout, + .ndo_change_mtu = skge_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = skge_set_multicast, + .ndo_set_mac_address = skge_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = skge_netpoll, +#endif +}; + + +/* Initialize network device */ +static struct net_device *skge_devinit(struct skge_hw *hw, int port, + int highmem) +{ + struct skge_port *skge; + struct net_device *dev = alloc_etherdev(sizeof(*skge)); + + if (!dev) { + dev_err(&hw->pdev->dev, "etherdev alloc failed\n"); + return NULL; + } + + SET_NETDEV_DEV(dev, &hw->pdev->dev); + dev->netdev_ops = &skge_netdev_ops; + dev->ethtool_ops = &skge_ethtool_ops; + dev->watchdog_timeo = TX_WATCHDOG; + dev->irq = hw->pdev->irq; + + if (highmem) + dev->features |= NETIF_F_HIGHDMA; + + skge = netdev_priv(dev); + netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); + skge->netdev = dev; + skge->hw = hw; + 
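/* msg_enable is a NETIF_MSG_* bitmap seeded from the driver's + * "debug" module parameter; netif_msg_init() falls back to the + * default mask when the value is out of range (e.g. -1). + */ +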
skge->msg_enable = netif_msg_init(debug, default_msg); + + skge->tx_ring.count = DEFAULT_TX_RING_SIZE; + skge->rx_ring.count = DEFAULT_RX_RING_SIZE; + + /* Auto speed and flow control */ + skge->autoneg = AUTONEG_ENABLE; + skge->flow_control = FLOW_MODE_SYM_OR_REM; + skge->duplex = -1; + skge->speed = -1; + skge->advertising = skge_supported_modes(hw); + + if (device_can_wakeup(&hw->pdev->dev)) { + skge->wol = wol_supported(hw) & WAKE_MAGIC; + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + } + + hw->dev[port] = dev; + + skge->port = port; + + /* Only used for Genesis XMAC */ + if (is_genesis(hw)) + setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge); + else { + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + dev->features |= dev->hw_features; + } + + /* read the mac address */ + memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + return dev; +} + +static void __devinit skge_show_addr(struct net_device *dev) +{ + const struct skge_port *skge = netdev_priv(dev); + + netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); +} + +static int only_32bit_dma; + +static int __devinit skge_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev, *dev1; + struct skge_hw *hw; + int err, using_dac = 0; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto err_out; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + using_dac = 0; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_out_free_regions; + } + +#ifdef __BIG_ENDIAN + /* byte swap descriptors in hardware */ + { + u32 reg; + + pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); + reg |= PCI_REV_DESC; + pci_write_config_dword(pdev, PCI_DEV_REG2, reg); + } +#endif + + err = -ENOMEM; + /* space for skge@pci:0000:04:00.0 */ + hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + + strlen(pci_name(pdev)) + 1, GFP_KERNEL); + if (!hw) { + dev_err(&pdev->dev, "cannot allocate hardware struct\n"); + goto err_out_free_regions; + } + sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); + + hw->pdev = pdev; + spin_lock_init(&hw->hw_lock); + spin_lock_init(&hw->phy_lock); + tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw); + + hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); + if (!hw->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_out_free_hw; + } + + err = skge_reset(hw); + if (err) + goto err_out_iounmap; + + pr_info("%s addr 0x%llx irq %d chip %s rev %d\n", + DRV_VERSION, + (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, + skge_board_name(hw), hw->chip_rev); + + dev = skge_devinit(hw, 0, using_dac); + if (!dev) + goto err_out_led_off; + + /* Some motherboards are broken and have zero in ROM. */ + if (!is_valid_ether_addr(dev->dev_addr)) + dev_warn(&pdev->dev, "bad (zero?) 
ethernet address in rom\n"); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "cannot register net device\n"); + goto err_out_free_netdev; + } + + err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw); + if (err) { + dev_err(&pdev->dev, "%s: cannot assign irq %d\n", + dev->name, pdev->irq); + goto err_out_unregister; + } + skge_show_addr(dev); + + if (hw->ports > 1) { + dev1 = skge_devinit(hw, 1, using_dac); + if (dev1 && register_netdev(dev1) == 0) + skge_show_addr(dev1); + else { + /* Failure to register second port need not be fatal */ + dev_warn(&pdev->dev, "register of second port failed\n"); + hw->dev[1] = NULL; + hw->ports = 1; + if (dev1) + free_netdev(dev1); + } + } + pci_set_drvdata(pdev, hw); + + return 0; + +err_out_unregister: + unregister_netdev(dev); +err_out_free_netdev: + free_netdev(dev); +err_out_led_off: + skge_write16(hw, B0_LED, LED_STAT_OFF); +err_out_iounmap: + iounmap(hw->regs); +err_out_free_hw: + kfree(hw); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +err_out: + return err; +} + +static void __devexit skge_remove(struct pci_dev *pdev) +{ + struct skge_hw *hw = pci_get_drvdata(pdev); + struct net_device *dev0, *dev1; + + if (!hw) + return; + + dev1 = hw->dev[1]; + if (dev1) + unregister_netdev(dev1); + dev0 = hw->dev[0]; + unregister_netdev(dev0); + + tasklet_disable(&hw->phy_task); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask = 0; + skge_write32(hw, B0_IMSK, 0); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); + + skge_write16(hw, B0_LED, LED_STAT_OFF); + skge_write8(hw, B0_CTST, CS_RST_SET); + + free_irq(pdev->irq, hw); + pci_release_regions(pdev); + pci_disable_device(pdev); + if (dev1) + free_netdev(dev1); + free_netdev(dev0); + + iounmap(hw->regs); + kfree(hw); + pci_set_drvdata(pdev, NULL); +} + +#ifdef CONFIG_PM +static int skge_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct skge_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return 0; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct skge_port *skge = netdev_priv(dev); + + if (netif_running(dev)) + skge_down(dev); + + if (skge->wol) + skge_wol_init(skge); + } + + skge_write32(hw, B0_IMSK, 0); + + return 0; +} + +static int skge_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct skge_hw *hw = pci_get_drvdata(pdev); + int i, err; + + if (!hw) + return 0; + + err = skge_reset(hw); + if (err) + goto out; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + + if (netif_running(dev)) { + err = skge_up(dev); + + if (err) { + netdev_err(dev, "could not up: %d\n", err); + dev_close(dev); + goto out; + } + } + } +out: + return err; +} + +static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume); +#define SKGE_PM_OPS (&skge_pm_ops) + +#else + +#define SKGE_PM_OPS NULL +#endif + +static void skge_shutdown(struct pci_dev *pdev) +{ + struct skge_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct skge_port *skge = netdev_priv(dev); + + if (skge->wol) + skge_wol_init(skge); + } + + pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); + pci_set_power_state(pdev, PCI_D3hot); +} + +static struct pci_driver skge_driver = { + .name = DRV_NAME, + .id_table = skge_id_table, + .probe = skge_probe, + .remove = __devexit_p(skge_remove), + .shutdown = 
skge_shutdown, + .driver.pm = SKGE_PM_OPS, +}; + +static struct dmi_system_id skge_32bit_dma_boards[] = { + { + .ident = "Gigabyte nForce boards", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"), + DMI_MATCH(DMI_BOARD_NAME, "nForce"), + }, + }, + {} +}; + +static int __init skge_init_module(void) +{ + if (dmi_check_system(skge_32bit_dma_boards)) + only_32bit_dma = 1; + skge_debug_init(); + return pci_register_driver(&skge_driver); +} + +static void __exit skge_cleanup_module(void) +{ + pci_unregister_driver(&skge_driver); + skge_debug_cleanup(); +} + +module_init(skge_init_module); +module_exit(skge_cleanup_module); diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h new file mode 100644 index 000000000000..a2eb34115844 --- /dev/null +++ b/drivers/net/ethernet/marvell/skge.h @@ -0,0 +1,2584 @@ +/* + * Definitions for the new Marvell Yukon / SysKonnect driver. + */ +#ifndef _SKGE_H +#define _SKGE_H +#include <linux/interrupt.h> + +/* PCI config registers */ +#define PCI_DEV_REG1 0x40 +#define PCI_PHY_COMA 0x8000000 +#define PCI_VIO 0x2000000 + +#define PCI_DEV_REG2 0x44 +#define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ... */ +#define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */ + +#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ + PCI_STATUS_SIG_SYSTEM_ERROR | \ + PCI_STATUS_REC_MASTER_ABORT | \ + PCI_STATUS_REC_TARGET_ABORT | \ + PCI_STATUS_PARITY) + +enum csr_regs { + B0_RAP = 0x0000, + B0_CTST = 0x0004, + B0_LED = 0x0006, + B0_POWER_CTRL = 0x0007, + B0_ISRC = 0x0008, + B0_IMSK = 0x000c, + B0_HWE_ISRC = 0x0010, + B0_HWE_IMSK = 0x0014, + B0_SP_ISRC = 0x0018, + B0_XM1_IMSK = 0x0020, + B0_XM1_ISRC = 0x0028, + B0_XM1_PHY_ADDR = 0x0030, + B0_XM1_PHY_DATA = 0x0034, + B0_XM2_IMSK = 0x0040, + B0_XM2_ISRC = 0x0048, + B0_XM2_PHY_ADDR = 0x0050, + B0_XM2_PHY_DATA = 0x0054, + B0_R1_CSR = 0x0060, + B0_R2_CSR = 0x0064, + B0_XS1_CSR = 0x0068, + B0_XA1_CSR = 0x006c, + B0_XS2_CSR = 0x0070, + B0_XA2_CSR = 0x0074, + + B2_MAC_1 = 0x0100, + B2_MAC_2 = 0x0108, + B2_MAC_3 = 0x0110, + B2_CONN_TYP = 0x0118, + B2_PMD_TYP = 0x0119, + B2_MAC_CFG = 0x011a, + B2_CHIP_ID = 0x011b, + B2_E_0 = 0x011c, + B2_E_1 = 0x011d, + B2_E_2 = 0x011e, + B2_E_3 = 0x011f, + B2_FAR = 0x0120, + B2_FDP = 0x0124, + B2_LD_CTRL = 0x0128, + B2_LD_TEST = 0x0129, + B2_TI_INI = 0x0130, + B2_TI_VAL = 0x0134, + B2_TI_CTRL = 0x0138, + B2_TI_TEST = 0x0139, + B2_IRQM_INI = 0x0140, + B2_IRQM_VAL = 0x0144, + B2_IRQM_CTRL = 0x0148, + B2_IRQM_TEST = 0x0149, + B2_IRQM_MSK = 0x014c, + B2_IRQM_HWE_MSK = 0x0150, + B2_TST_CTRL1 = 0x0158, + B2_TST_CTRL2 = 0x0159, + B2_GP_IO = 0x015c, + B2_I2C_CTRL = 0x0160, + B2_I2C_DATA = 0x0164, + B2_I2C_IRQ = 0x0168, + B2_I2C_SW = 0x016c, + B2_BSC_INI = 0x0170, + B2_BSC_VAL = 0x0174, + B2_BSC_CTRL = 0x0178, + B2_BSC_STAT = 0x0179, + B2_BSC_TST = 0x017a, + + B3_RAM_ADDR = 0x0180, + B3_RAM_DATA_LO = 0x0184, + B3_RAM_DATA_HI = 0x0188, + B3_RI_WTO_R1 = 0x0190, + B3_RI_WTO_XA1 = 0x0191, + B3_RI_WTO_XS1 = 0x0192, + B3_RI_RTO_R1 = 0x0193, + B3_RI_RTO_XA1 = 0x0194, + B3_RI_RTO_XS1 = 0x0195, + B3_RI_WTO_R2 = 0x0196, + B3_RI_WTO_XA2 = 0x0197, + B3_RI_WTO_XS2 = 0x0198, + B3_RI_RTO_R2 = 0x0199, + B3_RI_RTO_XA2 = 0x019a, + B3_RI_RTO_XS2 = 0x019b, + B3_RI_TO_VAL = 0x019c, + B3_RI_CTRL = 0x01a0, + B3_RI_TEST = 0x01a2, + B3_MA_TOINI_RX1 = 0x01b0, + B3_MA_TOINI_RX2 = 0x01b1, + B3_MA_TOINI_TX1 = 0x01b2, + B3_MA_TOINI_TX2 = 0x01b3, + B3_MA_TOVAL_RX1 = 0x01b4, + B3_MA_TOVAL_RX2 = 0x01b5, + B3_MA_TOVAL_TX1 = 0x01b6, + B3_MA_TOVAL_TX2 = 0x01b7, + B3_MA_TO_CTRL = 0x01b8, + 
B3_MA_TO_TEST = 0x01ba, + B3_MA_RCINI_RX1 = 0x01c0, + B3_MA_RCINI_RX2 = 0x01c1, + B3_MA_RCINI_TX1 = 0x01c2, + B3_MA_RCINI_TX2 = 0x01c3, + B3_MA_RCVAL_RX1 = 0x01c4, + B3_MA_RCVAL_RX2 = 0x01c5, + B3_MA_RCVAL_TX1 = 0x01c6, + B3_MA_RCVAL_TX2 = 0x01c7, + B3_MA_RC_CTRL = 0x01c8, + B3_MA_RC_TEST = 0x01ca, + B3_PA_TOINI_RX1 = 0x01d0, + B3_PA_TOINI_RX2 = 0x01d4, + B3_PA_TOINI_TX1 = 0x01d8, + B3_PA_TOINI_TX2 = 0x01dc, + B3_PA_TOVAL_RX1 = 0x01e0, + B3_PA_TOVAL_RX2 = 0x01e4, + B3_PA_TOVAL_TX1 = 0x01e8, + B3_PA_TOVAL_TX2 = 0x01ec, + B3_PA_CTRL = 0x01f0, + B3_PA_TEST = 0x01f2, +}; + +/* B0_CTST 16 bit Control/Status register */ +enum { + CS_CLK_RUN_HOT = 1<<13,/* CLK_RUN hot m. (YUKON-Lite only) */ + CS_CLK_RUN_RST = 1<<12,/* CLK_RUN reset (YUKON-Lite only) */ + CS_CLK_RUN_ENA = 1<<11,/* CLK_RUN enable (YUKON-Lite only) */ + CS_VAUX_AVAIL = 1<<10,/* VAUX available (YUKON only) */ + CS_BUS_CLOCK = 1<<9, /* Bus Clock 0/1 = 33/66 MHz */ + CS_BUS_SLOT_SZ = 1<<8, /* Slot Size 0/1 = 32/64 bit slot */ + CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */ + CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */ + CS_STOP_DONE = 1<<5, /* Stop Master is finished */ + CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */ + CS_MRST_CLR = 1<<3, /* Clear Master reset */ + CS_MRST_SET = 1<<2, /* Set Master reset */ + CS_RST_CLR = 1<<1, /* Clear Software reset */ + CS_RST_SET = 1, /* Set Software reset */ + +/* B0_LED 8 Bit LED register */ +/* Bit 7.. 2: reserved */ + LED_STAT_ON = 1<<1, /* Status LED on */ + LED_STAT_OFF = 1, /* Status LED off */ + +/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ + PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */ + PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */ + PC_VCC_ENA = 1<<5, /* Switch VCC Enable */ + PC_VCC_DIS = 1<<4, /* Switch VCC Disable */ + PC_VAUX_ON = 1<<3, /* Switch VAUX On */ + PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */ + PC_VCC_ON = 1<<1, /* Switch VCC On */ + PC_VCC_OFF = 1<<0, /* Switch VCC Off */ +}; + +/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ +enum { + IS_ALL_MSK = 0xbffffffful, /* All Interrupt bits */ + IS_HW_ERR = 1<<31, /* Interrupt HW Error */ + /* Bit 30: reserved */ + IS_PA_TO_RX1 = 1<<29, /* Packet Arb Timeout Rx1 */ + IS_PA_TO_RX2 = 1<<28, /* Packet Arb Timeout Rx2 */ + IS_PA_TO_TX1 = 1<<27, /* Packet Arb Timeout Tx1 */ + IS_PA_TO_TX2 = 1<<26, /* Packet Arb Timeout Tx2 */ + IS_I2C_READY = 1<<25, /* IRQ on end of I2C Tx */ + IS_IRQ_SW = 1<<24, /* SW forced IRQ */ + IS_EXT_REG = 1<<23, /* IRQ from LM80 or PHY (GENESIS only) */ + /* IRQ from PHY (YUKON only) */ + IS_TIMINT = 1<<22, /* IRQ from Timer */ + IS_MAC1 = 1<<21, /* IRQ from MAC 1 */ + IS_LNK_SYNC_M1 = 1<<20, /* Link Sync Cnt wrap MAC 1 */ + IS_MAC2 = 1<<19, /* IRQ from MAC 2 */ + IS_LNK_SYNC_M2 = 1<<18, /* Link Sync Cnt wrap MAC 2 */ +/* Receive Queue 1 */ + IS_R1_B = 1<<17, /* Q_R1 End of Buffer */ + IS_R1_F = 1<<16, /* Q_R1 End of Frame */ + IS_R1_C = 1<<15, /* Q_R1 Encoding Error */ +/* Receive Queue 2 */ + IS_R2_B = 1<<14, /* Q_R2 End of Buffer */ + IS_R2_F = 1<<13, /* Q_R2 End of Frame */ + IS_R2_C = 1<<12, /* Q_R2 Encoding Error */ +/* Synchronous Transmit Queue 1 */ + IS_XS1_B = 1<<11, /* Q_XS1 End of Buffer */ + IS_XS1_F = 1<<10, /* Q_XS1 End of Frame */ + IS_XS1_C = 1<<9, /* Q_XS1 Encoding Error */ +/* Asynchronous Transmit Queue 1 */ + IS_XA1_B = 1<<8, /* Q_XA1 End of Buffer */ + IS_XA1_F = 1<<7, /* Q_XA1 End of Frame */ + IS_XA1_C = 1<<6, /* Q_XA1 Encoding Error */ +/* Synchronous Transmit Queue 2 */ + IS_XS2_B = 1<<5, /* Q_XS2 End of Buffer */ + IS_XS2_F = 1<<4, /* Q_XS2 End of Frame */ + 
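+	/*
+	 * These interrupt source/mask bits back the cached hw->intr_mask
+	 * word.  A sketch of the usual update sequence, using the
+	 * IS_PORT_1 composite defined at the end of this enum (compare
+	 * skge_remove() in skge.c above, which clears the whole mask;
+	 * the dummy read-back flushes the posted PCI write):
+	 *
+	 *	spin_lock_irq(&hw->hw_lock);
+	 *	hw->intr_mask &= ~IS_PORT_1;
+	 *	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	 *	skge_read32(hw, B0_IMSK);
+	 *	spin_unlock_irq(&hw->hw_lock);
+	 */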
IS_XS2_C = 1<<3, /* Q_XS2 Encoding Error */ +/* Asynchronous Transmit Queue 2 */ + IS_XA2_B = 1<<2, /* Q_XA2 End of Buffer */ + IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */ + IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */ + + IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1, + IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2, + + IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1, + IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2, +}; + + +/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ +enum { + IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ + IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ + IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */ + IS_IRQ_STAT = 1<<10, /* IRQ status exception */ + IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */ + IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */ + IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */ + IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */ + IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */ + IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */ + IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */ + IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ + IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ + IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ + + IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT + | IS_RAM_RD_PAR | IS_RAM_WR_PAR + | IS_M1_PAR_ERR | IS_M2_PAR_ERR + | IS_R1_PAR_ERR | IS_R2_PAR_ERR, +}; + +/* B2_TST_CTRL1 8 bit Test Control Register 1 */ +enum { + TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */ + TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */ + TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */ + TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */ + TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */ + TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */ + TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */ + TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */ +}; + +/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ +enum { + CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */ + /* Bit 3.. 2: reserved */ + CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */ + CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/ +}; + +/* B2_CHIP_ID 8 bit Chip Identification Number */ +enum { + CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */ + CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */ + CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */ + CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */ + CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */ + CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ + CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ + + CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */ + CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. 
for YUKON-Lite A3 */ +}; + +/* B2_TI_CTRL 8 bit Timer control */ +/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ +enum { + TIM_START = 1<<2, /* Start Timer */ + TIM_STOP = 1<<1, /* Stop Timer */ + TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */ +}; + +/* B2_TI_TEST 8 Bit Timer Test */ +/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ +/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ +enum { + TIM_T_ON = 1<<2, /* Test mode on */ + TIM_T_OFF = 1<<1, /* Test mode off */ + TIM_T_STEP = 1<<0, /* Test step */ +}; + +/* B2_GP_IO 32 bit General Purpose I/O Register */ +enum { + GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */ + GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */ + GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */ + GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */ + GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */ + GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */ + GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */ + GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */ + GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */ + GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */ + + GP_IO_9 = 1<<9, /* IO_9 pin */ + GP_IO_8 = 1<<8, /* IO_8 pin */ + GP_IO_7 = 1<<7, /* IO_7 pin */ + GP_IO_6 = 1<<6, /* IO_6 pin */ + GP_IO_5 = 1<<5, /* IO_5 pin */ + GP_IO_4 = 1<<4, /* IO_4 pin */ + GP_IO_3 = 1<<3, /* IO_3 pin */ + GP_IO_2 = 1<<2, /* IO_2 pin */ + GP_IO_1 = 1<<1, /* IO_1 pin */ + GP_IO_0 = 1<<0, /* IO_0 pin */ +}; + +/* Descriptor Bit Definition */ +/* TxCtrl Transmit Buffer Control Field */ +/* RxCtrl Receive Buffer Control Field */ +enum { + BMU_OWN = 1<<31, /* OWN bit: 0=host/1=BMU */ + BMU_STF = 1<<30, /* Start of Frame */ + BMU_EOF = 1<<29, /* End of Frame */ + BMU_IRQ_EOB = 1<<28, /* Req "End of Buffer" IRQ */ + BMU_IRQ_EOF = 1<<27, /* Req "End of Frame" IRQ */ + /* TxCtrl specific bits */ + BMU_STFWD = 1<<26, /* (Tx) Store & Forward Frame */ + BMU_NO_FCS = 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */ + BMU_SW = 1<<24, /* (Tx) 1 bit res. for SW use */ + /* RxCtrl specific bits */ + BMU_DEV_0 = 1<<26, /* (Rx) Transfer data to Dev0 */ + BMU_STAT_VAL = 1<<25, /* (Rx) Rx Status Valid */ + BMU_TIST_VAL = 1<<24, /* (Rx) Rx TimeStamp Valid */ + /* Bit 23..16: BMU Check Opcodes */ + BMU_CHECK = 0x55<<16, /* Default BMU check */ + BMU_TCP_CHECK = 0x56<<16, /* Descr with TCP ext */ + BMU_UDP_CHECK = 0x57<<16, /* Descr with UDP ext (YUKON only) */ + BMU_BBC = 0xffffL, /* Bit 15.. 0: Buffer Byte Counter */ +}; + +/* B2_BSC_CTRL 8 bit Blink Source Counter Control */ +enum { + BSC_START = 1<<1, /* Start Blink Source Counter */ + BSC_STOP = 1<<0, /* Stop Blink Source Counter */ +}; + +/* B2_BSC_STAT 8 bit Blink Source Counter Status */ +enum { + BSC_SRC = 1<<0, /* Blink Source, 0=Off / 1=On */ +}; + +/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */ +enum { + BSC_T_ON = 1<<2, /* Test mode on */ + BSC_T_OFF = 1<<1, /* Test mode off */ + BSC_T_STEP = 1<<0, /* Test step */ +}; + +/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ + /* Bit 31..19: reserved */ +#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 
0: RAM Address Range */ +/* RAM Interface Registers */ + +/* B3_RI_CTRL 16 bit RAM Iface Control Register */ +enum { + RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */ + RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/ + + RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */ + RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ +}; + +/* MAC Arbiter Registers */ +/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ +enum { + MA_FOE_ON = 1<<3, /* XMAC Fast Output Enable ON */ + MA_FOE_OFF = 1<<2, /* XMAC Fast Output Enable OFF */ + MA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */ + MA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */ + +}; + +/* Timeout values */ +#define SK_MAC_TO_53 72 /* MAC arbiter timeout */ +#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */ +#define SK_PKT_TO_MAX 0xffff /* Maximum value */ +#define SK_RI_TO_53 36 /* RAM interface timeout */ + +/* Packet Arbiter Registers */ +/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ +enum { + PA_CLR_TO_TX2 = 1<<13,/* Clear IRQ Packet Timeout TX2 */ + PA_CLR_TO_TX1 = 1<<12,/* Clear IRQ Packet Timeout TX1 */ + PA_CLR_TO_RX2 = 1<<11,/* Clear IRQ Packet Timeout RX2 */ + PA_CLR_TO_RX1 = 1<<10,/* Clear IRQ Packet Timeout RX1 */ + PA_ENA_TO_TX2 = 1<<9, /* Enable Timeout Timer TX2 */ + PA_DIS_TO_TX2 = 1<<8, /* Disable Timeout Timer TX2 */ + PA_ENA_TO_TX1 = 1<<7, /* Enable Timeout Timer TX1 */ + PA_DIS_TO_TX1 = 1<<6, /* Disable Timeout Timer TX1 */ + PA_ENA_TO_RX2 = 1<<5, /* Enable Timeout Timer RX2 */ + PA_DIS_TO_RX2 = 1<<4, /* Disable Timeout Timer RX2 */ + PA_ENA_TO_RX1 = 1<<3, /* Enable Timeout Timer RX1 */ + PA_DIS_TO_RX1 = 1<<2, /* Disable Timeout Timer RX1 */ + PA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */ + PA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */ +}; + +#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\ + PA_ENA_TO_TX1 | PA_ENA_TO_TX2) + + +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ +/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ +/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ +/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ + +#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 
0: Max TXA Timer/Cnt Val */ + +/* TXA_CTRL 8 bit Tx Arbiter Control Register */ +enum { + TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */ + TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */ + TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */ + TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */ + TXA_START_RC = 1<<3, /* Start sync Rate Control */ + TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */ + TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */ + TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */ +}; + +/* + * Bank 4 - 5 + */ +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +enum { + TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ + TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ + TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */ + TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */ + TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */ + TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */ + TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */ +}; + + +enum { + B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */ + B7_CFG_SPC = 0x0380,/* copy of the Configuration register */ + B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */ + B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */ + B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */ + B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */ + B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */ + B8_TA2_REGS = 0x0780,/* Transmit sync queue 2 */ + B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */ +}; + +/* Queue Register Offsets, use Q_ADDR() to access */ +enum { + B8_Q_REGS = 0x0400, /* base of Queue registers */ + Q_D = 0x00, /* 8*32 bit Current Descriptor */ + Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */ + Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */ + Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */ + Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */ + Q_BC = 0x30, /* 32 bit Current Byte Counter */ + Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */ + Q_F = 0x38, /* 32 bit Flag Register */ + Q_T1 = 0x3c, /* 32 bit Test Register 1 */ + Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */ + Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */ + Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */ + Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */ + Q_T2 = 0x40, /* 32 bit Test Register 2 */ + Q_T3 = 0x44, /* 32 bit Test Register 3 */ + +}; +#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) + +/* RAM Buffer Register Offsets */ +enum { + + RB_START= 0x00,/* 32 bit RAM Buffer Start Address */ + RB_END = 0x04,/* 32 bit RAM Buffer End Address */ + RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */ + RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */ + RB_RX_UTPP= 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */ + RB_RX_LTPP= 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */ + RB_RX_UTHP= 0x18,/* 32 bit Rx Upper Threshold, High Prio */ + RB_RX_LTHP= 0x1c,/* 32 bit Rx Lower Threshold, High Prio */ + /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */ + RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */ + RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */ + RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */ + RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */ + RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */ +}; + +/* Receive and Transmit Queues */ +enum { + Q_R1 = 0x0000, /* Receive Queue 1 */ + Q_R2 = 0x0080, /* Receive Queue 2 
*/ + Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */ + Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */ + Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */ + Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */ +}; + +/* Different MAC Types */ +enum { + SK_MAC_XMAC = 0, /* Xaqti XMAC II */ + SK_MAC_GMAC = 1, /* Marvell GMAC */ +}; + +/* Different PHY Types */ +enum { + SK_PHY_XMAC = 0,/* integrated in XMAC II */ + SK_PHY_BCOM = 1,/* Broadcom BCM5400 */ + SK_PHY_LONE = 2,/* Level One LXT1000 [not supported]*/ + SK_PHY_NAT = 3,/* National DP83891 [not supported] */ + SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */ + SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */ +}; + +/* PHY addresses (bits 12..8 of PHY address reg) */ +enum { + PHY_ADDR_XMAC = 0<<8, + PHY_ADDR_BCOM = 1<<8, + +/* GPHY address (bits 15..11 of SMI control reg) */ + PHY_ADDR_MARV = 0, +}; + +#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs)) + +/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */ +enum { + RX_MFF_EA = 0x0c00,/* 32 bit Receive MAC FIFO End Address */ + RX_MFF_WP = 0x0c04,/* 32 bit Receive MAC FIFO Write Pointer */ + + RX_MFF_RP = 0x0c0c,/* 32 bit Receive MAC FIFO Read Pointer */ + RX_MFF_PC = 0x0c10,/* 32 bit Receive MAC FIFO Packet Cnt */ + RX_MFF_LEV = 0x0c14,/* 32 bit Receive MAC FIFO Level */ + RX_MFF_CTRL1 = 0x0c18,/* 16 bit Receive MAC FIFO Control Reg 1*/ + RX_MFF_STAT_TO = 0x0c1a,/* 8 bit Receive MAC Status Timeout */ + RX_MFF_TIST_TO = 0x0c1b,/* 8 bit Receive MAC Time Stamp Timeout */ + RX_MFF_CTRL2 = 0x0c1c,/* 8 bit Receive MAC FIFO Control Reg 2*/ + RX_MFF_TST1 = 0x0c1d,/* 8 bit Receive MAC FIFO Test Reg 1 */ + RX_MFF_TST2 = 0x0c1e,/* 8 bit Receive MAC FIFO Test Reg 2 */ + + RX_LED_INI = 0x0c20,/* 32 bit Receive LED Cnt Init Value */ + RX_LED_VAL = 0x0c24,/* 32 bit Receive LED Cnt Current Value */ + RX_LED_CTRL = 0x0c28,/* 8 bit Receive LED Cnt Control Reg */ + RX_LED_TST = 0x0c29,/* 8 bit Receive LED Cnt Test Register */ + + LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */ + LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */ + LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */ + LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */ + LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */ +}; + +/* Receive and Transmit MAC FIFO Registers (GENESIS only) */ +/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */ +enum { + MFF_ENA_RDY_PAT = 1<<13, /* Enable Ready Patch */ + MFF_DIS_RDY_PAT = 1<<12, /* Disable Ready Patch */ + MFF_ENA_TIM_PAT = 1<<11, /* Enable Timing Patch */ + MFF_DIS_TIM_PAT = 1<<10, /* Disable Timing Patch */ + MFF_ENA_ALM_FUL = 1<<9, /* Enable AlmostFull Sign */ + MFF_DIS_ALM_FUL = 1<<8, /* Disable AlmostFull Sign */ + MFF_ENA_PAUSE = 1<<7, /* Enable Pause Signaling */ + MFF_DIS_PAUSE = 1<<6, /* Disable Pause Signaling */ + MFF_ENA_FLUSH = 1<<5, /* Enable Frame Flushing */ + MFF_DIS_FLUSH = 1<<4, /* Disable Frame Flushing */ + MFF_ENA_TIST = 1<<3, /* Enable Time Stamp Gener */ + MFF_DIS_TIST = 1<<2, /* Disable Time Stamp Gener */ + MFF_CLR_INTIST = 1<<1, /* Clear IRQ No Time Stamp */ + MFF_CLR_INSTAT = 1<<0, /* Clear IRQ No Status */ + MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT, +}; + +/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */ +enum { + MFF_CLR_PERR = 1<<15, /* Clear Parity Error IRQ */ + + MFF_ENA_PKT_REC = 1<<13, /* Enable Packet Recovery */ + MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */ + + MFF_ENA_W4E = 1<<7, /* Enable Wait for Empty */ + MFF_DIS_W4E = 1<<6, /* 
Disable Wait for Empty */ + + MFF_ENA_LOOPB = 1<<3, /* Enable Loopback */ + MFF_DIS_LOOPB = 1<<2, /* Disable Loopback */ + MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */ + MFF_SET_MAC_RST = 1<<0, /* Set XMAC Reset */ + + MFF_TX_CTRL_DEF = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH, +}; + + +/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */ +/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */ +enum { + MFF_WSP_T_ON = 1<<6, /* Tx: Write Shadow Ptr TestOn */ + MFF_WSP_T_OFF = 1<<5, /* Tx: Write Shadow Ptr TstOff */ + MFF_WSP_INC = 1<<4, /* Tx: Write Shadow Ptr Increment */ + MFF_PC_DEC = 1<<3, /* Packet Counter Decrement */ + MFF_PC_T_ON = 1<<2, /* Packet Counter Test On */ + MFF_PC_T_OFF = 1<<1, /* Packet Counter Test Off */ + MFF_PC_INC = 1<<0, /* Packet Counter Increment */ +}; + +/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */ +/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */ +enum { + MFF_WP_T_ON = 1<<6, /* Write Pointer Test On */ + MFF_WP_T_OFF = 1<<5, /* Write Pointer Test Off */ + MFF_WP_INC = 1<<4, /* Write Pointer Increm */ + + MFF_RP_T_ON = 1<<2, /* Read Pointer Test On */ + MFF_RP_T_OFF = 1<<1, /* Read Pointer Test Off */ + MFF_RP_DEC = 1<<0, /* Read Pointer Decrement */ +}; + +/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */ +/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */ +enum { + MFF_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + MFF_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + MFF_RST_CLR = 1<<1, /* Clear MAC FIFO Reset */ + MFF_RST_SET = 1<<0, /* Set MAC FIFO Reset */ +}; + + +/* Link LED Counter Registers (GENESIS only) */ + +/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */ +/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */ +/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */ +enum { + LED_START = 1<<2, /* Start Timer */ + LED_STOP = 1<<1, /* Stop Timer */ + LED_STATE = 1<<0, /* Rx/Tx: LED State, 1=LED on */ +}; + +/* RX_LED_TST 8 bit Receive LED Cnt Test Register */ +/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */ +/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */ +enum { + LED_T_ON = 1<<2, /* LED Counter Test mode On */ + LED_T_OFF = 1<<1, /* LED Counter Test mode Off */ + LED_T_STEP = 1<<0, /* LED Counter Step */ +}; + +/* LNK_LED_REG 8 bit Link LED Register */ +enum { + LED_BLK_ON = 1<<5, /* Link LED Blinking On */ + LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */ + LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */ + LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */ + LED_ON = 1<<1, /* switch LED on */ + LED_OFF = 1<<0, /* switch LED off */ +}; + +/* Receive GMAC FIFO (YUKON) */ +enum { + RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */ + RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ + RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ + RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ + RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */ + RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ + RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */ + RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */ + RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */ +}; + + +/* TXA_TEST 8 bit Tx Arbiter Test Register */ +enum { + TXA_INT_T_ON = 1<<5, /* Tx Arb Interval Timer Test On */ + TXA_INT_T_OFF = 1<<4, /* Tx Arb Interval Timer Test Off */ + TXA_INT_T_STEP = 1<<3, /* Tx Arb Interval Timer Step */ + TXA_LIM_T_ON = 1<<2, /* Tx Arb Limit Timer Test On */ + TXA_LIM_T_OFF = 1<<1, /* Tx Arb Limit Timer Test Off */ + TXA_LIM_T_STEP = 1<<0, /* Tx Arb Limit Timer Step */ +}; + +/* TXA_STAT 8 bit Tx Arbiter Status Register */ +enum { + TXA_PRIO_XS = 1<<0, /* sync queue has prio to send */ +}; + + +/* Q_BC 32 bit Current Byte Counter */ + +/* BMU Control Status Registers */ +/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */ +/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */ +/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */ +/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */ +/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ +/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */ +/* Q_CSR 32 bit BMU Control/Status Register */ + +enum { + CSR_SV_IDLE = 1<<24, /* BMU SM Idle */ + + CSR_DESC_CLR = 1<<21, /* Clear Reset for Descr */ + CSR_DESC_SET = 1<<20, /* Set Reset for Descr */ + CSR_FIFO_CLR = 1<<19, /* Clear Reset for FIFO */ + CSR_FIFO_SET = 1<<18, /* Set Reset for FIFO */ + CSR_HPI_RUN = 1<<17, /* Release HPI SM */ + CSR_HPI_RST = 1<<16, /* Reset HPI SM to Idle */ + CSR_SV_RUN = 1<<15, /* Release Supervisor SM */ + CSR_SV_RST = 1<<14, /* Reset Supervisor SM */ + CSR_DREAD_RUN = 1<<13, /* Release Descr Read SM */ + CSR_DREAD_RST = 1<<12, /* Reset Descr Read SM */ + CSR_DWRITE_RUN = 1<<11, /* Release Descr Write SM */ + CSR_DWRITE_RST = 1<<10, /* Reset Descr Write SM */ + CSR_TRANS_RUN = 1<<9, /* Release Transfer SM */ + CSR_TRANS_RST = 1<<8, /* Reset Transfer SM */ + CSR_ENA_POL = 1<<7, /* Enable Descr Polling */ + CSR_DIS_POL = 1<<6, /* Disable Descr Polling */ + CSR_STOP = 1<<5, /* Stop Rx/Tx Queue */ + CSR_START = 1<<4, /* Start Rx/Tx Queue */ + CSR_IRQ_CL_P = 1<<3, /* (Rx) Clear Parity IRQ */ + CSR_IRQ_CL_B = 1<<2, /* Clear EOB IRQ */ + CSR_IRQ_CL_F = 1<<1, /* Clear EOF IRQ */ + CSR_IRQ_CL_C = 1<<0, /* Clear ERR IRQ */ +}; + +#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\ + CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\ + CSR_TRANS_RST) +#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\ + CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\ + CSR_TRANS_RUN) + +/* Q_F 32 bit Flag Register */ +enum { + F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */ + F_EMPTY = 1<<27, /* Tx FIFO: empty flag */ + F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */ + F_WM_REACHED = 1<<25, /* Watermark reached */ + + F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */ + F_WATER_MARK = 0x0007ffL, /* Bit 10.. 
0: Watermark */ +}; + +/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ +/* RB_START 32 bit RAM Buffer Start Address */ +/* RB_END 32 bit RAM Buffer End Address */ +/* RB_WP 32 bit RAM Buffer Write Pointer */ +/* RB_RP 32 bit RAM Buffer Read Pointer */ +/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ +/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ +/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ +/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ +/* RB_PC 32 bit RAM Buffer Packet Counter */ +/* RB_LEV 32 bit RAM Buffer Level Register */ + +#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */ +/* RB_TST2 8 bit RAM Buffer Test Register 2 */ +/* RB_TST1 8 bit RAM Buffer Test Register 1 */ + +/* RB_CTRL 8 bit RAM Buffer Control Register */ +enum { + RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */ + RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */ + RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */ + RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */ +}; + +/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only), */ +enum { + TX_MFF_EA = 0x0d00,/* 32 bit Transmit MAC FIFO End Address */ + TX_MFF_WP = 0x0d04,/* 32 bit Transmit MAC FIFO WR Pointer */ + TX_MFF_WSP = 0x0d08,/* 32 bit Transmit MAC FIFO WR Shadow Ptr */ + TX_MFF_RP = 0x0d0c,/* 32 bit Transmit MAC FIFO RD Pointer */ + TX_MFF_PC = 0x0d10,/* 32 bit Transmit MAC FIFO Packet Cnt */ + TX_MFF_LEV = 0x0d14,/* 32 bit Transmit MAC FIFO Level */ + TX_MFF_CTRL1 = 0x0d18,/* 16 bit Transmit MAC FIFO Ctrl Reg 1 */ + TX_MFF_WAF = 0x0d1a,/* 8 bit Transmit MAC Wait after flush */ + + TX_MFF_CTRL2 = 0x0d1c,/* 8 bit Transmit MAC FIFO Ctrl Reg 2 */ + TX_MFF_TST1 = 0x0d1d,/* 8 bit Transmit MAC FIFO Test Reg 1 */ + TX_MFF_TST2 = 0x0d1e,/* 8 bit Transmit MAC FIFO Test Reg 2 */ + + TX_LED_INI = 0x0d20,/* 32 bit Transmit LED Cnt Init Value */ + TX_LED_VAL = 0x0d24,/* 32 bit Transmit LED Cnt Current Val */ + TX_LED_CTRL = 0x0d28,/* 8 bit Transmit LED Cnt Control Reg */ + TX_LED_TST = 0x0d29,/* 8 bit Transmit LED Cnt Test Reg */ +}; + +/* Counter and Timer constants, for a host clock of 62.5 MHz */ +#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */ +#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */ + +#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */ + +#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */ + /* 215 ms at 78.12 MHz */ + +#define SK_FACT_62 100 /* is given in percent */ +#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */ +#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */ + + +/* Transmit GMAC FIFO (YUKON only) */ +enum { + TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */ + TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ + TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */ + + TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */ + TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. 
*/ + TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */ + + TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ + TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ + TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ + + /* Descriptor Poll Timer Registers */ + B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */ + B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */ + B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */ + + B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */ + + /* Time Stamp Timer Registers (YUKON only) */ + GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */ + GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */ + GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */ +}; + + +enum { + LINKLED_OFF = 0x01, + LINKLED_ON = 0x02, + LINKLED_LINKSYNC_OFF = 0x04, + LINKLED_LINKSYNC_ON = 0x08, + LINKLED_BLINK_OFF = 0x10, + LINKLED_BLINK_ON = 0x20, +}; + +/* GMAC and GPHY Control Registers (YUKON only) */ +enum { + GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ + GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */ + GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */ + GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */ + GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */ + +/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ + + WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */ + + WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */ + WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ + WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ + WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ + WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ + +/* WOL Pattern Length Registers (YUKON only) */ + + WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */ + WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */ + +/* WOL Pattern Counter Registers (YUKON only) */ + + WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ + WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ +}; +#define WOL_REGS(port, x) (x + (port)*0x80) + +enum { + WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ + WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ +}; +#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400) + +enum { + BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */ + BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */ + BASE_XMAC_2 = 0x3000,/* XMAC 2 registers */ + BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */ +}; + +/* + * Receive Frame Status Encoding + */ +enum { + XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */ + XMR_FS_LEN_SHIFT = 18, + XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/ + XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/ + XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */ + XMR_FS_MC = 1<<14, /* Bit 14: Multicast Frame */ + XMR_FS_UC = 1<<13, /* Bit 13: Unicast Frame */ + + XMR_FS_BURST = 1<<11, /* Bit 11: Burst Mode */ + XMR_FS_CEX_ERR = 1<<10, /* Bit 10: Carrier Ext. 
Error */ + XMR_FS_802_3 = 1<<9, /* Bit 9: 802.3 Frame */ + XMR_FS_COL_ERR = 1<<8, /* Bit 8: Collision Error */ + XMR_FS_CAR_ERR = 1<<7, /* Bit 7: Carrier Event Error */ + XMR_FS_LEN_ERR = 1<<6, /* Bit 6: In-Range Length Error */ + XMR_FS_FRA_ERR = 1<<5, /* Bit 5: Framing Error */ + XMR_FS_RUNT = 1<<4, /* Bit 4: Runt Frame */ + XMR_FS_LNG_ERR = 1<<3, /* Bit 3: Giant (Jumbo) Frame */ + XMR_FS_FCS_ERR = 1<<2, /* Bit 2: Frame Check Sequ Err */ + XMR_FS_ERR = 1<<1, /* Bit 1: Frame Error */ + XMR_FS_MCTRL = 1<<0, /* Bit 0: MAC Control Packet */ + +/* + * XMR_FS_ERR will be set if + * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT, + * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR + * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue + * XMR_FS_ERR unless the corresponding bit in the Receive Command + * Register is set. + */ +}; + +/* + * XMAC-PHY Registers, indirect addressed over the XMAC + */ +enum { + PHY_XMAC_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_XMAC_STAT = 0x01,/* 16 bit r/w PHY Status Register */ + PHY_XMAC_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_XMAC_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_XMAC_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_XMAC_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Abi Reg */ + PHY_XMAC_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_XMAC_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_XMAC_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + + PHY_XMAC_EXT_STAT = 0x0f,/* 16 bit r/o Ext Status Register */ + PHY_XMAC_RES_ABI = 0x10,/* 16 bit r/o PHY Resolved Ability */ +}; +/* + * Broadcom-PHY Registers, indirect addressed over XMAC + */ +enum { + PHY_BCOM_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_BCOM_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_BCOM_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_BCOM_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_BCOM_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_BCOM_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_BCOM_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_BCOM_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_BCOM_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Broadcom-specific registers */ + PHY_BCOM_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_BCOM_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_BCOM_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_BCOM_P_EXT_CTRL = 0x10,/* 16 bit r/w PHY Extended Ctrl Reg */ + PHY_BCOM_P_EXT_STAT = 0x11,/* 16 bit r/o PHY Extended Stat Reg */ + PHY_BCOM_RE_CTR = 0x12,/* 16 bit r/w Receive Error Counter */ + PHY_BCOM_FC_CTR = 0x13,/* 16 bit r/w False Carrier Sense Cnt */ + PHY_BCOM_RNO_CTR = 0x14,/* 16 bit r/w Receiver NOT_OK Cnt */ + + PHY_BCOM_AUX_CTRL = 0x18,/* 16 bit r/w Auxiliary Control Reg */ + PHY_BCOM_AUX_STAT = 0x19,/* 16 bit r/o Auxiliary Stat Summary */ + PHY_BCOM_INT_STAT = 0x1a,/* 16 bit r/o Interrupt Status Reg */ + PHY_BCOM_INT_MASK = 0x1b,/* 16 bit r/w Interrupt Mask Reg */ +}; + +/* + * Marvel-PHY Registers, indirect addressed over GMAC + */ +enum { + PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. 
Advertisement */ + PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Marvel-specific registers */ + PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */ + PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */ + PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */ + PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */ + PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */ + PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */ + PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */ + PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */ + PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */ + PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */ + PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */ + PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */ + PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */ + PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */ + PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */ + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ + PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */ + PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */ + PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */ + PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */ + PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ +}; + +enum { + PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ + PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ + PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */ + PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */ + PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */ + PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */ + PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */ + PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */ + PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */ + PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */ +}; + +enum { + PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */ + PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */ + PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */ +}; + +enum { + PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */ + + PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */ + PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */ + PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */ + PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */ + PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */ + PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */ + PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */ +}; + +enum { + PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */ + PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */ + PHY_I1_REV_MSK = 0xf, /* Bit 3.. 
0: Revision Number */ +}; + +/* different Broadcom PHY Ids */ +enum { + PHY_BCOM_ID1_A1 = 0x6041, + PHY_BCOM_ID1_B2 = 0x6043, + PHY_BCOM_ID1_C0 = 0x6044, + PHY_BCOM_ID1_C5 = 0x6047, +}; + +/* different Marvell PHY Ids */ +enum { + PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */ + PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ + PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ + PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ + PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ +}; + +/* Advertisement register bits */ +enum { + PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */ + + PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */ + PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */ + PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */ + PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */ + PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */ + PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */ + PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */ + PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */ + PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/ + PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA, + PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL | + PHY_AN_100HALF | PHY_AN_100FULL, +}; + +/* Xmac Specific */ +enum { + PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */ + + PHY_X_AN_PAUSE = 3<<7,/* Bit 8.. 7: Pause Bits */ + PHY_X_AN_HD = 1<<6, /* Bit 6: Half Duplex */ + PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */ +}; + +/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */ +enum { + PHY_X_P_NO_PAUSE= 0<<7,/* Bit 8..7: no Pause Mode */ + PHY_X_P_SYM_MD = 1<<7, /* Bit 8..7: symmetric Pause Mode */ + PHY_X_P_ASYM_MD = 2<<7,/* Bit 8..7: asymmetric Pause Mode */ + PHY_X_P_BOTH_MD = 3<<7,/* Bit 8..7: both Pause Mode */ +}; + + +/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/ +enum { + PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */ + PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */ +}; + +/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/ +enum { + PHY_X_RS_PAUSE = 3<<7, /* Bit 8..7: selected Pause Mode */ + PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */ + PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */ + PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */ + PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */ +}; + +/* Remote Fault Bits (PHY_X_AN_RFB) encoding */ +enum { + X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */ + X_RFB_LF = 1<<12,/* Bit 13..12 Link Failure */ + X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */ + X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */ +}; + +/* Broadcom-Specific */ +/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_B_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */ + PHY_B_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */ + PHY_B_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */ + PHY_B_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */ + PHY_B_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */ + PHY_B_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */ +}; + +/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +/***** 
PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +enum { + PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */ + PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */ + PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */ + PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */ + PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */ + PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ + PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */ +}; + +/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/ +enum { + PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */ + PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */ + PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */ + PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */ +}; + +/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/ +enum { + PHY_B_PEC_MAC_PHY = 1<<15, /* Bit 15: 10BIT/GMI-Interface */ + PHY_B_PEC_DIS_CROSS = 1<<14, /* Bit 14: Disable MDI Crossover */ + PHY_B_PEC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */ + PHY_B_PEC_INT_DIS = 1<<12, /* Bit 12: Interrupts Disabled */ + PHY_B_PEC_F_INT = 1<<11, /* Bit 11: Force Interrupt */ + PHY_B_PEC_BY_45 = 1<<10, /* Bit 10: Bypass 4B5B-Decoder */ + PHY_B_PEC_BY_SCR = 1<<9, /* Bit 9: Bypass Scrambler */ + PHY_B_PEC_BY_MLT3 = 1<<8, /* Bit 8: Bypass MLT3 Encoder */ + PHY_B_PEC_BY_RXA = 1<<7, /* Bit 7: Bypass Rx Alignm. */ + PHY_B_PEC_RES_SCR = 1<<6, /* Bit 6: Reset Scrambler */ + PHY_B_PEC_EN_LTR = 1<<5, /* Bit 5: Ena LED Traffic Mode */ + PHY_B_PEC_LED_ON = 1<<4, /* Bit 4: Force LED's on */ + PHY_B_PEC_LED_OFF = 1<<3, /* Bit 3: Force LED's off */ + PHY_B_PEC_EX_IPG = 1<<2, /* Bit 2: Extend Tx IPG Mode */ + PHY_B_PEC_3_LED = 1<<1, /* Bit 1: Three Link LED mode */ + PHY_B_PEC_HIGH_LA = 1<<0, /* Bit 0: GMII FIFO Elasticy */ +}; + +/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/ +enum { + PHY_B_PES_CROSS_STAT = 1<<13, /* Bit 13: MDI Crossover Status */ + PHY_B_PES_INT_STAT = 1<<12, /* Bit 12: Interrupt Status */ + PHY_B_PES_RRS = 1<<11, /* Bit 11: Remote Receiver Stat. */ + PHY_B_PES_LRS = 1<<10, /* Bit 10: Local Receiver Stat. 
*/ + PHY_B_PES_LOCKED = 1<<9, /* Bit 9: Locked */ + PHY_B_PES_LS = 1<<8, /* Bit 8: Link Status */ + PHY_B_PES_RF = 1<<7, /* Bit 7: Remote Fault */ + PHY_B_PES_CE_ER = 1<<6, /* Bit 6: Carrier Ext Error */ + PHY_B_PES_BAD_SSD = 1<<5, /* Bit 5: Bad SSD */ + PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */ + PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */ + PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */ + PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */ + PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */ +}; + +/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ +enum { + PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */ + + PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */ + PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */ +}; + + +/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ +enum { + PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */ + +/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/ + PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */ + PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */ + +/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/ + PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */ + PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */ + PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */ + /* Bit 11: reserved */ + PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */ + /* Bit 9.. 8: reserved */ + PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */ + /* Bit 6: reserved */ + PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */ + /* Bit 4: reserved */ + PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */ +}; + +/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/ +enum { + PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */ + PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */ + PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */ + PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */ + PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */ + PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8: AN HDC */ + PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */ + PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */ + PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */ + PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */ + PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */ + PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */ + PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */ + PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */ +}; +#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT) + +/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/ +/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ +enum { + PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */ + PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */ + PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */ + PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */ + PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */ + PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */ + PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */ + PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. 
HCD */ + PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */ + PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */ + PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */ + PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */ + PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */ + PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */ + PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */ +}; +#define PHY_B_DEF_MSK \ + (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \ + PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE)) + +/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ +enum { + PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */ + PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */ + PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */ + PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */ +}; +/* + * Resolved Duplex mode and Capabilities (Aux Status Summary Reg) + */ +enum { + PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */ + PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */ +}; + +/** Marvell-Specific */ +enum { + PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ + PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */ + PHY_M_AN_RF = 1<<13, /* Remote Fault */ + + PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */ + PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */ + PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */ + PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */ + PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */ + PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */ + PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */ + PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */ +}; + +/* special defines for FIBER (88E1011S only) */ +enum { + PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */ + PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */ + PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 10000Base-X Half Duplex */ + PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 10000Base-X Full Duplex */ +}; + +/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ +enum { + PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */ + PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */ + PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */ + PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */ +}; + +/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_M_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */ + PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */ + PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */ + PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */ + PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */ + PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */ +}; + +/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ +enum { + PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */ + PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */ + PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */ + PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */ + PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */ + PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */ + PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. 
Mask */ + PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */ + PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */ + PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */ + PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */ + PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */ +}; + +enum { + PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */ + PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ +}; + +enum { + PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ + PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ + PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ + PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */ + PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Puls (NLP) Check */ + PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */ + PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Puls Generation */ + + PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */ + PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */ + + PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */ + PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */ +}; + +/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ +enum { + PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */ + PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */ + PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */ + PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */ + PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */ + PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */ + PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */ + PHY_M_PS_LINK_UP = 1<<10, /* Link Up */ + PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */ + PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */ + PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */ + PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */ + PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */ + PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */ + PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */ + PHY_M_PS_JABBER = 1<<0, /* Jabber */ +}; + +#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */ + PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps */ +}; + +enum { + PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */ + PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */ + PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */ + PHY_M_IS_AN_PR = 1<<12, /* Page Received */ + PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */ + PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */ + PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */ + PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */ + PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */ + PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */ + PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */ + PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */ + + PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. 
Status Changed */ + PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */ + PHY_M_IS_JABBER = 1<<0, /* Jabber */ + + PHY_M_IS_DEF_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE | + PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR, + + PHY_M_IS_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL, +}; + +/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ +enum { + PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */ + PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */ + + PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */ + PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_S_DSC_MSK = 3<<8, /* Bit 9.. 8: Slave Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_M_DSC_MSK2 = 7<<9, /* Bit 11.. 9: Master Downshift Counter */ + /* (88E1111 only) */ + PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */ + /* !!! Errata in spec. (1 = disable) */ + PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/ + PHY_M_EC_MAC_S_MSK = 7<<4, /* Bit 6.. 4: Def. MAC interface speed */ + PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */ + PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */ + PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */ + PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */}; + +#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */ +#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */ +#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */ + +#define PHY_M_EC_M_DSC_2(x) ((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */ + /* 100=5x; 101=6x; 110=7x; 111=8x */ +enum { + MAC_TX_CLK_0_MHZ = 2, + MAC_TX_CLK_2_5_MHZ = 6, + MAC_TX_CLK_25_MHZ = 7, +}; + +/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ +enum { + PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */ + PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */ + PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */ + PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */ + PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */ + PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */ + PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */ + /* (88E1111 only) */ +}; +#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK) +#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK) + +enum { + PHY_M_LEDC_LINK_MSK = 3<<3, /* Bit 4.. 
3: Link Control Mask */ + /* (88E1011 only) */ + PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */ + PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */ + PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */ + PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */ + PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */ +}; + +enum { + PULS_NO_STR = 0, /* no pulse stretching */ + PULS_21MS = 1, /* 21 ms to 42 ms */ + PULS_42MS = 2, /* 42 ms to 84 ms */ + PULS_84MS = 3, /* 84 ms to 170 ms */ + PULS_170MS = 4, /* 170 ms to 340 ms */ + PULS_340MS = 5, /* 340 ms to 670 ms */ + PULS_670MS = 6, /* 670 ms to 1.3 s */ + PULS_1300MS = 7, /* 1.3 s to 2.7 s */ +}; + + +enum { + BLINK_42MS = 0, /* 42 ms */ + BLINK_84MS = 1, /* 84 ms */ + BLINK_170MS = 2, /* 170 ms */ + BLINK_340MS = 3, /* 340 ms */ + BLINK_670MS = 4, /* 670 ms */ +}; + +/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ +#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */ + /* Bit 13..12: reserved */ +#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */ +#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */ +#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */ +#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */ +#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */ +#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */ + +enum { + MO_LED_NORM = 0, + MO_LED_BLINK = 1, + MO_LED_OFF = 2, + MO_LED_ON = 3, +}; + +/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ +enum { + PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */ + PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */ + PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */ + PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */ + PHY_M_EC2_FO_AM_MSK = 7, /* Bit 2.. 0: Fiber Output Amplitude */ +}; + +/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ +enum { + PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */ + PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */ + PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */ + PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */ + PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */ + PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */ + PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */ + /* (88E1111 only) */ + /* Bit 9.. 4: reserved (88E1011 only) */ + PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */ + PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */ + PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ +}; + +/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/ +enum { + PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */ + PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */ + /* (88E1111 only) */ + PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */ + PHY_M_CABD_AMPL_MSK = 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */ + /* (88E1111 only) */ + PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */ +}; + +/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */ +enum { + CABD_STAT_NORMAL= 0, + CABD_STAT_SHORT = 1, + CABD_STAT_OPEN = 2, + CABD_STAT_FAIL = 3, +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ + /* Bit 15..12: reserved (used internally) */ +enum { + PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */ + PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 
4: LED1 Mask (ACT) */
+	PHY_M_FELP_LED0_MSK = 0xf,	/* Bit  3.. 0: LED0 Mask (SPEED) */
+};
+
+#define PHY_M_FELP_LED2_CTRL(x)	(((x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x)	(((x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x)	(((x)<<0) & PHY_M_FELP_LED0_MSK)
+
+enum {
+	LED_PAR_CTRL_COLX	= 0x00,
+	LED_PAR_CTRL_ERROR	= 0x01,
+	LED_PAR_CTRL_DUPLEX	= 0x02,
+	LED_PAR_CTRL_DP_COL	= 0x03,
+	LED_PAR_CTRL_SPEED	= 0x04,
+	LED_PAR_CTRL_LINK	= 0x05,
+	LED_PAR_CTRL_TX		= 0x06,
+	LED_PAR_CTRL_RX		= 0x07,
+	LED_PAR_CTRL_ACT	= 0x08,
+	LED_PAR_CTRL_LNK_RX	= 0x09,
+	LED_PAR_CTRL_LNK_AC	= 0x0a,
+	LED_PAR_CTRL_ACT_BL	= 0x0b,
+	LED_PAR_CTRL_TX_BL	= 0x0c,
+	LED_PAR_CTRL_RX_BL	= 0x0d,
+	LED_PAR_CTRL_COL_BL	= 0x0e,
+	LED_PAR_CTRL_INACT	= 0x0f
+};
+
+/***** PHY_MARV_FE_SPEC_2	16 bit r/w	Specific Control Reg. 2 *****/
+enum {
+	PHY_M_FESC_DIS_WAIT	= 1<<2, /* Disable TDR Waiting Period */
+	PHY_M_FESC_ENA_MCLK	= 1<<1, /* Enable MAC Rx Clock in sleep mode */
+	PHY_M_FESC_SEL_CL_A	= 1<<0, /* Select Class A driver (100B-TX) */
+};
+
+
+/***** PHY_MARV_PHY_CTRL (page 3)	16 bit r/w	LED Control Reg. *****/
+enum {
+	PHY_M_LEDC_LOS_MSK	= 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */
+	PHY_M_LEDC_INIT_MSK	= 0xf<<8,  /* Bit 11.. 8: INIT LED Ctrl. Mask */
+	PHY_M_LEDC_STA1_MSK	= 0xf<<4,  /* Bit  7.. 4: STAT1 LED Ctrl. Mask */
+	PHY_M_LEDC_STA0_MSK	= 0xf,     /* Bit  3.. 0: STAT0 LED Ctrl. Mask */
+};
+
+#define PHY_M_LEDC_LOS_CTRL(x)	(((x)<<12) & PHY_M_LEDC_LOS_MSK)
+#define PHY_M_LEDC_INIT_CTRL(x)	(((x)<<8) & PHY_M_LEDC_INIT_MSK)
+#define PHY_M_LEDC_STA1_CTRL(x)	(((x)<<4) & PHY_M_LEDC_STA1_MSK)
+#define PHY_M_LEDC_STA0_CTRL(x)	(((x)<<0) & PHY_M_LEDC_STA0_MSK)
+
+/* GMAC registers */
+/* Port Registers */
+enum {
+	GM_GP_STAT	= 0x0000, /* 16 bit r/o	General Purpose Status */
+	GM_GP_CTRL	= 0x0004, /* 16 bit r/w	General Purpose Control */
+	GM_TX_CTRL	= 0x0008, /* 16 bit r/w	Transmit Control Reg. */
+	GM_RX_CTRL	= 0x000c, /* 16 bit r/w	Receive Control Reg. */
+	GM_TX_FLOW_CTRL	= 0x0010, /* 16 bit r/w	Transmit Flow-Control */
+	GM_TX_PARAM	= 0x0014, /* 16 bit r/w	Transmit Parameter Reg. */
+	GM_SERIAL_MODE	= 0x0018, /* 16 bit r/w	Serial Mode Register */
+/* Source Address Registers */
+	GM_SRC_ADDR_1L	= 0x001c, /* 16 bit r/w	Source Address 1 (low) */
+	GM_SRC_ADDR_1M	= 0x0020, /* 16 bit r/w	Source Address 1 (middle) */
+	GM_SRC_ADDR_1H	= 0x0024, /* 16 bit r/w	Source Address 1 (high) */
+	GM_SRC_ADDR_2L	= 0x0028, /* 16 bit r/w	Source Address 2 (low) */
+	GM_SRC_ADDR_2M	= 0x002c, /* 16 bit r/w	Source Address 2 (middle) */
+	GM_SRC_ADDR_2H	= 0x0030, /* 16 bit r/w	Source Address 2 (high) */
+
+/* Multicast Address Hash Registers */
+	GM_MC_ADDR_H1	= 0x0034, /* 16 bit r/w	Multicast Address Hash 1 */
+	GM_MC_ADDR_H2	= 0x0038, /* 16 bit r/w	Multicast Address Hash 2 */
+	GM_MC_ADDR_H3	= 0x003c, /* 16 bit r/w	Multicast Address Hash 3 */
+	GM_MC_ADDR_H4	= 0x0040, /* 16 bit r/w	Multicast Address Hash 4 */
+
+/* Interrupt Source Registers */
+	GM_TX_IRQ_SRC	= 0x0044, /* 16 bit r/o	Tx Overflow IRQ Source */
+	GM_RX_IRQ_SRC	= 0x0048, /* 16 bit r/o	Rx Overflow IRQ Source */
+	GM_TR_IRQ_SRC	= 0x004c, /* 16 bit r/o	Tx/Rx Over. IRQ Source */
+
+/* Interrupt Mask Registers */
+	GM_TX_IRQ_MSK	= 0x0050, /* 16 bit r/w	Tx Overflow IRQ Mask */
+	GM_RX_IRQ_MSK	= 0x0054, /* 16 bit r/w	Rx Overflow IRQ Mask */
+	GM_TR_IRQ_MSK	= 0x0058, /* 16 bit r/w	Tx/Rx Over. IRQ Mask */
+
+/* Serial Management Interface (SMI) Registers */
+	GM_SMI_CTRL	= 0x0080, /* 16 bit r/w	SMI Control Register */
+	GM_SMI_DATA	= 0x0084, /* 16 bit r/w	SMI Data Register */
+	GM_PHY_ADDR	= 0x0088, /* 16 bit r/w	GPHY Address Register */
+};
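+
+/*
+ * Editor's illustration (not part of the original file): the MIB block
+ * below is read through the gma_read32() accessor defined near the end
+ * of this header, which composes each 32 bit counter from two 16 bit
+ * words spaced 4 bytes apart, e.g.
+ *
+ *	u32 rx_ucast = gma_read32(hw, port, GM_RXF_UC_OK);
+ */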
+
+/* MIB Counters */
+#define GM_MIB_CNT_BASE	0x0100	/* Base Address of MIB Counters */
+#define GM_MIB_CNT_SIZE	44	/* Number of MIB Counters */
+
+/*
+ * MIB Counters base address definitions (low word) -
+ * use offset 4 for access to high word	(32 bit r/o)
+ */
+enum {
+	GM_RXF_UC_OK	= GM_MIB_CNT_BASE + 0,	/* Unicast Frames Received OK */
+	GM_RXF_BC_OK	= GM_MIB_CNT_BASE + 8,	/* Broadcast Frames Received OK */
+	GM_RXF_MPAUSE	= GM_MIB_CNT_BASE + 16,	/* Pause MAC Ctrl Frames Received */
+	GM_RXF_MC_OK	= GM_MIB_CNT_BASE + 24,	/* Multicast Frames Received OK */
+	GM_RXF_FCS_ERR	= GM_MIB_CNT_BASE + 32,	/* Rx Frame Check Seq. Error */
+	/* GM_MIB_CNT_BASE + 40:	reserved */
+	GM_RXO_OK_LO	= GM_MIB_CNT_BASE + 48,	/* Octets Received OK Low */
+	GM_RXO_OK_HI	= GM_MIB_CNT_BASE + 56,	/* Octets Received OK High */
+	GM_RXO_ERR_LO	= GM_MIB_CNT_BASE + 64,	/* Octets Received Invalid Low */
+	GM_RXO_ERR_HI	= GM_MIB_CNT_BASE + 72,	/* Octets Received Invalid High */
+	GM_RXF_SHT	= GM_MIB_CNT_BASE + 80,	/* Frames <64 Byte Received OK */
+	GM_RXE_FRAG	= GM_MIB_CNT_BASE + 88,	/* Frames <64 Byte Received with FCS Err */
+	GM_RXF_64B	= GM_MIB_CNT_BASE + 96,	/* 64 Byte Rx Frame */
+	GM_RXF_127B	= GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
+	GM_RXF_255B	= GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
+	GM_RXF_511B	= GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
+	GM_RXF_1023B	= GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
+	GM_RXF_1518B	= GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
+	GM_RXF_MAX_SZ	= GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
+	GM_RXF_LNG_ERR	= GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
+	GM_RXF_JAB_PKT	= GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */
+	/* GM_MIB_CNT_BASE + 168:	reserved */
+	GM_RXE_FIFO_OV	= GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
+	/* GM_MIB_CNT_BASE + 184:	reserved */
+	GM_TXF_UC_OK	= GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
+	GM_TXF_BC_OK	= GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
+	GM_TXF_MPAUSE	= GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
+	GM_TXF_MC_OK	= GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
+	GM_TXO_OK_LO	= GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
+	GM_TXO_OK_HI	= GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
+	GM_TXF_64B	= GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
+	GM_TXF_127B	= GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
+	GM_TXF_255B	= GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
+	GM_TXF_511B	= GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
+	GM_TXF_1023B	= GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
+	GM_TXF_1518B	= GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
+	GM_TXF_MAX_SZ	= GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */
+
+	GM_TXF_COL	= GM_MIB_CNT_BASE + 304, /* Tx Collision */
+	GM_TXF_LAT_COL	= GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
+	GM_TXF_ABO_COL	= GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col.
*/ + GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */ + GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */ + GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */ +}; + +/* GMAC Bit Definitions */ +/* GM_GP_STAT 16 bit r/o General Purpose Status Register */ +enum { + GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */ + GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */ + GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */ + GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */ + GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */ + GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */ + GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */ + GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */ + + GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */ + GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ + GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */ + GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ + GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ +}; + +/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ +enum { + GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ + GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */ + GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */ + GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */ + GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */ + GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */ + GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */ + GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */ + GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */ + GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */ + GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */ + GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */ + GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */ + GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. 
*/ + GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */ +}; + +#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) +#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS) + +/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ +enum { + GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ + GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ + GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ + GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ +}; + +#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) +#define TX_COL_DEF 0x04 /* late collision after 64 byte */ + +/* GM_RX_CTRL 16 bit r/w Receive Control Register */ +enum { + GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ + GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */ + GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ + GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ +}; + +/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ +enum { + GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ + GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */ + GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */ + + TX_JAM_LEN_DEF = 0x03, + TX_JAM_IPG_DEF = 0x0b, + TX_IPG_JAM_DEF = 0x1c, +}; + +#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK) +#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK) +#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK) + + +/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ +enum { + GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ + GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */ + GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */ + GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ + GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ +}; + +#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) +#define DATA_BLIND_DEF 0x04 + +#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK) +#define IPG_DATA_DEF 0x1e + +/* GM_SMI_CTRL 16 bit r/w SMI Control Register */ +enum { + GM_SMI_CT_PHY_A_MSK = 0x1f<<11, /* Bit 15..11: PHY Device Address */ + GM_SMI_CT_REG_A_MSK = 0x1f<<6, /* Bit 10.. 
6: PHY Register Address */ + GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/ + GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ + GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ +}; + +#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK) +#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK) + +/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ +enum { + GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ + GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ +}; + +/* Receive Frame Status Encoding */ +enum { + GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ + GMR_FS_LEN_SHIFT = 16, + GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */ + GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */ + GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */ + GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */ + GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */ + GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */ + GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */ + GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */ + GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */ + GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */ + GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */ + + GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */ + GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */ + +/* + * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) + */ + GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | + GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | + GMR_FS_JABBER, +/* Rx GMAC FIFO Flush Mask (default) */ + RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | + GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER, +}; + +/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ +enum { + GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */ + GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ + GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ + + GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */ + GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */ + GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */ + GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */ + GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */ + GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */ + GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */ + GMF_OPER_ON = 1<<3, /* Operational Mode On */ + GMF_OPER_OFF = 1<<2, /* Operational Mode Off */ + GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */ + GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */ + + RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */ +}; + + +/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ +enum { + GMF_WSP_TST_ON = 1<<18, /* Write Shadow Pointer Test On */ + GMF_WSP_TST_OFF = 1<<17, /* Write Shadow Pointer Test Off */ + GMF_WSP_STEP = 1<<16, /* Write Shadow Pointer Step/Increment */ + + GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */ + GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */ + GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */ +}; + +/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ +enum { + GMT_ST_START = 1<<2, /* Start Time Stamp Timer */ + GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */ + GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */ +}; + +/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ +enum { + GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */ + GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */ + GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */ + GMC_F_LOOPB_OFF = 1<<4, /* FIFO 
Loopback Off */ + GMC_PAUSE_ON = 1<<3, /* Pause On */ + GMC_PAUSE_OFF = 1<<2, /* Pause Off */ + GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */ + GMC_RST_SET = 1<<0, /* Set GMAC Reset */ +}; + +/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ +enum { + GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */ + GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */ + GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */ + GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */ + GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */ + GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */ + GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */ + GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */ + GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */ + GPC_ANEG_0 = 1<<19, /* ANEG[0] */ + GPC_ENA_XC = 1<<18, /* Enable MDI crossover */ + GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */ + GPC_ANEG_3 = 1<<16, /* ANEG[3] */ + GPC_ANEG_2 = 1<<15, /* ANEG[2] */ + GPC_ANEG_1 = 1<<14, /* ANEG[1] */ + GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */ + GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */ + GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */ + GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */ + GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */ + GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */ + /* Bits 7..2: reserved */ + GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ + GPC_RST_SET = 1<<0, /* Set GPHY Reset */ +}; + +#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) +#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) +#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0) + +/* forced speed and duplex mode (don't mix with other ANEG bits) */ +#define GPC_FRC10MBIT_HALF 0 +#define GPC_FRC10MBIT_FULL GPC_ANEG_0 +#define GPC_FRC100MBIT_HALF GPC_ANEG_1 +#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1) + +/* auto-negotiation with limited advertised speeds */ +/* mix only with master/slave settings (for copper) */ +#define GPC_ADV_1000_HALF GPC_ANEG_2 +#define GPC_ADV_1000_FULL GPC_ANEG_3 +#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3) + +/* master/slave settings */ +/* only for copper with 1000 Mbps */ +#define GPC_FORCE_MASTER 0 +#define GPC_FORCE_SLAVE GPC_ANEG_0 +#define GPC_PREF_MASTER GPC_ANEG_1 +#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0) + +/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ +/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ +enum { + GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */ + GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */ + GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */ + GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */ + GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ + GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ + +#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR) + +/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ + /* Bits 15.. 
2: reserved */ + GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ + GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ + + +/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ + WOL_CTL_LINK_CHG_OCC = 1<<15, + WOL_CTL_MAGIC_PKT_OCC = 1<<14, + WOL_CTL_PATTERN_OCC = 1<<13, + WOL_CTL_CLEAR_RESULT = 1<<12, + WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11, + WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10, + WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9, + WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8, + WOL_CTL_ENA_PME_ON_PATTERN = 1<<7, + WOL_CTL_DIS_PME_ON_PATTERN = 1<<6, + WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5, + WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4, + WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3, + WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2, + WOL_CTL_ENA_PATTERN_UNIT = 1<<1, + WOL_CTL_DIS_PATTERN_UNIT = 1<<0, +}; + +#define WOL_CTL_DEFAULT \ + (WOL_CTL_DIS_PME_ON_LINK_CHG | \ + WOL_CTL_DIS_PME_ON_PATTERN | \ + WOL_CTL_DIS_PME_ON_MAGIC_PKT | \ + WOL_CTL_DIS_LINK_CHG_UNIT | \ + WOL_CTL_DIS_PATTERN_UNIT | \ + WOL_CTL_DIS_MAGIC_PKT_UNIT) + +/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ +#define WOL_CTL_PATT_ENA(x) (1 << (x)) + + +/* XMAC II registers */ +enum { + XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */ + XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */ + XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/ + XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */ + XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */ + XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */ + XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */ + XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */ + XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */ + XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */ + XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */ + XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */ + XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */ + XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */ + XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */ + XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */ + XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */ + XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */ + XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */ + XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */ + XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */ + XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */ + XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */ + XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/ + XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */ + + XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */ +#define XM_EXM(reg) (XM_EXM_START + ((reg) << 3)) +}; + +enum { + XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */ + XM_SA = 0x0108, /* NA reg r/w Station Address Register */ + XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */ + XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */ + XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */ + XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */ + XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */ + XM_MODE = 0x0124, /* 32 bit r/w Mode Register */ + XM_LSA = 0x0128, /* NA reg r/o Last Source Register */ + XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */ + XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */ + 
XM_STAT_CMD	= 0x0200, /* 16 bit r/w	Statistics Command Register */
+	XM_RX_CNT_EV	= 0x0204, /* 32 bit r/o	Rx Counter Event Register */
+	XM_TX_CNT_EV	= 0x0208, /* 32 bit r/o	Tx Counter Event Register */
+	XM_RX_EV_MSK	= 0x020c, /* 32 bit r/w	Rx Counter Event Mask */
+	XM_TX_EV_MSK	= 0x0210, /* 32 bit r/w	Tx Counter Event Mask */
+	XM_TXF_OK	= 0x0280, /* 32 bit r/o	Frames Transmitted OK Counter */
+	XM_TXO_OK_HI	= 0x0284, /* 32 bit r/o	Octets Transmitted OK High Cnt*/
+	XM_TXO_OK_LO	= 0x0288, /* 32 bit r/o	Octets Transmitted OK Low Cnt */
+	XM_TXF_BC_OK	= 0x028c, /* 32 bit r/o	Broadcast Frames Xmitted OK */
+	XM_TXF_MC_OK	= 0x0290, /* 32 bit r/o	Multicast Frames Xmitted OK */
+	XM_TXF_UC_OK	= 0x0294, /* 32 bit r/o	Unicast Frames Xmitted OK */
+	XM_TXF_LONG	= 0x0298, /* 32 bit r/o	Tx Long Frame Counter */
+	XM_TXE_BURST	= 0x029c, /* 32 bit r/o	Tx Burst Event Counter */
+	XM_TXF_MPAUSE	= 0x02a0, /* 32 bit r/o	Tx Pause MAC Ctrl Frame Cnt */
+	XM_TXF_MCTRL	= 0x02a4, /* 32 bit r/o	Tx MAC Ctrl Frame Counter */
+	XM_TXF_SNG_COL	= 0x02a8, /* 32 bit r/o	Tx Single Collision Counter */
+	XM_TXF_MUL_COL	= 0x02ac, /* 32 bit r/o	Tx Multiple Collision Counter */
+	XM_TXF_ABO_COL	= 0x02b0, /* 32 bit r/o	Tx aborted due to Exces. Col. */
+	XM_TXF_LAT_COL	= 0x02b4, /* 32 bit r/o	Tx Late Collision Counter */
+	XM_TXF_DEF	= 0x02b8, /* 32 bit r/o	Tx Deferred Frame Counter */
+	XM_TXF_EX_DEF	= 0x02bc, /* 32 bit r/o	Tx Excessive Deferral Counter */
+	XM_TXE_FIFO_UR	= 0x02c0, /* 32 bit r/o	Tx FIFO Underrun Event Cnt */
+	XM_TXE_CS_ERR	= 0x02c4, /* 32 bit r/o	Tx Carrier Sense Error Cnt */
+	XM_TXP_UTIL	= 0x02c8, /* 32 bit r/o	Tx Utilization in % */
+	XM_TXF_64B	= 0x02d0, /* 32 bit r/o	64 Byte Tx Frame Counter */
+	XM_TXF_127B	= 0x02d4, /* 32 bit r/o	65-127 Byte Tx Frame Counter */
+	XM_TXF_255B	= 0x02d8, /* 32 bit r/o	128-255 Byte Tx Frame Counter */
+	XM_TXF_511B	= 0x02dc, /* 32 bit r/o	256-511 Byte Tx Frame Counter */
+	XM_TXF_1023B	= 0x02e0, /* 32 bit r/o	512-1023 Byte Tx Frame Counter*/
+	XM_TXF_MAX_SZ	= 0x02e4, /* 32 bit r/o	1024-MaxSize Byte Tx Frame Cnt*/
+	XM_RXF_OK	= 0x0300, /* 32 bit r/o	Frames Received OK */
+	XM_RXO_OK_HI	= 0x0304, /* 32 bit r/o	Octets Received OK High Cnt */
+	XM_RXO_OK_LO	= 0x0308, /* 32 bit r/o	Octets Received OK Low Counter*/
+	XM_RXF_BC_OK	= 0x030c, /* 32 bit r/o	Broadcast Frames Received OK */
+	XM_RXF_MC_OK	= 0x0310, /* 32 bit r/o	Multicast Frames Received OK */
+	XM_RXF_UC_OK	= 0x0314, /* 32 bit r/o	Unicast Frames Received OK */
+	XM_RXF_MPAUSE	= 0x0318, /* 32 bit r/o	Rx Pause MAC Ctrl Frame Cnt */
+	XM_RXF_MCTRL	= 0x031c, /* 32 bit r/o	Rx MAC Ctrl Frame Counter */
+	XM_RXF_INV_MP	= 0x0320, /* 32 bit r/o	Rx invalid Pause Frame Cnt */
+	XM_RXF_INV_MOC	= 0x0324, /* 32 bit r/o	Rx Frames with inv. MAC Opcode*/
+	XM_RXE_BURST	= 0x0328, /* 32 bit r/o	Rx Burst Event Counter */
+	XM_RXE_FMISS	= 0x032c, /* 32 bit r/o	Rx Missed Frames Event Cnt */
+	XM_RXF_FRA_ERR	= 0x0330, /* 32 bit r/o	Rx Framing Error Counter */
+	XM_RXE_FIFO_OV	= 0x0334, /* 32 bit r/o	Rx FIFO overflow Event Cnt */
+	XM_RXF_JAB_PKT	= 0x0338, /* 32 bit r/o	Rx Jabber Packet Frame Cnt */
+	XM_RXE_CAR_ERR	= 0x033c, /* 32 bit r/o	Rx Carrier Event Error Cnt */
+	XM_RXF_LEN_ERR	= 0x0340, /* 32 bit r/o	Rx in Range Length Error */
+	XM_RXE_SYM_ERR	= 0x0344, /* 32 bit r/o	Rx Symbol Error Counter */
+	XM_RXE_SHT_ERR	= 0x0348, /* 32 bit r/o	Rx Short Event Error Cnt */
+	XM_RXE_RUNT	= 0x034c, /* 32 bit r/o	Rx Runt Event Counter */
+	XM_RXF_LNG_ERR	= 0x0350, /* 32 bit r/o	Rx Frame too Long Error Cnt */
+	XM_RXF_FCS_ERR	= 0x0354, /* 32 bit r/o	Rx Frame Check Seq. Error Cnt */
+	XM_RXF_CEX_ERR	= 0x035c, /* 32 bit r/o	Rx Carrier Ext Error Frame Cnt*/
+	XM_RXP_UTIL	= 0x0360, /* 32 bit r/o	Rx Utilization in % */
+	XM_RXF_64B	= 0x0368, /* 32 bit r/o	64 Byte Rx Frame Counter */
+	XM_RXF_127B	= 0x036c, /* 32 bit r/o	65-127 Byte Rx Frame Counter */
+	XM_RXF_255B	= 0x0370, /* 32 bit r/o	128-255 Byte Rx Frame Counter */
+	XM_RXF_511B	= 0x0374, /* 32 bit r/o	256-511 Byte Rx Frame Counter */
+	XM_RXF_1023B	= 0x0378, /* 32 bit r/o	512-1023 Byte Rx Frame Counter*/
+	XM_RXF_MAX_SZ	= 0x037c, /* 32 bit r/o	1024-MaxSize Byte Rx Frame Cnt*/
+};
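+
+/*
+ * Editor's illustration (not part of the original file): the driver
+ * latches and reads these counters with the xm_write16()/xm_read32()
+ * accessors defined at the end of this header, e.g.
+ *
+ *	xm_write16(hw, port, XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
+ *	frames_ok = xm_read32(hw, port, XM_TXF_OK);
+ */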
+
+/* XM_MMU_CMD	16 bit r/w	MMU Command Register */
+enum {
+	XM_MMU_PHY_RDY	= 1<<12, /* Bit 12: PHY Read Ready */
+	XM_MMU_PHY_BUSY	= 1<<11, /* Bit 11: PHY Busy */
+	XM_MMU_IGN_PF	= 1<<10, /* Bit 10: Ignore Pause Frame */
+	XM_MMU_MAC_LB	= 1<<9,	 /* Bit  9: Enable MAC Loopback */
+	XM_MMU_FRC_COL	= 1<<7,	 /* Bit  7: Force Collision */
+	XM_MMU_SIM_COL	= 1<<6,	 /* Bit  6: Simulate Collision */
+	XM_MMU_NO_PRE	= 1<<5,	 /* Bit  5: No MDIO Preamble */
+	XM_MMU_GMII_FD	= 1<<4,	 /* Bit  4: GMII uses Full Duplex */
+	XM_MMU_RAT_CTRL	= 1<<3,	 /* Bit  3: Enable Rate Control */
+	XM_MMU_GMII_LOOP= 1<<2,	 /* Bit  2: PHY is in Loopback Mode */
+	XM_MMU_ENA_RX	= 1<<1,	 /* Bit  1: Enable Receiver */
+	XM_MMU_ENA_TX	= 1<<0,	 /* Bit  0: Enable Transmitter */
+};
+
+
+/* XM_TX_CMD	16 bit r/w	Transmit Command Register */
+enum {
+	XM_TX_BK2BK	= 1<<6,	/* Bit  6: Ignore Carrier Sense (Tx Bk2Bk)*/
+	XM_TX_ENC_BYP	= 1<<5,	/* Bit  5: Set Encoder in Bypass Mode */
+	XM_TX_SAM_LINE	= 1<<4,	/* Bit  4: (sc) Start utilization calculation */
+	XM_TX_NO_GIG_MD	= 1<<3,	/* Bit  3: Disable Carrier Extension */
+	XM_TX_NO_PRE	= 1<<2,	/* Bit  2: Disable Preamble Generation */
+	XM_TX_NO_CRC	= 1<<1,	/* Bit  1: Disable CRC Generation */
+	XM_TX_AUTO_PAD	= 1<<0,	/* Bit  0: Enable Automatic Padding */
+};
+
+/* XM_TX_RT_LIM	16 bit r/w	Transmit Retry Limit Register */
+#define XM_RT_LIM_MSK	0x1f	/* Bit 4..0: Tx Retry Limit */
+
+
+/* XM_TX_STIME	16 bit r/w	Transmit Slottime Register */
+#define XM_STIME_MSK	0x7f	/* Bit 6..0: Tx Slottime bits */
+
+
+/* XM_TX_IPG	16 bit r/w	Transmit Inter Packet Gap */
+#define XM_IPG_MSK	0xff	/* Bit 7..0: IPG value bits */
+
+
+/* XM_RX_CMD	16 bit r/w	Receive Command Register */
+enum {
+	XM_RX_LENERR_OK	= 1<<8,	/* Bit 8 don't set Rx Err bit for */
+				/*	 inrange error packets */
+	XM_RX_BIG_PK_OK	= 1<<7,	/* Bit 7 don't set Rx Err bit for */
+				/*	 jumbo packets */
+	XM_RX_IPG_CAP	= 1<<6,	/* Bit 6 repl.
type field with IPG */ + XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */ + XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */ + XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */ + XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */ + XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */ + XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */ +}; + + +/* XM_GP_PORT 32 bit r/w General Purpose Port Register */ +enum { + XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */ + XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */ + XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */ + XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */ + XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */ +}; + + +/* XM_IMSK 16 bit r/w Interrupt Mask Register */ +/* XM_ISRC 16 bit r/o Interrupt Status Register */ +enum { + XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */ + XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. etc */ + XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */ + XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */ + XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */ + XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */ + XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */ + XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */ + XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */ + XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */ + XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */ + XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */ + XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */ + XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */ + XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */ + + XM_IMSK_DISABLE = 0xffff, +}; + +/* XM_HW_CFG 16 bit r/w Hardware Config Register */ +enum { + XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */ + XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/ + XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */ +}; + + +/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */ +/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */ +#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */ + +/* XM_TX_THR 16 bit r/w Tx Request Threshold */ +/* XM_HT_THR 16 bit r/w Host Request Threshold */ +/* XM_RX_THR 16 bit r/w Rx Request Threshold */ +#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */ + + +/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */ +enum { + XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */ + XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */ + XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */ + XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */ + XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */ + XM_ST_BURST = 1<<9, /* Bit 9: p. 
xmitted in burst md*/
+	XM_ST_DEFER	= 1<<8,		/* Bit  8: packet was deferred */
+	XM_ST_BC	= 1<<7,		/* Bit  7: Broadcast packet */
+	XM_ST_MC	= 1<<6,		/* Bit  6: Multicast packet */
+	XM_ST_UC	= 1<<5,		/* Bit  5: Unicast packet */
+	XM_ST_TX_UR	= 1<<4,		/* Bit  4: FIFO Underrun occurred */
+	XM_ST_CS_ERR	= 1<<3,		/* Bit  3: Carrier Sense Error */
+	XM_ST_LAT_COL	= 1<<2,		/* Bit  2: Late Collision Error */
+	XM_ST_MUL_COL	= 1<<1,		/* Bit  1: Multiple Collisions */
+	XM_ST_SGN_COL	= 1<<0,		/* Bit  0: Single Collision */
+};
+
+/* XM_RX_LO_WM	16 bit r/w	Receive Low Water Mark */
+/* XM_RX_HI_WM	16 bit r/w	Receive High Water Mark */
+#define XM_RX_WM_MSK	0x03ff	/* Bit 11.. 0: Rx FIFO Watermark bits */
+
+
+/* XM_DEV_ID	32 bit r/o	Device ID Register */
+#define XM_DEV_OUI	(0x00ffffffUL<<8)	/* Bit 31..8:	Device OUI */
+#define XM_DEV_REV	(0x07L << 5)	/* Bit  7..5:	Chip Rev Num */
+
+
+/* XM_MODE	32 bit r/w	Mode Register */
+enum {
+	XM_MD_ENA_REJ	= 1<<26, /* Bit 26: Enable Frame Reject */
+	XM_MD_SPOE_E	= 1<<25, /* Bit 25: Send Pause on Edge */
+				 /*	   extern generated */
+	XM_MD_TX_REP	= 1<<24, /* Bit 24: Transmit Repeater Mode */
+	XM_MD_SPOFF_I	= 1<<23, /* Bit 23: Send Pause on FIFO full */
+				 /*	   intern generated */
+	XM_MD_LE_STW	= 1<<22, /* Bit 22: Rx Stat Word in Little Endian */
+	XM_MD_TX_CONT	= 1<<21, /* Bit 21: Send Continuous */
+	XM_MD_TX_PAUSE	= 1<<20, /* Bit 20: (sc) Send Pause Frame */
+	XM_MD_ATS	= 1<<19, /* Bit 19: Append Time Stamp */
+	XM_MD_SPOL_I	= 1<<18, /* Bit 18: Send Pause on Low */
+				 /*	   intern generated */
+	XM_MD_SPOH_I	= 1<<17, /* Bit 17: Send Pause on High */
+				 /*	   intern generated */
+	XM_MD_CAP	= 1<<16, /* Bit 16: Check Address Pair */
+	XM_MD_ENA_HASH	= 1<<15, /* Bit 15: Enable Hashing */
+	XM_MD_CSA	= 1<<14, /* Bit 14: Check Station Address */
+	XM_MD_CAA	= 1<<13, /* Bit 13: Check Address Array */
+	XM_MD_RX_MCTRL	= 1<<12, /* Bit 12: Rx MAC Control Frame */
+	XM_MD_RX_RUNT	= 1<<11, /* Bit 11: Rx Runt Frames */
+	XM_MD_RX_IRLE	= 1<<10, /* Bit 10: Rx in Range Len Err Frame */
+	XM_MD_RX_LONG	= 1<<9,  /* Bit  9: Rx Long Frame */
+	XM_MD_RX_CRCE	= 1<<8,  /* Bit  8: Rx CRC Error Frame */
+	XM_MD_RX_ERR	= 1<<7,  /* Bit  7: Rx Error Frame */
+	XM_MD_DIS_UC	= 1<<6,  /* Bit  6: Disable Rx Unicast */
+	XM_MD_DIS_MC	= 1<<5,  /* Bit  5: Disable Rx Multicast */
+	XM_MD_DIS_BC	= 1<<4,  /* Bit  4: Disable Rx Broadcast */
+	XM_MD_ENA_PROM	= 1<<3,  /* Bit  3: Enable Promiscuous */
+	XM_MD_ENA_BE	= 1<<2,  /* Bit  2: Enable Big Endian */
+	XM_MD_FTF	= 1<<1,  /* Bit  1: (sc) Flush Tx FIFO */
+	XM_MD_FRF	= 1<<0,  /* Bit  0: (sc) Flush Rx FIFO */
+};
+
+#define XM_PAUSE_MODE	(XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
+#define XM_DEF_MODE	(XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
+			 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA)
+
+/* XM_STAT_CMD	16 bit r/w	Statistics Command Register */
+enum {
+	XM_SC_SNP_RXC	= 1<<5,	/* Bit  5: (sc) Snap Rx Counters */
+	XM_SC_SNP_TXC	= 1<<4,	/* Bit  4: (sc) Snap Tx Counters */
+	XM_SC_CP_RXC	= 1<<3,	/* Bit  3: Copy Rx Counters Continuously */
+	XM_SC_CP_TXC	= 1<<2,	/* Bit  2: Copy Tx Counters Continuously */
+	XM_SC_CLR_RXC	= 1<<1,	/* Bit  1: (sc) Clear Rx Counters */
+	XM_SC_CLR_TXC	= 1<<0,	/* Bit  0: (sc) Clear Tx Counters */
+};
+
+
+/* XM_RX_CNT_EV	32 bit r/o	Rx Counter Event Register */
+/* XM_RX_EV_MSK	32 bit r/w	Rx Counter Event Mask */
+enum {
+	XMR_MAX_SZ_OV	= 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/
+	XMR_1023B_OV	= 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/
+	XMR_511B_OV	= 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/
+	XMR_255B_OV	= 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/
+	XMR_127B_OV	= 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */
+	XMR_64B_OV	= 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */
+	XMR_UTIL_OV	= 1<<25, /* Bit 25: Rx Util Cnt Overflow */
+	XMR_UTIL_UR	= 1<<24, /* Bit 24: Rx Util Cnt Underrun */
+	XMR_CEX_ERR_OV	= 1<<23, /* Bit 23: CEXT Err Cnt Ov */
+	XMR_FCS_ERR_OV	= 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */
+	XMR_LNG_ERR_OV	= 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/
+	XMR_RUNT_OV	= 1<<19, /* Bit 19: Runt Event Cnt Ov */
+	XMR_SHT_ERR_OV	= 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/
+	XMR_SYM_ERR_OV	= 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */
+	XMR_CAR_ERR_OV	= 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */
+	XMR_JAB_PKT_OV	= 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */
+	XMR_FIFO_OV	= 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */
+	XMR_FRA_ERR_OV	= 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */
+	XMR_FMISS_OV	= 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */
+	XMR_BURST	= 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */
+	XMR_INV_MOC	= 1<<9,	 /* Bit  9: Rx with inv. MAC OC Ov*/
+	XMR_INV_MP	= 1<<8,	 /* Bit  8: Rx inv Pause Frame Ov */
+	XMR_MCTRL_OV	= 1<<7,	 /* Bit  7: Rx MAC Ctrl-F Cnt Ov */
+	XMR_MPAUSE_OV	= 1<<6,	 /* Bit  6: Rx Pause MAC Ctrl-F Ov*/
+	XMR_UC_OK_OV	= 1<<5,	 /* Bit  5: Rx Unicast Frame CntOv*/
+	XMR_MC_OK_OV	= 1<<4,	 /* Bit  4: Rx Multicast Cnt Ov */
+	XMR_BC_OK_OV	= 1<<3,	 /* Bit  3: Rx Broadcast Cnt Ov */
+	XMR_OK_LO_OV	= 1<<2,	 /* Bit  2: Octets Rx OK Low CntOv*/
+	XMR_OK_HI_OV	= 1<<1,	 /* Bit  1: Octets Rx OK Hi Cnt Ov*/
+	XMR_OK_OV	= 1<<0,	 /* Bit  0: Frames Received Ok Ov */
+};
+
+#define XMR_DEF_MSK		(XMR_OK_LO_OV | XMR_OK_HI_OV)
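+
+/*
+ * Editor's illustration (not part of the original file): programming the
+ * default mask leaves only the octets-OK rollover events enabled, e.g.
+ *
+ *	xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
+ */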
+
+/* XM_TX_CNT_EV	32 bit r/o	Tx Counter Event Register */
+/* XM_TX_EV_MSK	32 bit r/w	Tx Counter Event Mask */
+enum {
+	XMT_MAX_SZ_OV	= 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/
+	XMT_1023B_OV	= 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/
+	XMT_511B_OV	= 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/
+	XMT_255B_OV	= 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/
+	XMT_127B_OV	= 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */
+	XMT_64B_OV	= 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */
+	XMT_UTIL_OV	= 1<<19, /* Bit 19: Tx Util Cnt Overflow */
+	XMT_UTIL_UR	= 1<<18, /* Bit 18: Tx Util Cnt Underrun */
+	XMT_CS_ERR_OV	= 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/
+	XMT_FIFO_UR_OV	= 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */
+	XMT_EX_DEF_OV	= 1<<15, /* Bit 15: Tx Ex Deferral Cnt Ov */
+	XMT_DEF		= 1<<14, /* Bit 14: Tx Deferred Cnt Ov */
+	XMT_LAT_COL_OV	= 1<<13, /* Bit 13: Tx Late Col Cnt Ov */
+	XMT_ABO_COL_OV	= 1<<12, /* Bit 12: Tx abo due to Ex Col Ov*/
+	XMT_MUL_COL_OV	= 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */
+	XMT_SNG_COL	= 1<<10, /* Bit 10: Tx Single Col Cnt Ov */
+	XMT_MCTRL_OV	= 1<<9,	 /* Bit  9: Tx MAC Ctrl Counter Ov*/
+	XMT_MPAUSE	= 1<<8,	 /* Bit  8: Tx Pause MAC Ctrl-F Ov*/
+	XMT_BURST	= 1<<7,	 /* Bit  7: Tx Burst Event Cnt Ov */
+	XMT_LONG	= 1<<6,	 /* Bit  6: Tx Long Frame Cnt Ov */
+	XMT_UC_OK_OV	= 1<<5,	 /* Bit  5: Tx Unicast Cnt Ov */
+	XMT_MC_OK_OV	= 1<<4,	 /* Bit  4: Tx Multicast Cnt Ov */
+	XMT_BC_OK_OV	= 1<<3,	 /* Bit  3: Tx Broadcast Cnt Ov */
+	XMT_OK_LO_OV	= 1<<2,	 /* Bit  2: Octets Tx OK Low CntOv*/
+	XMT_OK_HI_OV	= 1<<1,	 /* Bit  1: Octets Tx OK Hi Cnt Ov*/
+	XMT_OK_OV	= 1<<0,	 /* Bit  0: Frames Tx Ok Ov */
+};
+
+#define XMT_DEF_MSK		(XMT_OK_LO_OV | XMT_OK_HI_OV)
+
+struct skge_rx_desc {
+	u32		control;
+	u32		next_offset;
+	u32		dma_lo;
+	u32		dma_hi;
+	u32		status;
+	u32		timestamp;
+	u16		csum2;
+	u16		csum1;
+	u16		csum2_start;
+	u16		csum1_start;
+};
+
+struct skge_tx_desc {
+	u32		control;
+	u32		next_offset;
+	u32		dma_lo;
+	u32		dma_hi;
+	u32		status;
+	u32		csum_offs;
+	u16		csum_write;
+	u16		csum_start;
+	u32		rsvd;
+};
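+
+/*
+ * Editor's note (illustrative, not from the original file): the two
+ * structures above are the hardware-visible list elements; next_offset
+ * chains the descriptors into a ring within one DMA-coherent block, and
+ * dma_hi/dma_lo hold the upper and lower 32 bits of the buffer's bus
+ * address.
+ */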
+
+struct skge_element {
+	struct skge_element	*next;
+	void			*desc;
+	struct sk_buff		*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapaddr);
+	DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct skge_ring {
+	struct skge_element *to_clean;
+	struct skge_element *to_use;
+	struct skge_element *start;
+	unsigned long	    count;
+};
+
+
+struct skge_hw {
+	void __iomem	     *regs;
+	struct pci_dev	     *pdev;
+	spinlock_t	     hw_lock;
+	u32		     intr_mask;
+	struct net_device    *dev[2];
+
+	u8		     chip_id;
+	u8		     chip_rev;
+	u8		     copper;
+	u8		     ports;
+	u8		     phy_type;
+
+	u32		     ram_size;
+	u32		     ram_offset;
+	u16		     phy_addr;
+	spinlock_t	     phy_lock;
+	struct tasklet_struct phy_task;
+
+	char		     irq_name[0]; /* skge@pci:000:04:00.0 */
+};
+
+enum pause_control {
+	FLOW_MODE_NONE		= 1, /* No Flow-Control */
+	FLOW_MODE_LOC_SEND	= 2, /* Local station sends PAUSE */
+	FLOW_MODE_SYMMETRIC	= 3, /* Both stations may send PAUSE */
+	FLOW_MODE_SYM_OR_REM	= 4, /* Both stations may send PAUSE or
+				      * just the remote station may send PAUSE
+				      */
+};
+
+enum pause_status {
+	FLOW_STAT_INDETERMINATED=0,	/* indeterminate */
+	FLOW_STAT_NONE,			/* No Flow Control */
+	FLOW_STAT_REM_SEND,		/* Remote Station sends PAUSE */
+	FLOW_STAT_LOC_SEND,		/* Local station sends PAUSE */
+	FLOW_STAT_SYMMETRIC,		/* Both stations may send PAUSE */
+};
+
+
+struct skge_port {
+	struct skge_hw	     *hw;
+	struct net_device    *netdev;
+	struct napi_struct   napi;
+	int		     port;
+	u32		     msg_enable;
+
+	struct skge_ring     tx_ring;
+
+	struct skge_ring     rx_ring ____cacheline_aligned_in_smp;
+	unsigned int	     rx_buf_size;
+
+	struct timer_list    link_timer;
+	enum pause_control   flow_control;
+	enum pause_status    flow_status;
+	u8		     blink_on;
+	u8		     wol;
+	u8		     autoneg;	/* AUTONEG_ENABLE, AUTONEG_DISABLE */
+	u8		     duplex;	/* DUPLEX_HALF, DUPLEX_FULL */
+	u16		     speed;	/* SPEED_1000, SPEED_100, ... */
+	u32		     advertising;
+
+	void		     *mem;	/* PCI memory for rings */
+	dma_addr_t	     dma;
+	unsigned long	     mem_size;
+#ifdef CONFIG_SKGE_DEBUG
+	struct dentry	     *debugfs;
+#endif
+};
+
+
+/* Register accessor for memory mapped device */
+static inline u32 skge_read32(const struct skge_hw *hw, int reg)
+{
+	return readl(hw->regs + reg);
+}
+
+static inline u16 skge_read16(const struct skge_hw *hw, int reg)
+{
+	return readw(hw->regs + reg);
+}
+
+static inline u8 skge_read8(const struct skge_hw *hw, int reg)
+{
+	return readb(hw->regs + reg);
+}
+
+static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val)
+{
+	writel(val, hw->regs + reg);
+}
+
+static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val)
+{
+	writew(val, hw->regs + reg);
+}
+
+static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
+{
+	writeb(val, hw->regs + reg);
+}
+
+/* MAC Related Registers inside the device.
*/ +#define SK_REG(port,reg) (((port)<<7)+(u16)(reg)) +#define SK_XMAC_REG(port, reg) \ + ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1) + +static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg) +{ + u32 v; + v = skge_read16(hw, SK_XMAC_REG(port, reg)); + v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16; + return v; +} + +static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg) +{ + return skge_read16(hw, SK_XMAC_REG(port,reg)); +} + +static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v) +{ + skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff); + skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16); +} + +static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v) +{ + skge_write16(hw, SK_XMAC_REG(port,r), v); +} + +static inline void xm_outhash(const struct skge_hw *hw, int port, int reg, + const u8 *hash) +{ + xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8)); + xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8)); + xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8)); + xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8)); +} + +static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg, + const u8 *addr) +{ + xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8)); + xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8)); + xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8)); +} + +#define SK_GMAC_REG(port,reg) \ + (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg)) + +static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg) +{ + return skge_read16(hw, SK_GMAC_REG(port,reg)); +} + +static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg) +{ + return (u32) skge_read16(hw, SK_GMAC_REG(port,reg)) + | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16); +} + +static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v) +{ + skge_write16(hw, SK_GMAC_REG(port,r), v); +} + +static inline void gma_set_addr(struct skge_hw *hw, int port, int reg, + const u8 *addr) +{ + gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8)); + gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); + gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); +} + +#endif diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c new file mode 100644 index 000000000000..57339da76326 --- /dev/null +++ b/drivers/net/ethernet/marvell/sky2.c @@ -0,0 +1,5130 @@ +/* + * New driver for Marvell Yukon 2 chipset. + * Based on earlier sk98lin, and skge driver. + * + * This driver intentionally does not support all the features + * of the original driver such as link fail-over and link management because + * those should be done at higher levels. + * + * Copyright (C) 2005 Stephen Hemminger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/slab.h>
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/mii.h>
+
+#include <asm/irq.h>
+
+#include "sky2.h"
+
+#define DRV_NAME		"sky2"
+#define DRV_VERSION		"1.29"
+
+/*
+ * The Yukon II chipset takes 64 bit command blocks (called list elements)
+ * that are organized into three (receive, transmit, status) different rings
+ * similar to Tigon3.
+ */
+
+#define RX_LE_SIZE		1024
+#define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
+#define RX_MAX_PENDING		(RX_LE_SIZE/6 - 2)
+#define RX_DEF_PENDING		RX_MAX_PENDING
+
+/* This is the worst case number of transmit list elements for a single skb:
+   VLAN:GSO + CKSUM + Data + skb_frags * DMA */
+#define MAX_SKB_TX_LE	(2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
+#define TX_MIN_PENDING		(MAX_SKB_TX_LE+1)
+#define TX_MAX_PENDING		1024
+#define TX_DEF_PENDING		127
+
+#define TX_WATCHDOG		(5 * HZ)
+#define NAPI_WEIGHT		64
+#define PHY_RETRIES		1000
+
+#define SKY2_EEPROM_MAGIC	0x9955aabb
+
+#define RING_NEXT(x, s)	(((x)+1) & ((s)-1))
+
+static const u32 default_msg =
+    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+    | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
+    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1;		/* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int copybreak __read_mostly = 128;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+static int disable_msi = 0;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) },	/* DGE-550SX */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) },	/* DGE-550T */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
+	{ 
PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */ + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, sky2_id_table); + +/* Avoid conditionals by using array */ +static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; +static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; +static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 }; + +static void sky2_set_multicast(struct net_device *dev); + +/* Access to PHY via serial interconnect */ +static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) +{ + int i; + + gma_write16(hw, port, GM_SMI_DATA, val); + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg)); + + for (i = 0; i < PHY_RETRIES; i++) { + u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); + if (ctrl == 0xffff) + goto io_error; + + if (!(ctrl & GM_SMI_CT_BUSY)) + return 0; + + udelay(10); + } + + dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name); + return -ETIMEDOUT; + +io_error: + dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); + return -EIO; +} + +static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val) +{ + int i; + + gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) + | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); + + for (i = 0; i < PHY_RETRIES; i++) { + u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); + if (ctrl == 0xffff) + goto io_error; + + if (ctrl & GM_SMI_CT_RD_VAL) { + *val = gma_read16(hw, port, GM_SMI_DATA); + return 0; + } + + udelay(10); + } + + dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name); + return -ETIMEDOUT; +io_error: + dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); + return -EIO; +} + +static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) +{ + u16 v; + __gm_phy_read(hw, port, reg, &v); + return v; +} + + +static void sky2_power_on(struct sky2_hw *hw) +{ + /* switch power to VCC (WA for VAUX problem) */ + sky2_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); + + /* disable Core Clock Division, */ + sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); + + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) + /* enable bits are inverted */ + sky2_write8(hw, B2_Y2_CLK_GATE, + Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | + Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | + Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); + else + sky2_write8(hw, B2_Y2_CLK_GATE, 0); + + if (hw->flags & SKY2_HW_ADV_POWER_CTL) { + u32 reg; + + sky2_pci_write32(hw, 
PCI_DEV_REG3, 0); + + reg = sky2_pci_read32(hw, PCI_DEV_REG4); + /* set all bits to 0 except bits 15..12 and 8 */ + reg &= P_ASPM_CONTROL_MSK; + sky2_pci_write32(hw, PCI_DEV_REG4, reg); + + reg = sky2_pci_read32(hw, PCI_DEV_REG5); + /* set all bits to 0 except bits 28 & 27 */ + reg &= P_CTL_TIM_VMAIN_AV_MSK; + sky2_pci_write32(hw, PCI_DEV_REG5, reg); + + sky2_pci_write32(hw, PCI_CFG_REG_1, 0); + + sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON); + + /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ + reg = sky2_read32(hw, B2_GP_IO); + reg |= GLB_GPIO_STAT_RACE_DIS; + sky2_write32(hw, B2_GP_IO, reg); + + sky2_read32(hw, B2_GP_IO); + } + + /* Turn on "driver loaded" LED */ + sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON); +} + +static void sky2_power_aux(struct sky2_hw *hw) +{ + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) + sky2_write8(hw, B2_Y2_CLK_GATE, 0); + else + /* enable bits are inverted */ + sky2_write8(hw, B2_Y2_CLK_GATE, + Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | + Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | + Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); + + /* switch power to VAUX if supported and PME from D3cold */ + if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) && + pci_pme_capable(hw->pdev, PCI_D3cold)) + sky2_write8(hw, B0_POWER_CTRL, + (PC_VAUX_ENA | PC_VCC_ENA | + PC_VAUX_ON | PC_VCC_OFF)); + + /* turn off "driver loaded LED" */ + sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF); +} + +static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) +{ + u16 reg; + + /* disable all GMAC IRQ's */ + sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + + gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ + gma_write16(hw, port, GM_MC_ADDR_H2, 0); + gma_write16(hw, port, GM_MC_ADDR_H3, 0); + gma_write16(hw, port, GM_MC_ADDR_H4, 0); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA; + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +/* flow control to advertise bits */ +static const u16 copper_fc_adv[] = { + [FC_NONE] = 0, + [FC_TX] = PHY_M_AN_ASP, + [FC_RX] = PHY_M_AN_PC, + [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP, +}; + +/* flow control to advertise bits when using 1000BaseX */ +static const u16 fiber_fc_adv[] = { + [FC_NONE] = PHY_M_P_NO_PAUSE_X, + [FC_TX] = PHY_M_P_ASYM_MD_X, + [FC_RX] = PHY_M_P_SYM_MD_X, + [FC_BOTH] = PHY_M_P_BOTH_MD_X, +}; + +/* flow control to GMA disable bits */ +static const u16 gm_fc_disable[] = { + [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS, + [FC_TX] = GM_GPCR_FC_RX_DIS, + [FC_RX] = GM_GPCR_FC_TX_DIS, + [FC_BOTH] = 0, +}; + + +static void sky2_phy_init(struct sky2_hw *hw, unsigned port) +{ + struct sky2_port *sky2 = netdev_priv(hw->dev[port]); + u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; + + if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && + !(hw->flags & SKY2_HW_NEWER_PHY)) { + u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + + ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | + PHY_M_EC_MAC_S_MSK); + ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + + /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */ + if (hw->chip_id == CHIP_ID_YUKON_EC) + /* set downshift counter to 3x and enable downshift */ + ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA; + else + /* set master & slave downshift counter to 1x */ + ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); + + gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); + } + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + if (sky2_is_copper(hw)) { + if (!(hw->flags & SKY2_HW_GIGABIT)) { + /* enable automatic 
crossover */ + ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; + + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + u16 spec; + + /* Enable Class A driver for FE+ A0 */ + spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2); + spec |= PHY_M_FESC_SEL_CL_A; + gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); + } + } else { + if (hw->chip_id >= CHIP_ID_YUKON_OPT) { + u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2); + + /* enable PHY Reverse Auto-Negotiation */ + ctrl2 |= 1u << 13; + + /* Write PHY changes (SW-reset must follow) */ + gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2); + } + + + /* disable energy detect */ + ctrl &= ~PHY_M_PC_EN_DET_MSK; + + /* enable automatic crossover */ + ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); + + /* downshift on PHY 88E1112 and 88E1149 is changed */ + if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && + (hw->flags & SKY2_HW_NEWER_PHY)) { + /* set downshift counter to 3x and enable downshift */ + ctrl &= ~PHY_M_PC_DSC_MSK; + ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; + } + } + } else { + /* workaround for deviation #4.88 (CRC errors) */ + /* disable Automatic Crossover */ + + ctrl &= ~PHY_M_PC_MDIX_MSK; + } + + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* special setup for PHY 88E1112 Fiber */ + if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) { + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + + /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl &= ~PHY_M_MAC_MD_MSK; + ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX); + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + if (hw->pmd_type == 'P') { + /* select page 1 to access Fiber registers */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1); + + /* for SFP-module set SIGDET polarity to low */ + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_FIB_SIGD_POL; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + } + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + } + + ctrl = PHY_CT_RESET; + ct1000 = 0; + adv = PHY_AN_CSMA; + reg = 0; + + if (sky2->flags & SKY2_FLAG_AUTO_SPEED) { + if (sky2_is_copper(hw)) { + if (sky2->advertising & ADVERTISED_1000baseT_Full) + ct1000 |= PHY_M_1000C_AFD; + if (sky2->advertising & ADVERTISED_1000baseT_Half) + ct1000 |= PHY_M_1000C_AHD; + if (sky2->advertising & ADVERTISED_100baseT_Full) + adv |= PHY_M_AN_100_FD; + if (sky2->advertising & ADVERTISED_100baseT_Half) + adv |= PHY_M_AN_100_HD; + if (sky2->advertising & ADVERTISED_10baseT_Full) + adv |= PHY_M_AN_10_FD; + if (sky2->advertising & ADVERTISED_10baseT_Half) + adv |= PHY_M_AN_10_HD; + + } else { /* special defines for FIBER (88E1040S only) */ + if (sky2->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_M_AN_1000X_AFD; + if (sky2->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_M_AN_1000X_AHD; + } + + /* Restart Auto-negotiation */ + ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* forced speed/duplex settings */ + ct1000 = PHY_M_1000C_MSE; + + /* Disable auto update for duplex flow control and duplex */ + reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS; + + switch (sky2->speed) { + case SPEED_1000: + ctrl |= PHY_CT_SP1000; + reg |= GM_GPCR_SPEED_1000; + break; + case SPEED_100: + ctrl |= PHY_CT_SP100; + reg |= GM_GPCR_SPEED_100; + break; + } + + if (sky2->duplex == DUPLEX_FULL) { + reg |= GM_GPCR_DUP_FULL; + ctrl |= PHY_CT_DUP_MD; + } else if (sky2->speed < SPEED_1000) + sky2->flow_mode = FC_NONE; 
+ } + + if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) { + if (sky2_is_copper(hw)) + adv |= copper_fc_adv[sky2->flow_mode]; + else + adv |= fiber_fc_adv[sky2->flow_mode]; + } else { + reg |= GM_GPCR_AU_FCT_DIS; + reg |= gm_fc_disable[sky2->flow_mode]; + + /* Forward pause packets to GMAC? */ + if (sky2->flow_mode & FC_RX) + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + else + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + } + + gma_write16(hw, port, GM_GP_CTRL, reg); + + if (hw->flags & SKY2_HW_GIGABIT) + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); + + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* Setup Phy LED's */ + ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS); + ledover = 0; + + switch (hw->chip_id) { + case CHIP_ID_YUKON_FE: + /* on 88E3082 these bits are at 11..9 (shifted left) */ + ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1; + + ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR); + + /* delete ACT LED control bits */ + ctrl &= ~PHY_M_FELP_LED1_MSK; + /* change ACT LED control to blink mode */ + ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL); + gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); + break; + + case CHIP_ID_YUKON_FE_P: + /* Enable Link Partner Next Page */ + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_PC_ENA_LIP_NP; + + /* disable Energy Detect and enable scrambler */ + ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB); + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */ + ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) | + PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) | + PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED); + + gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); + break; + + case CHIP_ID_YUKON_XL: + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + + /* select page 3 to access LED control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); + + /* set LED Function Control register */ + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ + PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ + PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ + PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ + + /* set Polarity Control register */ + gm_phy_write(hw, port, PHY_MARV_PHY_STAT, + (PHY_M_POLC_LS1_P_MIX(4) | + PHY_M_POLC_IS0_P_MIX(4) | + PHY_M_POLC_LOS_CTRL(2) | + PHY_M_POLC_INIT_CTRL(2) | + PHY_M_POLC_STA1_CTRL(2) | + PHY_M_POLC_STA0_CTRL(2))); + + /* restore page register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + break; + + case CHIP_ID_YUKON_EC_U: + case CHIP_ID_YUKON_EX: + case CHIP_ID_YUKON_SUPR: + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + + /* select page 3 to access LED control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); + + /* set LED Function Control register */ + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ + PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */ + PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ + PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */ + + /* set Blink Rate in LED Timer Control Register */ + gm_phy_write(hw, port, PHY_MARV_INT_MASK, + ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS)); + /* restore page register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + break; + + default: + /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ + ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL; + + /* turn off the Rx LED (LED_RX) */ + ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); + } + + if (hw->chip_id == CHIP_ID_YUKON_EC_U || 
hw->chip_id == CHIP_ID_YUKON_UL_2) { + /* apply fixes in PHY AFE */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255); + + /* increase differential signal amplitude in 10BASE-T */ + gm_phy_write(hw, port, 0x18, 0xaa99); + gm_phy_write(hw, port, 0x17, 0x2011); + + if (hw->chip_id == CHIP_ID_YUKON_EC_U) { + /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ + gm_phy_write(hw, port, 0x18, 0xa204); + gm_phy_write(hw, port, 0x17, 0x2002); + } + + /* set page register to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } else if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + /* apply workaround for integrated resistors calibration */ + gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17); + gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60); + } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { + /* apply fixes in PHY AFE */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff); + + /* apply RDAC termination workaround */ + gm_phy_write(hw, port, 24, 0x2800); + gm_phy_write(hw, port, 23, 0x2001); + + /* set page register back to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } else if (hw->chip_id != CHIP_ID_YUKON_EX && + hw->chip_id < CHIP_ID_YUKON_SUPR) { + /* no effect on Yukon-XL */ + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); + + if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) || + sky2->speed == SPEED_100) { + /* turn on 100 Mbps LED (LED_LINK100) */ + ledover |= PHY_M_LED_MO_100(MO_LED_ON); + } + + if (ledover) + gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); + + } else if (hw->chip_id == CHIP_ID_YUKON_PRM && + (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) { + int i; + /* This a phy register setup workaround copied from vendor driver. */ + static const struct { + u16 reg, val; + } eee_afe[] = { + { 0x156, 0x58ce }, + { 0x153, 0x99eb }, + { 0x141, 0x8064 }, + /* { 0x155, 0x130b },*/ + { 0x000, 0x0000 }, + { 0x151, 0x8433 }, + { 0x14b, 0x8c44 }, + { 0x14c, 0x0f90 }, + { 0x14f, 0x39aa }, + /* { 0x154, 0x2f39 },*/ + { 0x14d, 0xba33 }, + { 0x144, 0x0048 }, + { 0x152, 0x2010 }, + /* { 0x158, 0x1223 },*/ + { 0x140, 0x4444 }, + { 0x154, 0x2f3b }, + { 0x158, 0xb203 }, + { 0x157, 0x2029 }, + }; + + /* Start Workaround for OptimaEEE Rev.Z0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb); + + gm_phy_write(hw, port, 1, 0x4099); + gm_phy_write(hw, port, 3, 0x1120); + gm_phy_write(hw, port, 11, 0x113c); + gm_phy_write(hw, port, 14, 0x8100); + gm_phy_write(hw, port, 15, 0x112a); + gm_phy_write(hw, port, 17, 0x1008); + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc); + gm_phy_write(hw, port, 1, 0x20b0); + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff); + + for (i = 0; i < ARRAY_SIZE(eee_afe); i++) { + /* apply AFE settings */ + gm_phy_write(hw, port, 17, eee_afe[i].val); + gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13); + } + + /* End Workaround for OptimaEEE */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + + /* Enable 10Base-Te (EEE) */ + if (hw->chip_id >= CHIP_ID_YUKON_PRM) { + reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, + reg | PHY_M_10B_TE_ENABLE); + } + } + + /* Enable phy interrupt on auto-negotiation complete (or link up) */ + if (sky2->flags & SKY2_FLAG_AUTO_SPEED) + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); + else + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); +} + +static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; +static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; + +static void 
sky2_phy_power_up(struct sky2_hw *hw, unsigned port) +{ + u32 reg1; + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + reg1 &= ~phy_power[port]; + + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) + reg1 |= coma_mode[port]; + + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + sky2_pci_read32(hw, PCI_DEV_REG1); + + if (hw->chip_id == CHIP_ID_YUKON_FE) + gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE); + else if (hw->flags & SKY2_HW_ADV_POWER_CTL) + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); +} + +static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) +{ + u32 reg1; + u16 ctrl; + + /* release GPHY Control reset */ + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + + /* release GMAC reset */ + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + if (hw->flags & SKY2_HW_NEWER_PHY) { + /* select page 2 to access MAC control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + /* allow GMII Power Down */ + ctrl &= ~PHY_M_MAC_GMIF_PUP; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* set page register back to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } + + /* setup General Purpose Control Register */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 | + GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS | + GM_GPCR_AU_SPD_DIS); + + if (hw->chip_id != CHIP_ID_YUKON_EC) { + if (hw->chip_id == CHIP_ID_YUKON_EC_U) { + /* select page 2 to access MAC control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + /* enable Power Down */ + ctrl |= PHY_M_PC_POW_D_ENA; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* set page register back to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } + + /* set IEEE compatible Power Down Mode (dev. 
#4.99) */ + gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); + } + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); +} + +/* configure IPG according to used link speed */ +static void sky2_set_ipg(struct sky2_port *sky2) +{ + u16 reg; + + reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE); + reg &= ~GM_SMOD_IPG_MSK; + if (sky2->speed > SPEED_100) + reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000); + else + reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100); + gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg); +} + +/* Enable Rx/Tx */ +static void sky2_enable_rx_tx(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 reg; + + reg = gma_read16(hw, port, GM_GP_CTRL); + reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; + gma_write16(hw, port, GM_GP_CTRL, reg); +} + +/* Force a renegotiation */ +static void sky2_phy_reinit(struct sky2_port *sky2) +{ + spin_lock_bh(&sky2->phy_lock); + sky2_phy_init(sky2->hw, sky2->port); + sky2_enable_rx_tx(sky2); + spin_unlock_bh(&sky2->phy_lock); +} + +/* Put device in state to listen for Wake On Lan */ +static void sky2_wol_init(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + enum flow_control save_mode; + u16 ctrl; + + /* Bring hardware out of reset */ + sky2_write16(hw, B0_CTST, CS_RST_CLR); + sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); + + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + /* Force to 10/100 + * sky2_reset will re-enable on resume + */ + save_mode = sky2->flow_mode; + ctrl = sky2->advertising; + + sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full); + sky2->flow_mode = FC_NONE; + + spin_lock_bh(&sky2->phy_lock); + sky2_phy_power_up(hw, port); + sky2_phy_init(hw, port); + spin_unlock_bh(&sky2->phy_lock); + + sky2->flow_mode = save_mode; + sky2->advertising = ctrl; + + /* Set GMAC to no flow control and auto update for speed/duplex */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| + GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); + + /* Set WOL address */ + memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), + sky2->netdev->dev_addr, ETH_ALEN); + + /* Turn on appropriate WOL control bits */ + sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); + ctrl = 0; + if (sky2->wol & WAKE_PHY) + ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; + + if (sky2->wol & WAKE_MAGIC) + ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; + + ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; + sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); + + /* Disable PiG firmware */ + sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF); + + /* block receiver */ + sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); +} + +static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) +{ + struct net_device *dev = hw->dev[port]; + + if ( (hw->chip_id == CHIP_ID_YUKON_EX && + hw->chip_rev != CHIP_REV_YU_EX_A0) || + hw->chip_id >= CHIP_ID_YUKON_FE_P) { + /* Yukon-Extreme B0 and further Extreme devices */ + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 
TX_STFW_ENA); + } else if (dev->mtu > ETH_DATA_LEN) { + /* set Tx GMAC FIFO Almost Empty Threshold */ + sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), + (ECU_JUMBO_WM << 16) | ECU_AE_THR); + + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS); + } else + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); +} + +static void sky2_mac_init(struct sky2_hw *hw, unsigned port) +{ + struct sky2_port *sky2 = netdev_priv(hw->dev[port]); + u16 reg; + u32 rx_reg; + int i; + const u8 *addr = hw->dev[port]->dev_addr; + + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + if (hw->chip_id == CHIP_ID_YUKON_XL && + hw->chip_rev == CHIP_REV_YU_XL_A0 && + port == 1) { + /* WA DEV_472 -- looks like crossed wires on port 2 */ + /* clear GMAC 1 Control reset */ + sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR); + do { + sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET); + sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR); + } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL || + gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 || + gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0); + } + + sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); + + /* Enable Transmit FIFO Underrun */ + sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); + + spin_lock_bh(&sky2->phy_lock); + sky2_phy_power_up(hw, port); + sky2_phy_init(hw, port); + spin_unlock_bh(&sky2->phy_lock); + + /* MIB clear */ + reg = gma_read16(hw, port, GM_PHY_ADDR); + gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); + + for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4) + gma_read16(hw, port, i); + gma_write16(hw, port, GM_PHY_ADDR, reg); + + /* transmit control */ + gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); + + /* receive control reg: unicast + multicast + no FCS */ + gma_write16(hw, port, GM_RX_CTRL, + GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); + + /* transmit flow control */ + gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); + + /* transmit parameter */ + gma_write16(hw, port, GM_TX_PARAM, + TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | + TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | + TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | + TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); + + /* serial mode register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) | + GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000); + + if (hw->dev[port]->mtu > ETH_DATA_LEN) + reg |= GM_SMOD_JUMBO_ENA; + + if (hw->chip_id == CHIP_ID_YUKON_EC_U && + hw->chip_rev == CHIP_REV_YU_EC_U_B1) + reg |= GM_NEW_FLOW_CTRL; + + gma_write16(hw, port, GM_SERIAL_MODE, reg); + + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); + + /* ignore counter overflows */ + gma_write16(hw, port, GM_TX_IRQ_MSK, 0); + gma_write16(hw, port, GM_RX_IRQ_MSK, 0); + gma_write16(hw, port, GM_TR_IRQ_MSK, 0); + + /* Configure Rx MAC FIFO */ + sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); + rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + if (hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_FE_P) + rx_reg |= GMF_RX_OVER_ON; + + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); + + if (hw->chip_id == CHIP_ID_YUKON_XL) { + /* Hardware errata - clear flush mask */ + sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0); + } else { + /* Flush Rx MAC FIFO on any flow control or error */ + sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 
GMR_FS_ANY_ERR); + } + + /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */ + reg = RX_GMF_FL_THR_DEF + 1; + /* Another magic mystery workaround from sk98lin */ + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) + reg = 0x178; + sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg); + + /* Configure Tx MAC FIFO */ + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); + sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); + + /* On chips without ram buffer, pause is controlled by MAC level */ + if (!(hw->flags & SKY2_HW_RAM_BUFFER)) { + /* Pause threshold is scaled by 8 in bytes */ + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) + reg = 1568 / 8; + else + reg = 1024 / 8; + sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg); + sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8); + + sky2_set_tx_stfwd(hw, port); + } + + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + /* disable dynamic watermark */ + reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA)); + reg &= ~TX_DYN_WM_ENA; + sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg); + } +} + +/* Assign Ram Buffer allocation to queue */ +static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) +{ + u32 end; + + /* convert from K bytes to qwords used for hw register */ + start *= 1024/8; + space *= 1024/8; + end = start + space - 1; + + sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); + sky2_write32(hw, RB_ADDR(q, RB_START), start); + sky2_write32(hw, RB_ADDR(q, RB_END), end); + sky2_write32(hw, RB_ADDR(q, RB_WP), start); + sky2_write32(hw, RB_ADDR(q, RB_RP), start); + + if (q == Q_R1 || q == Q_R2) { + u32 tp = space - space/4; + + /* On receive queue's set the thresholds + * give receiver priority when > 3/4 full + * send pause when down to 2K + */ + sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); + sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); + + tp = space - 2048/8; + sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); + sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); + } else { + /* Enable store & forward on Tx queue's because + * Tx FIFO is only 1K on Yukon + */ + sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); + } + + sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); + sky2_read8(hw, RB_ADDR(q, RB_CTRL)); +} + +/* Setup Bus Memory Interface */ +static void sky2_qset(struct sky2_hw *hw, u16 q) +{ + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET); + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT); + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON); + sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT); +} + +/* Setup prefetch unit registers. 
This is the interface between + * hardware and driver list elements + */ +static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, + dma_addr_t addr, u32 last) +{ + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr)); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr)); + sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON); + + sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL)); +} + +static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) +{ + struct sky2_tx_le *le = sky2->tx_le + *slot; + + *slot = RING_NEXT(*slot, sky2->tx_ring_size); + le->ctrl = 0; + return le; +} + +static void tx_init(struct sky2_port *sky2) +{ + struct sky2_tx_le *le; + + sky2->tx_prod = sky2->tx_cons = 0; + sky2->tx_tcpsum = 0; + sky2->tx_last_mss = 0; + + le = get_tx_le(sky2, &sky2->tx_prod); + le->addr = 0; + le->opcode = OP_ADDR64 | HW_OWNER; + sky2->tx_last_upper = 0; +} + +/* Update chip's next pointer */ +static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) +{ + /* Make sure write' to descriptors are complete before we tell hardware */ + wmb(); + sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); + + /* Synchronize I/O on since next processor may write to tail */ + mmiowb(); +} + + +static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) +{ + struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; + sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE); + le->ctrl = 0; + return le; +} + +static unsigned sky2_get_rx_threshold(struct sky2_port *sky2) +{ + unsigned size; + + /* Space needed for frame data + headers rounded up */ + size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); + + /* Stopping point for hardware truncation */ + return (size - 8) / sizeof(u32); +} + +static unsigned sky2_get_rx_data_size(struct sky2_port *sky2) +{ + struct rx_ring_info *re; + unsigned size; + + /* Space needed for frame data + headers rounded up */ + size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); + + sky2->rx_nfrags = size >> PAGE_SHIFT; + BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr)); + + /* Compute residue after pages */ + size -= sky2->rx_nfrags << PAGE_SHIFT; + + /* Optimize to handle small packets and headers */ + if (size < copybreak) + size = copybreak; + if (size < ETH_HLEN) + size = ETH_HLEN; + + return size; +} + +/* Build description to hardware for one receive segment */ +static void sky2_rx_add(struct sky2_port *sky2, u8 op, + dma_addr_t map, unsigned len) +{ + struct sky2_rx_le *le; + + if (sizeof(dma_addr_t) > sizeof(u32)) { + le = sky2_next_rx(sky2); + le->addr = cpu_to_le32(upper_32_bits(map)); + le->opcode = OP_ADDR64 | HW_OWNER; + } + + le = sky2_next_rx(sky2); + le->addr = cpu_to_le32(lower_32_bits(map)); + le->length = cpu_to_le16(len); + le->opcode = op | HW_OWNER; +} + +/* Build description to hardware for one possibly fragmented skb */ +static void sky2_rx_submit(struct sky2_port *sky2, + const struct rx_ring_info *re) +{ + int i; + + sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size); + + for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++) + sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE); +} + + +static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, + unsigned size) +{ + struct sk_buff *skb = re->skb; + int i; 
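+ /* Map the linear data area first, then each page fragment.
+ * If any mapping fails, everything mapped so far is unmapped
+ * again below before the error is returned.
+ */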
+ + re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, re->data_addr)) + goto mapping_error; + + dma_unmap_len_set(re, data_size, size); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + re->frag_addr[i] = pci_map_page(pdev, frag->page, + frag->page_offset, + frag->size, + PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(pdev, re->frag_addr[i])) + goto map_page_error; + } + return 0; + +map_page_error: + while (--i >= 0) { + pci_unmap_page(pdev, re->frag_addr[i], + skb_shinfo(skb)->frags[i].size, + PCI_DMA_FROMDEVICE); + } + + pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size), + PCI_DMA_FROMDEVICE); + +mapping_error: + if (net_ratelimit()) + dev_warn(&pdev->dev, "%s: rx mapping error\n", + skb->dev->name); + return -EIO; +} + +static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) +{ + struct sk_buff *skb = re->skb; + int i; + + pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size), + PCI_DMA_FROMDEVICE); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) + pci_unmap_page(pdev, re->frag_addr[i], + skb_shinfo(skb)->frags[i].size, + PCI_DMA_FROMDEVICE); +} + +/* Tell chip where to start receive checksum. + * Actually has two checksums, but set both same to avoid possible byte + * order problems. + */ +static void rx_set_checksum(struct sky2_port *sky2) +{ + struct sky2_rx_le *le = sky2_next_rx(sky2); + + le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); + le->ctrl = 0; + le->opcode = OP_TCPSTART | HW_OWNER; + + sky2_write32(sky2->hw, + Q_ADDR(rxqaddr[sky2->port], Q_CSR), + (sky2->netdev->features & NETIF_F_RXCSUM) + ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); +} + +/* Enable/disable receive hash calculation (RSS) */ +static void rx_set_rss(struct net_device *dev, u32 features) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + int i, nkeys = 4; + + /* Supports IPv6 and other modes */ + if (hw->flags & SKY2_HW_NEW_LE) { + nkeys = 10; + sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL); + } + + /* Program RSS initial values */ + if (features & NETIF_F_RXHASH) { + u32 key[nkeys]; + + get_random_bytes(key, nkeys * sizeof(u32)); + for (i = 0; i < nkeys; i++) + sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), + key[i]); + + /* Need to turn on (undocumented) flag to make hashing work */ + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), + RX_STFW_ENA); + + sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), + BMU_ENA_RX_RSS_HASH); + } else + sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), + BMU_DIS_RX_RSS_HASH); +} + +/* + * The RX Stop command will not work for Yukon-2 if the BMU does not + * reach the end of packet and since we can't make sure that we have + * incoming data, we must reset the BMU while it is not doing a DMA + * transfer. Since it is possible that the RX path is still active, + * the RX RAM buffer will be stopped first, so any possible incoming + * data will not trigger a DMA. After the RAM buffer is stopped, the + * BMU is polled until any DMA in progress is ended and only then it + * will be reset. 
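+ * The poll loop below compares two RAM buffer level registers
+ * until they match, taken as an indication that the buffer has
+ * drained; if they never match, a warning is logged and the
+ * reset is done anyway.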
+ */ +static void sky2_rx_stop(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned rxq = rxqaddr[sky2->port]; + int i; + + /* disable the RAM Buffer receive queue */ + sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD); + + for (i = 0; i < 0xffff; i++) + if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL)) + == sky2_read8(hw, RB_ADDR(rxq, Q_RL))) + goto stopped; + + netdev_warn(sky2->netdev, "receiver stop failed\n"); +stopped: + sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); + + /* reset the Rx prefetch unit */ + sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); + mmiowb(); +} + +/* Clean out receive buffer area, assumes receiver hardware stopped */ +static void sky2_rx_clean(struct sky2_port *sky2) +{ + unsigned i; + + memset(sky2->rx_le, 0, RX_LE_BYTES); + for (i = 0; i < sky2->rx_pending; i++) { + struct rx_ring_info *re = sky2->rx_ring + i; + + if (re->skb) { + sky2_rx_unmap_skb(sky2->hw->pdev, re); + kfree_skb(re->skb); + re->skb = NULL; + } + } +} + +/* Basic MII support */ +static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + int err = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -ENODEV; /* Phy still in reset */ + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = PHY_ADDR_MARV; + + /* fallthru */ + case SIOCGMIIREG: { + u16 val = 0; + + spin_lock_bh(&sky2->phy_lock); + err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); + spin_unlock_bh(&sky2->phy_lock); + + data->val_out = val; + break; + } + + case SIOCSMIIREG: + spin_lock_bh(&sky2->phy_lock); + err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, + data->val_in); + spin_unlock_bh(&sky2->phy_lock); + break; + } + return err; +} + +#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO) + +static void sky2_vlan_mode(struct net_device *dev, u32 features) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + u16 port = sky2->port; + + if (features & NETIF_F_HW_VLAN_RX) + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), + RX_VLAN_STRIP_ON); + else + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), + RX_VLAN_STRIP_OFF); + + if (features & NETIF_F_HW_VLAN_TX) { + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), + TX_VLAN_TAG_ON); + + dev->vlan_features |= SKY2_VLAN_OFFLOADS; + } else { + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), + TX_VLAN_TAG_OFF); + + /* Can't do transmit offload of vlan without hw vlan */ + dev->vlan_features &= ~SKY2_VLAN_OFFLOADS; + } +} + +/* Amount of required worst case padding in rx buffer */ +static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) +{ + return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2; +} + +/* + * Allocate an skb for receiving. If the MTU is large enough + * make the skb non-linear with a fragment list of pages. + */ +static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp) +{ + struct sk_buff *skb; + int i; + + skb = __netdev_alloc_skb(sky2->netdev, + sky2->rx_data_size + sky2_rx_pad(sky2->hw), + gfp); + if (!skb) + goto nomem; + + if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) { + unsigned char *start; + /* + * Workaround for a bug in FIFO that cause hang + * if the FIFO if the receive buffer is not 64 byte aligned. + * The buffer returned from netdev_alloc_skb is + * aligned except if slab debugging is enabled. 
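+ * PTR_ALIGN below rounds skb->data up to the next 8 byte
+ * boundary and skb_reserve() advances the data pointer by the
+ * difference, restoring the alignment the chip expects.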
+ */ + start = PTR_ALIGN(skb->data, 8); + skb_reserve(skb, start - skb->data); + } else + skb_reserve(skb, NET_IP_ALIGN); + + for (i = 0; i < sky2->rx_nfrags; i++) { + struct page *page = alloc_page(gfp); + + if (!page) + goto free_partial; + skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE); + } + + return skb; +free_partial: + kfree_skb(skb); +nomem: + return NULL; +} + +static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq) +{ + sky2_put_idx(sky2->hw, rxq, sky2->rx_put); +} + +static int sky2_alloc_rx_skbs(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned i; + + sky2->rx_data_size = sky2_get_rx_data_size(sky2); + + /* Fill Rx ring */ + for (i = 0; i < sky2->rx_pending; i++) { + struct rx_ring_info *re = sky2->rx_ring + i; + + re->skb = sky2_rx_alloc(sky2, GFP_KERNEL); + if (!re->skb) + return -ENOMEM; + + if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) { + dev_kfree_skb(re->skb); + re->skb = NULL; + return -ENOMEM; + } + } + return 0; +} + +/* + * Setup receiver buffer pool. + * Normal case this ends up creating one list element for skb + * in the receive ring. Worst case if using large MTU and each + * allocation falls on a different 64 bit region, that results + * in 6 list elements per ring entry. + * One element is used for checksum enable/disable, and one + * extra to avoid wrap. + */ +static void sky2_rx_start(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + struct rx_ring_info *re; + unsigned rxq = rxqaddr[sky2->port]; + unsigned i, thresh; + + sky2->rx_put = sky2->rx_next = 0; + sky2_qset(hw, rxq); + + /* On PCI express lowering the watermark gives better performance */ + if (pci_is_pcie(hw->pdev)) + sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX); + + /* These chips have no ram buffer? + * MAC Rx RAM Read is controlled by hardware */ + if (hw->chip_id == CHIP_ID_YUKON_EC_U && + hw->chip_rev > CHIP_REV_YU_EC_U_A0) + sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS); + + sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); + + if (!(hw->flags & SKY2_HW_NEW_LE)) + rx_set_checksum(sky2); + + if (!(hw->flags & SKY2_HW_RSS_BROKEN)) + rx_set_rss(sky2->netdev, sky2->netdev->features); + + /* submit Rx ring */ + for (i = 0; i < sky2->rx_pending; i++) { + re = sky2->rx_ring + i; + sky2_rx_submit(sky2, re); + } + + /* + * The receiver hangs if it receives frames larger than the + * packet buffer. As a workaround, truncate oversize frames, but + * the register is limited to 9 bits, so if you do frames > 2052 + * you better get the MTU right! + */ + thresh = sky2_get_rx_threshold(sky2); + if (thresh > 0x1ff) + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF); + else { + sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh); + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON); + } + + /* Tell chip about available buffers */ + sky2_rx_update(sky2, rxq); + + if (hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_SUPR) { + /* + * Disable flushing of non ASF packets; + * must be done after initializing the BMUs; + * drivers without ASF support should do this too, otherwise + * it may happen that they cannot run on ASF devices; + * remember that the MAC FIFO isn't reset during initialization. 
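+ * (An ASF-aware agent may have left the flush bit set, and it
+ * would otherwise survive this driver's initialization.)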
+ */ + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF); + } + + if (hw->chip_id >= CHIP_ID_YUKON_SUPR) { + /* Enable RX Home Address & Routing Header checksum fix */ + sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL), + RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA); + + /* Enable TX Home Address & Routing Header checksum fix */ + sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST), + TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN); + } +} + +static int sky2_alloc_buffers(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + + /* must be power of 2 */ + sky2->tx_le = pci_alloc_consistent(hw->pdev, + sky2->tx_ring_size * + sizeof(struct sky2_tx_le), + &sky2->tx_le_map); + if (!sky2->tx_le) + goto nomem; + + sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info), + GFP_KERNEL); + if (!sky2->tx_ring) + goto nomem; + + sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES, + &sky2->rx_le_map); + if (!sky2->rx_le) + goto nomem; + memset(sky2->rx_le, 0, RX_LE_BYTES); + + sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info), + GFP_KERNEL); + if (!sky2->rx_ring) + goto nomem; + + return sky2_alloc_rx_skbs(sky2); +nomem: + return -ENOMEM; +} + +static void sky2_free_buffers(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + + sky2_rx_clean(sky2); + + if (sky2->rx_le) { + pci_free_consistent(hw->pdev, RX_LE_BYTES, + sky2->rx_le, sky2->rx_le_map); + sky2->rx_le = NULL; + } + if (sky2->tx_le) { + pci_free_consistent(hw->pdev, + sky2->tx_ring_size * sizeof(struct sky2_tx_le), + sky2->tx_le, sky2->tx_le_map); + sky2->tx_le = NULL; + } + kfree(sky2->tx_ring); + kfree(sky2->rx_ring); + + sky2->tx_ring = NULL; + sky2->rx_ring = NULL; +} + +static void sky2_hw_up(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u32 ramsize; + int cap; + struct net_device *otherdev = hw->dev[sky2->port^1]; + + tx_init(sky2); + + /* + * On dual port PCI-X card, there is an problem where status + * can be received out of order due to split transactions + */ + if (otherdev && netif_running(otherdev) && + (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { + u16 cmd; + + cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); + cmd &= ~PCI_X_CMD_MAX_SPLIT; + sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); + } + + sky2_mac_init(hw, port); + + /* Register is number of 4K blocks on internal RAM buffer. */ + ramsize = sky2_read8(hw, B2_E_0) * 4; + if (ramsize > 0) { + u32 rxspace; + + netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize); + if (ramsize < 16) + rxspace = ramsize / 2; + else + rxspace = 8 + (2*(ramsize - 16))/3; + + sky2_ramset(hw, rxqaddr[port], 0, rxspace); + sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); + + /* Make sure SyncQ is disabled */ + sky2_write8(hw, RB_ADDR(port == 0 ? 
Q_XS1 : Q_XS2, RB_CTRL), + RB_RST_SET); + } + + sky2_qset(hw, txqaddr[port]); + + /* This is copied from sk98lin 10.0.5.3; no one tells me about erratta's */ + if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0) + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF); + + /* Set almost empty threshold */ + if (hw->chip_id == CHIP_ID_YUKON_EC_U && + hw->chip_rev == CHIP_REV_YU_EC_U_A0) + sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV); + + sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, + sky2->tx_ring_size - 1); + + sky2_vlan_mode(sky2->netdev, sky2->netdev->features); + netdev_update_features(sky2->netdev); + + sky2_rx_start(sky2); +} + +/* Bring up network interface. */ +static int sky2_up(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u32 imask; + int err; + + netif_carrier_off(dev); + + err = sky2_alloc_buffers(sky2); + if (err) + goto err_out; + + sky2_hw_up(sky2); + + /* Enable interrupts from phy/mac for port */ + imask = sky2_read32(hw, B0_IMSK); + imask |= portirq_msk[port]; + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + + netif_info(sky2, ifup, dev, "enabling interface\n"); + + return 0; + +err_out: + sky2_free_buffers(sky2); + return err; +} + +/* Modular subtraction in ring */ +static inline int tx_inuse(const struct sky2_port *sky2) +{ + return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1); +} + +/* Number of list elements available for next tx */ +static inline int tx_avail(const struct sky2_port *sky2) +{ + return sky2->tx_pending - tx_inuse(sky2); +} + +/* Estimate of number of transmit list elements required */ +static unsigned tx_le_req(const struct sk_buff *skb) +{ + unsigned count; + + count = (skb_shinfo(skb)->nr_frags + 1) + * (sizeof(dma_addr_t) / sizeof(u32)); + + if (skb_is_gso(skb)) + ++count; + else if (sizeof(dma_addr_t) == sizeof(u32)) + ++count; /* possible vlan */ + + if (skb->ip_summed == CHECKSUM_PARTIAL) + ++count; + + return count; +} + +static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) +{ + if (re->flags & TX_MAP_SINGLE) + pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr), + dma_unmap_len(re, maplen), + PCI_DMA_TODEVICE); + else if (re->flags & TX_MAP_PAGE) + pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr), + dma_unmap_len(re, maplen), + PCI_DMA_TODEVICE); + re->flags = 0; +} + +/* + * Put one packet in ring for transmit. + * A single packet can generate multiple list elements, and + * the number of ring elements will probably be less than the number + * of list elements used. 
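+ * tx_le_req() above gives a worst case estimate: one element per
+ * buffer (the header and each fragment), a possible OP_ADDR64
+ * element per buffer with 64 bit DMA addresses, plus optional
+ * MSS, VLAN and checksum elements.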
+ */ +static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, + struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + struct sky2_tx_le *le = NULL; + struct tx_ring_info *re; + unsigned i, len; + dma_addr_t mapping; + u32 upper; + u16 slot; + u16 mss; + u8 ctrl; + + if (unlikely(tx_avail(sky2) < tx_le_req(skb))) + return NETDEV_TX_BUSY; + + len = skb_headlen(skb); + mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + + if (pci_dma_mapping_error(hw->pdev, mapping)) + goto mapping_error; + + slot = sky2->tx_prod; + netif_printk(sky2, tx_queued, KERN_DEBUG, dev, + "tx queued, slot %u, len %d\n", slot, skb->len); + + /* Send high bits if needed */ + upper = upper_32_bits(mapping); + if (upper != sky2->tx_last_upper) { + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(upper); + sky2->tx_last_upper = upper; + le->opcode = OP_ADDR64 | HW_OWNER; + } + + /* Check for TCP Segmentation Offload */ + mss = skb_shinfo(skb)->gso_size; + if (mss != 0) { + + if (!(hw->flags & SKY2_HW_NEW_LE)) + mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); + + if (mss != sky2->tx_last_mss) { + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(mss); + + if (hw->flags & SKY2_HW_NEW_LE) + le->opcode = OP_MSS | HW_OWNER; + else + le->opcode = OP_LRGLEN | HW_OWNER; + sky2->tx_last_mss = mss; + } + } + + ctrl = 0; + + /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ + if (vlan_tx_tag_present(skb)) { + if (!le) { + le = get_tx_le(sky2, &slot); + le->addr = 0; + le->opcode = OP_VLAN|HW_OWNER; + } else + le->opcode |= OP_VLAN; + le->length = cpu_to_be16(vlan_tx_tag_get(skb)); + ctrl |= INS_VLAN; + } + + /* Handle TCP checksum offload */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* On Yukon EX (some versions) encoding change. */ + if (hw->flags & SKY2_HW_AUTO_TX_SUM) + ctrl |= CALSUM; /* auto checksum */ + else { + const unsigned offset = skb_transport_offset(skb); + u32 tcpsum; + + tcpsum = offset << 16; /* sum start */ + tcpsum |= offset + skb->csum_offset; /* sum write */ + + ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; + if (ip_hdr(skb)->protocol == IPPROTO_UDP) + ctrl |= UDPTCP; + + if (tcpsum != sky2->tx_tcpsum) { + sky2->tx_tcpsum = tcpsum; + + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(tcpsum); + le->length = 0; /* initial checksum value */ + le->ctrl = 1; /* one packet */ + le->opcode = OP_TCPLISW | HW_OWNER; + } + } + } + + re = sky2->tx_ring + slot; + re->flags = TX_MAP_SINGLE; + dma_unmap_addr_set(re, mapaddr, mapping); + dma_unmap_len_set(re, maplen, len); + + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(lower_32_bits(mapping)); + le->length = cpu_to_le16(len); + le->ctrl = ctrl; + le->opcode = mss ? 
(OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER); + + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, + frag->size, PCI_DMA_TODEVICE); + + if (pci_dma_mapping_error(hw->pdev, mapping)) + goto mapping_unwind; + + upper = upper_32_bits(mapping); + if (upper != sky2->tx_last_upper) { + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(upper); + sky2->tx_last_upper = upper; + le->opcode = OP_ADDR64 | HW_OWNER; + } + + re = sky2->tx_ring + slot; + re->flags = TX_MAP_PAGE; + dma_unmap_addr_set(re, mapaddr, mapping); + dma_unmap_len_set(re, maplen, frag->size); + + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(lower_32_bits(mapping)); + le->length = cpu_to_le16(frag->size); + le->ctrl = ctrl; + le->opcode = OP_BUFFER | HW_OWNER; + } + + re->skb = skb; + le->ctrl |= EOP; + + sky2->tx_prod = slot; + + if (tx_avail(sky2) <= MAX_SKB_TX_LE) + netif_stop_queue(dev); + + sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); + + return NETDEV_TX_OK; + +mapping_unwind: + for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) { + re = sky2->tx_ring + i; + + sky2_tx_unmap(hw->pdev, re); + } + +mapping_error: + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +/* + * Free ring elements from starting at tx_cons until "done" + * + * NB: + * 1. The hardware will tell us about partial completion of multi-part + * buffers so make sure not to free skb to early. + * 2. This may run in parallel start_xmit because the it only + * looks at the tail of the queue of FIFO (tx_cons), not + * the head (tx_prod) + */ +static void sky2_tx_complete(struct sky2_port *sky2, u16 done) +{ + struct net_device *dev = sky2->netdev; + unsigned idx; + + BUG_ON(done >= sky2->tx_ring_size); + + for (idx = sky2->tx_cons; idx != done; + idx = RING_NEXT(idx, sky2->tx_ring_size)) { + struct tx_ring_info *re = sky2->tx_ring + idx; + struct sk_buff *skb = re->skb; + + sky2_tx_unmap(sky2->hw->pdev, re); + + if (skb) { + netif_printk(sky2, tx_done, KERN_DEBUG, dev, + "tx done %u\n", idx); + + u64_stats_update_begin(&sky2->tx_stats.syncp); + ++sky2->tx_stats.packets; + sky2->tx_stats.bytes += skb->len; + u64_stats_update_end(&sky2->tx_stats.syncp); + + re->skb = NULL; + dev_kfree_skb_any(skb); + + sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); + } + } + + sky2->tx_cons = idx; + smp_mb(); +} + +static void sky2_tx_reset(struct sky2_hw *hw, unsigned port) +{ + /* Disable Force Sync bit and Enable Alloc bit */ + sky2_write8(hw, SK_REG(port, TXA_CTRL), + TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + /* Stop Interval Timer and Limit Counter of Tx Arbiter */ + sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); + sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); + + /* Reset the PCI FIFO of the async Tx queue */ + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), + BMU_RST_SET | BMU_FIFO_RST); + + /* Reset the Tx prefetch units */ + sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL), + PREF_UNIT_RST_SET); + + sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); +} + +static void sky2_hw_down(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 ctrl; + + /* Force flow control off */ + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + + /* Stop transmitter */ + sky2_write32(hw, 
Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP); + sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR)); + + sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), + RB_RST_SET | RB_DIS_OP_MD); + + ctrl = gma_read16(hw, port, GM_GP_CTRL); + ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); + gma_write16(hw, port, GM_GP_CTRL, ctrl); + + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + + /* Workaround shared GMAC reset */ + if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && + port == 0 && hw->dev[1] && netif_running(hw->dev[1]))) + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); + + sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + + /* Force any delayed status interrrupt and NAPI */ + sky2_write32(hw, STAT_LEV_TIMER_CNT, 0); + sky2_write32(hw, STAT_TX_TIMER_CNT, 0); + sky2_write32(hw, STAT_ISR_TIMER_CNT, 0); + sky2_read8(hw, STAT_ISR_TIMER_CTRL); + + sky2_rx_stop(sky2); + + spin_lock_bh(&sky2->phy_lock); + sky2_phy_power_down(hw, port); + spin_unlock_bh(&sky2->phy_lock); + + sky2_tx_reset(hw, port); + + /* Free any pending frames stuck in HW queue */ + sky2_tx_complete(sky2, sky2->tx_prod); +} + +/* Network shutdown */ +static int sky2_down(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + + /* Never really got started! */ + if (!sky2->tx_le) + return 0; + + netif_info(sky2, ifdown, dev, "disabling interface\n"); + + /* Disable port IRQ */ + sky2_write32(hw, B0_IMSK, + sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]); + sky2_read32(hw, B0_IMSK); + + synchronize_irq(hw->pdev->irq); + napi_synchronize(&hw->napi); + + sky2_hw_down(sky2); + + sky2_free_buffers(sky2); + + return 0; +} + +static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux) +{ + if (hw->flags & SKY2_HW_FIBRE_PHY) + return SPEED_1000; + + if (!(hw->flags & SKY2_HW_GIGABIT)) { + if (aux & PHY_M_PS_SPEED_100) + return SPEED_100; + else + return SPEED_10; + } + + switch (aux & PHY_M_PS_SPEED_MSK) { + case PHY_M_PS_SPEED_1000: + return SPEED_1000; + case PHY_M_PS_SPEED_100: + return SPEED_100; + default: + return SPEED_10; + } +} + +static void sky2_link_up(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + static const char *fc_name[] = { + [FC_NONE] = "none", + [FC_TX] = "tx", + [FC_RX] = "rx", + [FC_BOTH] = "both", + }; + + sky2_set_ipg(sky2); + + sky2_enable_rx_tx(sky2); + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); + + netif_carrier_on(sky2->netdev); + + mod_timer(&hw->watchdog_timer, jiffies + 1); + + /* Turn on link LED */ + sky2_write8(hw, SK_REG(port, LNK_LED_REG), + LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); + + netif_info(sky2, link, sky2->netdev, + "Link is up at %d Mbps, %s duplex, flow control %s\n", + sky2->speed, + sky2->duplex == DUPLEX_FULL ? "full" : "half", + fc_name[sky2->flow_status]); +} + +static void sky2_link_down(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 reg; + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); + + reg = gma_read16(hw, port, GM_GP_CTRL); + reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + gma_write16(hw, port, GM_GP_CTRL, reg); + + netif_carrier_off(sky2->netdev); + + /* Turn off link LED */ + sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); + + netif_info(sky2, link, sky2->netdev, "Link is down\n"); + + sky2_phy_init(hw, port); +} + +static enum flow_control sky2_flow(int rx, int tx) +{ + if (rx) + return tx ? FC_BOTH : FC_RX; + else + return tx ? 
FC_TX : FC_NONE; +} + +static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 advert, lpa; + + advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); + lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP); + if (lpa & PHY_M_AN_RF) { + netdev_err(sky2->netdev, "remote fault\n"); + return -1; + } + + if (!(aux & PHY_M_PS_SPDUP_RES)) { + netdev_err(sky2->netdev, "speed/duplex mismatch\n"); + return -1; + } + + sky2->speed = sky2_phy_speed(hw, aux); + sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; + + /* Since the pause result bits seem to in different positions on + * different chips. look at registers. + */ + if (hw->flags & SKY2_HW_FIBRE_PHY) { + /* Shift for bits in fiber PHY */ + advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM); + lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM); + + if (advert & ADVERTISE_1000XPAUSE) + advert |= ADVERTISE_PAUSE_CAP; + if (advert & ADVERTISE_1000XPSE_ASYM) + advert |= ADVERTISE_PAUSE_ASYM; + if (lpa & LPA_1000XPAUSE) + lpa |= LPA_PAUSE_CAP; + if (lpa & LPA_1000XPAUSE_ASYM) + lpa |= LPA_PAUSE_ASYM; + } + + sky2->flow_status = FC_NONE; + if (advert & ADVERTISE_PAUSE_CAP) { + if (lpa & LPA_PAUSE_CAP) + sky2->flow_status = FC_BOTH; + else if (advert & ADVERTISE_PAUSE_ASYM) + sky2->flow_status = FC_RX; + } else if (advert & ADVERTISE_PAUSE_ASYM) { + if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM)) + sky2->flow_status = FC_TX; + } + + if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 && + !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)) + sky2->flow_status = FC_NONE; + + if (sky2->flow_status & FC_TX) + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + else + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + + return 0; +} + +/* Interrupt from PHY */ +static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) +{ + struct net_device *dev = hw->dev[port]; + struct sky2_port *sky2 = netdev_priv(dev); + u16 istatus, phystat; + + if (!netif_running(dev)) + return; + + spin_lock(&sky2->phy_lock); + istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); + phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); + + netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n", + istatus, phystat); + + if (istatus & PHY_M_IS_AN_COMPL) { + if (sky2_autoneg_done(sky2, phystat) == 0 && + !netif_carrier_ok(dev)) + sky2_link_up(sky2); + goto out; + } + + if (istatus & PHY_M_IS_LSP_CHANGE) + sky2->speed = sky2_phy_speed(hw, phystat); + + if (istatus & PHY_M_IS_DUP_CHANGE) + sky2->duplex = + (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; + + if (istatus & PHY_M_IS_LST_CHANGE) { + if (phystat & PHY_M_PS_LINK_UP) + sky2_link_up(sky2); + else + sky2_link_down(sky2); + } +out: + spin_unlock(&sky2->phy_lock); +} + +/* Special quick link interrupt (Yukon-2 Optima only) */ +static void sky2_qlink_intr(struct sky2_hw *hw) +{ + struct sky2_port *sky2 = netdev_priv(hw->dev[0]); + u32 imask; + u16 phy; + + /* disable irq */ + imask = sky2_read32(hw, B0_IMSK); + imask &= ~Y2_IS_PHY_QLNK; + sky2_write32(hw, B0_IMSK, imask); + + /* reset PHY Link Detect */ + phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + sky2_link_up(sky2); +} + +/* Transmit timeout is only called if we are running, carrier is up + * and tx queue is full (stopped). 
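+ * Recovery is deferred to restart_work since the chip cannot be
+ * restarted safely from softirq context.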
+ */ +static void sky2_tx_timeout(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + + netif_err(sky2, timer, dev, "tx timeout\n"); + + netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n", + sky2->tx_cons, sky2->tx_prod, + sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX), + sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE))); + + /* can't restart safely under softirq */ + schedule_work(&hw->restart_work); +} + +static int sky2_change_mtu(struct net_device *dev, int new_mtu) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + int err; + u16 ctl, mode; + u32 imask; + + /* MTU size outside the spec */ + if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) + return -EINVAL; + + /* MTU > 1500 on yukon FE and FE+ not allowed */ + if (new_mtu > ETH_DATA_LEN && + (hw->chip_id == CHIP_ID_YUKON_FE || + hw->chip_id == CHIP_ID_YUKON_FE_P)) + return -EINVAL; + + if (!netif_running(dev)) { + dev->mtu = new_mtu; + netdev_update_features(dev); + return 0; + } + + imask = sky2_read32(hw, B0_IMSK); + sky2_write32(hw, B0_IMSK, 0); + + dev->trans_start = jiffies; /* prevent tx timeout */ + napi_disable(&hw->napi); + netif_tx_disable(dev); + + synchronize_irq(hw->pdev->irq); + + if (!(hw->flags & SKY2_HW_RAM_BUFFER)) + sky2_set_tx_stfwd(hw, port); + + ctl = gma_read16(hw, port, GM_GP_CTRL); + gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); + sky2_rx_stop(sky2); + sky2_rx_clean(sky2); + + dev->mtu = new_mtu; + netdev_update_features(dev); + + mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA; + if (sky2->speed > SPEED_100) + mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000); + else + mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100); + + if (dev->mtu > ETH_DATA_LEN) + mode |= GM_SMOD_JUMBO_ENA; + + gma_write16(hw, port, GM_SERIAL_MODE, mode); + + sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); + + err = sky2_alloc_rx_skbs(sky2); + if (!err) + sky2_rx_start(sky2); + else + sky2_rx_clean(sky2); + sky2_write32(hw, B0_IMSK, imask); + + sky2_read32(hw, B0_Y2_SP_LISR); + napi_enable(&hw->napi); + + if (err) + dev_close(dev); + else { + gma_write16(hw, port, GM_GP_CTRL, ctl); + + netif_wake_queue(dev); + } + + return err; +} + +/* For small just reuse existing skb for next receive */ +static struct sk_buff *receive_copy(struct sky2_port *sky2, + const struct rx_ring_info *re, + unsigned length) +{ + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(sky2->netdev, length); + if (likely(skb)) { + pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, + length, PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(re->skb, skb->data, length); + skb->ip_summed = re->skb->ip_summed; + skb->csum = re->skb->csum; + pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, + length, PCI_DMA_FROMDEVICE); + re->skb->ip_summed = CHECKSUM_NONE; + skb_put(skb, length); + } + return skb; +} + +/* Adjust length of skb with fragments to match received data */ +static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, + unsigned int length) +{ + int i, num_frags; + unsigned int size; + + /* put header into skb */ + size = min(length, hdr_space); + skb->tail += size; + skb->len += size; + length -= size; + + num_frags = skb_shinfo(skb)->nr_frags; + for (i = 0; i < num_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + if (length == 0) { + /* don't need this page */ + __free_page(frag->page); + --skb_shinfo(skb)->nr_frags; + } 
else { + size = min(length, (unsigned) PAGE_SIZE); + + frag->size = size; + skb->data_len += size; + skb->truesize += size; + skb->len += size; + length -= size; + } + } +} + +/* Normal packet - take skb from ring element and put in a new one */ +static struct sk_buff *receive_new(struct sky2_port *sky2, + struct rx_ring_info *re, + unsigned int length) +{ + struct sk_buff *skb; + struct rx_ring_info nre; + unsigned hdr_space = sky2->rx_data_size; + + nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC); + if (unlikely(!nre.skb)) + goto nobuf; + + if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space)) + goto nomap; + + skb = re->skb; + sky2_rx_unmap_skb(sky2->hw->pdev, re); + prefetch(skb->data); + *re = nre; + + if (skb_shinfo(skb)->nr_frags) + skb_put_frags(skb, hdr_space, length); + else + skb_put(skb, length); + return skb; + +nomap: + dev_kfree_skb(nre.skb); +nobuf: + return NULL; +} + +/* + * Receive one packet. + * For larger packets, get new buffer. + */ +static struct sk_buff *sky2_receive(struct net_device *dev, + u16 length, u32 status) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; + struct sk_buff *skb = NULL; + u16 count = (status & GMR_FS_LEN) >> 16; + + if (status & GMR_FS_VLAN) + count -= VLAN_HLEN; /* Account for vlan tag */ + + netif_printk(sky2, rx_status, KERN_DEBUG, dev, + "rx slot %u status 0x%x len %d\n", + sky2->rx_next, status, length); + + sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; + prefetch(sky2->rx_ring + sky2->rx_next); + + /* This chip has hardware problems that generates bogus status. + * So do only marginal checking and expect higher level protocols + * to handle crap frames. + */ + if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && + sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 && + length != count) + goto okay; + + if (status & GMR_FS_ANY_ERR) + goto error; + + if (!(status & GMR_FS_RX_OK)) + goto resubmit; + + /* if length reported by DMA does not match PHY, packet was truncated */ + if (length != count) + goto error; + +okay: + if (length < copybreak) + skb = receive_copy(sky2, re, length); + else + skb = receive_new(sky2, re, length); + + dev->stats.rx_dropped += (skb == NULL); + +resubmit: + sky2_rx_submit(sky2, re); + + return skb; + +error: + ++dev->stats.rx_errors; + + if (net_ratelimit()) + netif_info(sky2, rx_err, dev, + "rx error, status 0x%x length %d\n", status, length); + + goto resubmit; +} + +/* Transmit complete */ +static inline void sky2_tx_done(struct net_device *dev, u16 last) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + if (netif_running(dev)) { + sky2_tx_complete(sky2, last); + + /* Wake unless it's detached, and called e.g. 
+		if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+			netif_wake_queue(dev);
+	}
+}
+
+static inline void sky2_skb_rx(const struct sky2_port *sky2,
+			       u32 status, struct sk_buff *skb)
+{
+	if (status & GMR_FS_VLAN)
+		__vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
+
+	if (skb->ip_summed == CHECKSUM_NONE)
+		netif_receive_skb(skb);
+	else
+		napi_gro_receive(&sky2->hw->napi, skb);
+}
+
+static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
+				unsigned packets, unsigned bytes)
+{
+	struct net_device *dev = hw->dev[port];
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (packets == 0)
+		return;
+
+	u64_stats_update_begin(&sky2->rx_stats.syncp);
+	sky2->rx_stats.packets += packets;
+	sky2->rx_stats.bytes += bytes;
+	u64_stats_update_end(&sky2->rx_stats.syncp);
+
+	dev->last_rx = jiffies;
+	sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
+}
+
+static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
+{
+	/* If this happens then the driver is assuming the wrong format
+	 * for this chip type.
+	 */
+	BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
+
+	/* Both checksum counters are programmed to start at
+	 * the same offset, so unless there is a problem they
+	 * should match. This failure is an early indication that
+	 * hardware receive checksumming won't work.
+	 */
+	if (likely((u16)(status >> 16) == (u16)status)) {
+		struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum = le16_to_cpu(status);
+	} else {
+		dev_notice(&sky2->hw->pdev->dev,
+			   "%s: receive checksum problem (status = %#x)\n",
+			   sky2->netdev->name, status);
+
+		/* Disable checksum offload.
+		 * It will be reenabled on the next ndo_set_features, but if
+		 * it's really broken, it will get disabled again.
+		 */
+		sky2->netdev->features &= ~NETIF_F_RXCSUM;
+		sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+			     BMU_DIS_RX_CHKSUM);
+	}
+}
+
+static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
+{
+	struct sk_buff *skb;
+
+	skb = sky2->rx_ring[sky2->rx_next].skb;
+	skb->rxhash = le32_to_cpu(status);
+}
+
+/* Process status response ring */
+static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
+{
+	int work_done = 0;
+	unsigned int total_bytes[2] = { 0 };
+	unsigned int total_packets[2] = { 0 };
+
+	rmb();
+	do {
+		struct sky2_port *sky2;
+		struct sky2_status_le *le = hw->st_le + hw->st_idx;
+		unsigned port;
+		struct net_device *dev;
+		struct sk_buff *skb;
+		u32 status;
+		u16 length;
+		u8 opcode = le->opcode;
+
+		if (!(opcode & HW_OWNER))
+			break;
+
+		hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
+
+		port = le->css & CSS_LINK_BIT;
+		dev = hw->dev[port];
+		sky2 = netdev_priv(dev);
+		length = le16_to_cpu(le->length);
+		status = le32_to_cpu(le->status);
+
+		le->opcode = 0;
+		switch (opcode & ~HW_OWNER) {
+		case OP_RXSTAT:
+			total_packets[port]++;
+			total_bytes[port] += length;
+
+			skb = sky2_receive(dev, length, status);
+			if (!skb)
+				break;
+
+			/* This chip reports checksum status differently */
+			if (hw->flags & SKY2_HW_NEW_LE) {
+				if ((dev->features & NETIF_F_RXCSUM) &&
+				    (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
+				    (le->css & CSS_TCPUDPCSOK))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				else
+					skb->ip_summed = CHECKSUM_NONE;
+			}
+
+			skb->protocol = eth_type_trans(skb, dev);
+
+			sky2_skb_rx(sky2, status, skb);
+
+			/* Stop after net poll weight */
+			if (++work_done >= to_do)
+				goto exit_loop;
+			break;
+
+		case OP_RXVLAN:
+			sky2->rx_tag = length;
+			break;
+
+		case OP_RXCHKSVLAN:
+			sky2->rx_tag = length;
+			/* fall through */
+		case OP_RXCHKS:
+			if
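+			/* The VLAN tag and checksum entries appear to be
+			 * posted by hardware ahead of the OP_RXSTAT entry
+			 * they describe, which is why rx_tag is cached
+			 * above and only consumed in sky2_skb_rx() once
+			 * the packet itself is handled.
+			 */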
(likely(dev->features & NETIF_F_RXCSUM)) + sky2_rx_checksum(sky2, status); + break; + + case OP_RSS_HASH: + sky2_rx_hash(sky2, status); + break; + + case OP_TXINDEXLE: + /* TX index reports status for both ports */ + sky2_tx_done(hw->dev[0], status & 0xfff); + if (hw->dev[1]) + sky2_tx_done(hw->dev[1], + ((status >> 24) & 0xff) + | (u16)(length & 0xf) << 8); + break; + + default: + if (net_ratelimit()) + pr_warning("unknown status opcode 0x%x\n", opcode); + } + } while (hw->st_idx != idx); + + /* Fully processed status ring so clear irq */ + sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); + +exit_loop: + sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]); + sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]); + + return work_done; +} + +static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) +{ + struct net_device *dev = hw->dev[port]; + + if (net_ratelimit()) + netdev_info(dev, "hw error interrupt status 0x%x\n", status); + + if (status & Y2_IS_PAR_RD1) { + if (net_ratelimit()) + netdev_err(dev, "ram data read parity error\n"); + /* Clear IRQ */ + sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); + } + + if (status & Y2_IS_PAR_WR1) { + if (net_ratelimit()) + netdev_err(dev, "ram data write parity error\n"); + + sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); + } + + if (status & Y2_IS_PAR_MAC1) { + if (net_ratelimit()) + netdev_err(dev, "MAC parity error\n"); + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); + } + + if (status & Y2_IS_PAR_RX1) { + if (net_ratelimit()) + netdev_err(dev, "RX parity error\n"); + sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); + } + + if (status & Y2_IS_TCP_TXA1) { + if (net_ratelimit()) + netdev_err(dev, "TCP segmentation error\n"); + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); + } +} + +static void sky2_hw_intr(struct sky2_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + u32 status = sky2_read32(hw, B0_HWE_ISRC); + u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); + + status &= hwmsk; + + if (status & Y2_IS_TIST_OV) + sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); + + if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { + u16 pci_err; + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_err = sky2_pci_read16(hw, PCI_STATUS); + if (net_ratelimit()) + dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", + pci_err); + + sky2_pci_write16(hw, PCI_STATUS, + pci_err | PCI_STATUS_ERROR_BITS); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + } + + if (status & Y2_IS_PCI_EXP) { + /* PCI-Express uncorrectable Error occurred */ + u32 err; + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); + sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, + 0xfffffffful); + if (net_ratelimit()) + dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); + + sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + } + + if (status & Y2_HWE_L1_MASK) + sky2_hw_error(hw, 0, status); + status >>= 8; + if (status & Y2_HWE_L1_MASK) + sky2_hw_error(hw, 1, status); +} + +static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) +{ + struct net_device *dev = hw->dev[port]; + struct sky2_port *sky2 = netdev_priv(dev); + u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); + + netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status); + + if (status & GM_IS_RX_CO_OV) + gma_read16(hw, port, GM_RX_IRQ_SRC); + + if (status & GM_IS_TX_CO_OV) + gma_read16(hw, port, 
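+			     /* Presumably a read-to-clear register: the
+			      * value is discarded, the read itself
+			      * acknowledges the counter-overflow IRQ,
+			      * matching the RX case above.
+			      */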
GM_TX_IRQ_SRC);
+
+	if (status & GM_IS_RX_FF_OR) {
+		++dev->stats.rx_fifo_errors;
+		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
+	}
+
+	if (status & GM_IS_TX_FF_UR) {
+		++dev->stats.tx_fifo_errors;
+		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
+	}
+}
+
+/* This should never happen; it is a bug. */
+static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
+{
+	struct net_device *dev = hw->dev[port];
+	u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
+
+	dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
+		dev->name, (unsigned) q, (unsigned) idx,
+		(unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
+
+	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
+}
+
+static int sky2_rx_hung(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	unsigned rxq = rxqaddr[port];
+	u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
+	u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
+	u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
+	u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
+
+	/* If idle and MAC or PCI is stuck */
+	if (sky2->check.last == dev->last_rx &&
+	    ((mac_rp == sky2->check.mac_rp &&
+	      mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
+	     /* Check if the PCI RX path is hung */
+	     (fifo_rp == sky2->check.fifo_rp &&
+	      fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
+		netdev_printk(KERN_DEBUG, dev,
+			      "hung mac %d:%d fifo %d (%d:%d)\n",
+			      mac_lev, mac_rp, fifo_lev,
+			      fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
+		return 1;
+	} else {
+		sky2->check.last = dev->last_rx;
+		sky2->check.mac_rp = mac_rp;
+		sky2->check.mac_lev = mac_lev;
+		sky2->check.fifo_rp = fifo_rp;
+		sky2->check.fifo_lev = fifo_lev;
+		return 0;
+	}
+}
+
+static void sky2_watchdog(unsigned long arg)
+{
+	struct sky2_hw *hw = (struct sky2_hw *) arg;
+
+	/* Check for lost IRQ once a second */
+	if (sky2_read32(hw, B0_ISRC)) {
+		napi_schedule(&hw->napi);
+	} else {
+		int i, active = 0;
+
+		for (i = 0; i < hw->ports; i++) {
+			struct net_device *dev = hw->dev[i];
+			if (!netif_running(dev))
+				continue;
+			++active;
+
+			/* For chips with Rx FIFO, check if stuck */
+			if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
+			    sky2_rx_hung(dev)) {
+				netdev_info(dev, "receiver hang detected\n");
+				schedule_work(&hw->restart_work);
+				return;
+			}
+		}
+
+		if (active == 0)
+			return;
+	}
+
+	mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
+}
+
+/* Hardware/software error handling */
+static void sky2_err_intr(struct sky2_hw *hw, u32 status)
+{
+	if (net_ratelimit())
+		dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
+
+	if (status & Y2_IS_HW_ERR)
+		sky2_hw_intr(hw);
+
+	if (status & Y2_IS_IRQ_MAC1)
+		sky2_mac_intr(hw, 0);
+
+	if (status & Y2_IS_IRQ_MAC2)
+		sky2_mac_intr(hw, 1);
+
+	if (status & Y2_IS_CHK_RX1)
+		sky2_le_error(hw, 0, Q_R1);
+
+	if (status & Y2_IS_CHK_RX2)
+		sky2_le_error(hw, 1, Q_R2);
+
+	if (status & Y2_IS_CHK_TXA1)
+		sky2_le_error(hw, 0, Q_XA1);
+
+	if (status & Y2_IS_CHK_TXA2)
+		sky2_le_error(hw, 1, Q_XA2);
+}
+
+static int sky2_poll(struct napi_struct *napi, int work_limit)
+{
+	struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
+	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+	int work_done = 0;
+	u16 idx;
+
+	if (unlikely(status & Y2_IS_ERROR))
+		sky2_err_intr(hw, status);
+
+	if (status & Y2_IS_IRQ_PHY1)
+		sky2_phy_intr(hw, 0);
+
+	if (status & Y2_IS_IRQ_PHY2)
+		sky2_phy_intr(hw, 1);
+
+	if (status & Y2_IS_PHY_QLNK)
+		sky2_qlink_intr(hw);
+
+	while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
+		work_done += sky2_status_intr(hw, work_limit - work_done, idx);
+
+		if (work_done >= work_limit)
+			goto done;
+	}
+
+	napi_complete(napi);
+	sky2_read32(hw, B0_Y2_SP_LISR);
+done:
+
+	return work_done;
+}
+
+static irqreturn_t sky2_intr(int irq, void *dev_id)
+{
+	struct sky2_hw *hw = dev_id;
+	u32 status;
+
+	/* Reading this register masks interrupts as a side effect */
+	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+	if (status == 0 || status == ~0)
+		return IRQ_NONE;
+
+	prefetch(&hw->st_le[hw->st_idx]);
+
+	napi_schedule(&hw->napi);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sky2_netpoll(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	napi_schedule(&sky2->hw->napi);
+}
+#endif
+
+/* Chip internal frequency for clock calculations */
+static u32 sky2_mhz(const struct sky2_hw *hw)
+{
+	switch (hw->chip_id) {
+	case CHIP_ID_YUKON_EC:
+	case CHIP_ID_YUKON_EC_U:
+	case CHIP_ID_YUKON_EX:
+	case CHIP_ID_YUKON_SUPR:
+	case CHIP_ID_YUKON_UL_2:
+	case CHIP_ID_YUKON_OPT:
+	case CHIP_ID_YUKON_PRM:
+	case CHIP_ID_YUKON_OP_2:
+		return 125;
+
+	case CHIP_ID_YUKON_FE:
+		return 100;
+
+	case CHIP_ID_YUKON_FE_P:
+		return 50;
+
+	case CHIP_ID_YUKON_XL:
+		return 156;
+
+	default:
+		BUG();
+	}
+}
+
+static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
+{
+	return sky2_mhz(hw) * us;
+}
+
+static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
+{
+	return clk / sky2_mhz(hw);
+}
+
+
+static int __devinit sky2_init(struct sky2_hw *hw)
+{
+	u8 t8;
+
+	/* Enable all clocks and check for bad PCI access */
+	sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+
+	sky2_write8(hw, B0_CTST, CS_RST_CLR);
+
+	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
+	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
+
+	switch (hw->chip_id) {
+	case CHIP_ID_YUKON_XL:
+		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
+		if (hw->chip_rev < CHIP_REV_YU_XL_A2)
+			hw->flags |= SKY2_HW_RSS_BROKEN;
+		break;
+
+	case CHIP_ID_YUKON_EC_U:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEWER_PHY
+			| SKY2_HW_ADV_POWER_CTL;
+		break;
+
+	case CHIP_ID_YUKON_EX:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEWER_PHY
+			| SKY2_HW_NEW_LE
+			| SKY2_HW_ADV_POWER_CTL
+			| SKY2_HW_RSS_CHKSUM;
+
+		/* New transmit checksum */
+		if (hw->chip_rev != CHIP_REV_YU_EX_B0)
+			hw->flags |= SKY2_HW_AUTO_TX_SUM;
+		break;
+
+	case CHIP_ID_YUKON_EC:
+		/* This rev is really old, and requires untested workarounds */
+		if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
+			dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
+			return -EOPNOTSUPP;
+		}
+		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
+		break;
+
+	case CHIP_ID_YUKON_FE:
+		hw->flags = SKY2_HW_RSS_BROKEN;
+		break;
+
+	case CHIP_ID_YUKON_FE_P:
+		hw->flags = SKY2_HW_NEWER_PHY
+			| SKY2_HW_NEW_LE
+			| SKY2_HW_AUTO_TX_SUM
+			| SKY2_HW_ADV_POWER_CTL;
+
+		/* The workaround for status conflicts with VLAN tag detection. */
+		if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
+			hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM;
+		break;
+
+	case CHIP_ID_YUKON_SUPR:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEWER_PHY
+			| SKY2_HW_NEW_LE
+			| SKY2_HW_AUTO_TX_SUM
+			| SKY2_HW_ADV_POWER_CTL;
+
+		if (hw->chip_rev == CHIP_REV_YU_SU_A0)
+			hw->flags |= SKY2_HW_RSS_CHKSUM;
+		break;
+
+	case CHIP_ID_YUKON_UL_2:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_ADV_POWER_CTL;
+		break;
+
+	case CHIP_ID_YUKON_OPT:
+	case CHIP_ID_YUKON_PRM:
+	case CHIP_ID_YUKON_OP_2:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEW_LE
+			| SKY2_HW_ADV_POWER_CTL;
+		break;
+
+	default:
+		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
+			hw->chip_id);
+		return -EOPNOTSUPP;
+	}
+
+	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
+	if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
+		hw->flags |= SKY2_HW_FIBRE_PHY;
+
+	hw->ports = 1;
+	t8 = sky2_read8(hw, B2_Y2_HW_RES);
+	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
+		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
+			++hw->ports;
+	}
+
+	if (sky2_read8(hw, B2_E_0))
+		hw->flags |= SKY2_HW_RAM_BUFFER;
+
+	return 0;
+}
+
+static void sky2_reset(struct sky2_hw *hw)
+{
+	struct pci_dev *pdev = hw->pdev;
+	u16 status;
+	int i;
+	u32 hwe_mask = Y2_HWE_ALL_MASK;
+
+	/* disable ASF */
+	if (hw->chip_id == CHIP_ID_YUKON_EX
+	    || hw->chip_id == CHIP_ID_YUKON_SUPR) {
+		sky2_write32(hw, CPU_WDOG, 0);
+		status = sky2_read16(hw, HCU_CCSR);
+		status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
+			    HCU_CCSR_UC_STATE_MSK);
+		/*
+		 * CPU clock divider shouldn't be used because
+		 * - ASF firmware may malfunction
+		 * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
+		 */
+		status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
+		sky2_write16(hw, HCU_CCSR, status);
+		sky2_write32(hw, CPU_WDOG, 0);
+	} else
+		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
+	sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
+
+	/* do a SW reset */
+	sky2_write8(hw, B0_CTST, CS_RST_SET);
+	sky2_write8(hw, B0_CTST, CS_RST_CLR);
+
+	/* allow writes to PCI config */
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+
+	/* clear PCI errors, if any */
+	status = sky2_pci_read16(hw, PCI_STATUS);
+	status |= PCI_STATUS_ERROR_BITS;
+	sky2_pci_write16(hw, PCI_STATUS, status);
+
+	sky2_write8(hw, B0_CTST, CS_MRST_CLR);
+
+	if (pci_is_pcie(pdev)) {
+		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
+			     0xfffffffful);
+
+		/* If error bit is stuck on, ignore it */
+		if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
+			dev_info(&pdev->dev, "ignoring stuck error report bit\n");
+		else
+			hwe_mask |= Y2_IS_PCI_EXP;
+	}
+
+	sky2_power_on(hw);
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+	for (i = 0; i < hw->ports; i++) {
+		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
+		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+		if (hw->chip_id == CHIP_ID_YUKON_EX ||
+		    hw->chip_id == CHIP_ID_YUKON_SUPR)
+			sky2_write16(hw, SK_REG(i, GMAC_CTRL),
+				     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
+				     | GMC_BYP_RETR_ON);
+
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
+		/* enable MACSec clock gating */
+		sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_OPT ||
+	    hw->chip_id == CHIP_ID_YUKON_PRM ||
+	    hw->chip_id == CHIP_ID_YUKON_OP_2) {
+		u16 reg;
+		u32 msk;
+
+		if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
+			/* disable PCI-E PHY power down (set PHY reg 0x80, bit 7) */
+			sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 <<
7)); + + /* set PHY Link Detect Timer to 1.1 second (11x 100ms) */ + reg = 10; + + /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ + sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); + } else { + /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */ + reg = 3; + } + + reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; + reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT; + + /* reset PHY Link Detect */ + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); + + /* enable PHY Quick Link */ + msk = sky2_read32(hw, B0_IMSK); + msk |= Y2_IS_PHY_QLNK; + sky2_write32(hw, B0_IMSK, msk); + + /* check if PSMv2 was running before */ + reg = sky2_pci_read16(hw, PSM_CONFIG_REG3); + if (reg & PCI_EXP_LNKCTL_ASPMC) + /* restore the PCIe Link Control register */ + sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL, + reg); + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ + sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); + } + + /* Clear I2C IRQ noise */ + sky2_write32(hw, B2_I2C_IRQ, 1); + + /* turn off hardware timer (unused) */ + sky2_write8(hw, B2_TI_CTRL, TIM_STOP); + sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); + + /* Turn off descriptor polling */ + sky2_write32(hw, B28_DPT_CTRL, DPT_STOP); + + /* Turn off receive timestamp */ + sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP); + sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); + + /* enable the Tx Arbiters */ + for (i = 0; i < hw->ports; i++) + sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); + + /* Initialize ram interface */ + for (i = 0; i < hw->ports; i++) { + sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); + + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53); + } + + sky2_write32(hw, B0_HWE_IMSK, hwe_mask); + + for (i = 0; i < hw->ports; i++) + sky2_gmac_reset(hw, i); + + memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le)); + hw->st_idx = 0; + + sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET); + sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR); + + sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma); + sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32); + + /* Set the list last index */ + sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1); + + sky2_write16(hw, STAT_TX_IDX_TH, 10); + sky2_write8(hw, STAT_FIFO_WM, 16); + + /* set Status-FIFO ISR watermark */ + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) + sky2_write8(hw, STAT_FIFO_ISR_WM, 4); + else + sky2_write8(hw, STAT_FIFO_ISR_WM, 16); + + sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); + sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20)); + sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); + + /* enable status unit */ + sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); + + sky2_write8(hw, 
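+		    /* For scale (assuming a 125 MHz core clock, e.g. a
+		     * Yukon-EC class chip per sky2_mhz()): the INI writes
+		     * above arm the TX timer at 125000 ticks (1 ms), the
+		     * ISR timer at 2500 ticks (20 us) and the level timer
+		     * at 12500 ticks (100 us).
+		     */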
STAT_TX_TIMER_CTRL, TIM_START); + sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); + sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); +} + +/* Take device down (offline). + * Equivalent to doing dev_stop() but this does not + * inform upper layers of the transition. + */ +static void sky2_detach(struct net_device *dev) +{ + if (netif_running(dev)) { + netif_tx_lock(dev); + netif_device_detach(dev); /* stop txq */ + netif_tx_unlock(dev); + sky2_down(dev); + } +} + +/* Bring device back after doing sky2_detach */ +static int sky2_reattach(struct net_device *dev) +{ + int err = 0; + + if (netif_running(dev)) { + err = sky2_up(dev); + if (err) { + netdev_info(dev, "could not restart %d\n", err); + dev_close(dev); + } else { + netif_device_attach(dev); + sky2_set_multicast(dev); + } + } + + return err; +} + +static void sky2_all_down(struct sky2_hw *hw) +{ + int i; + + sky2_read32(hw, B0_IMSK); + sky2_write32(hw, B0_IMSK, 0); + synchronize_irq(hw->pdev->irq); + napi_disable(&hw->napi); + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (!netif_running(dev)) + continue; + + netif_carrier_off(dev); + netif_tx_disable(dev); + sky2_hw_down(sky2); + } +} + +static void sky2_all_up(struct sky2_hw *hw) +{ + u32 imask = Y2_IS_BASE; + int i; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (!netif_running(dev)) + continue; + + sky2_hw_up(sky2); + sky2_set_multicast(dev); + imask |= portirq_msk[i]; + netif_wake_queue(dev); + } + + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + + sky2_read32(hw, B0_Y2_SP_LISR); + napi_enable(&hw->napi); +} + +static void sky2_restart(struct work_struct *work) +{ + struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work); + + rtnl_lock(); + + sky2_all_down(hw); + sky2_reset(hw); + sky2_all_up(hw); + + rtnl_unlock(); +} + +static inline u8 sky2_wol_supported(const struct sky2_hw *hw) +{ + return sky2_is_copper(hw) ? 
(WAKE_PHY | WAKE_MAGIC) : 0; +} + +static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + + wol->supported = sky2_wol_supported(sky2->hw); + wol->wolopts = sky2->wol; +} + +static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + bool enable_wakeup = false; + int i; + + if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) || + !device_can_wakeup(&hw->pdev->dev)) + return -EOPNOTSUPP; + + sky2->wol = wol->wolopts; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (sky2->wol) + enable_wakeup = true; + } + device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup); + + return 0; +} + +static u32 sky2_supported_modes(const struct sky2_hw *hw) +{ + if (sky2_is_copper(hw)) { + u32 modes = SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full; + + if (hw->flags & SKY2_HW_GIGABIT) + modes |= SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full; + return modes; + } else + return SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full; +} + +static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = sky2_supported_modes(hw); + ecmd->phy_address = PHY_ADDR_MARV; + if (sky2_is_copper(hw)) { + ecmd->port = PORT_TP; + ethtool_cmd_speed_set(ecmd, sky2->speed); + ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_1000); + ecmd->port = PORT_FIBRE; + ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; + } + + ecmd->advertising = sky2->advertising; + ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) + ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + ecmd->duplex = sky2->duplex; + return 0; +} + +static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + const struct sky2_hw *hw = sky2->hw; + u32 supported = sky2_supported_modes(hw); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + if (ecmd->advertising & ~supported) + return -EINVAL; + + if (sky2_is_copper(hw)) + sky2->advertising = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + else + sky2->advertising = ecmd->advertising | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + + sky2->flags |= SKY2_FLAG_AUTO_SPEED; + sky2->duplex = -1; + sky2->speed = -1; + } else { + u32 setting; + u32 speed = ethtool_cmd_speed(ecmd); + + switch (speed) { + case SPEED_1000: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_1000baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_1000baseT_Half; + else + return -EINVAL; + break; + case SPEED_100: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_100baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_100baseT_Half; + else + return -EINVAL; + break; + + case SPEED_10: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_10baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_10baseT_Half; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((setting & supported) == 0) + return -EINVAL; + + sky2->speed = speed; + sky2->duplex = ecmd->duplex; + sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; + } + + if (netif_running(dev)) { + sky2_phy_reinit(sky2); + sky2_set_multicast(dev); + } + + return 0; +} + +static void sky2_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); + strcpy(info->bus_info, pci_name(sky2->hw->pdev)); +} + +static const struct sky2_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} sky2_stats[] = { + { "tx_bytes", GM_TXO_OK_HI }, + { "rx_bytes", GM_RXO_OK_HI }, + { "tx_broadcast", GM_TXF_BC_OK }, + { "rx_broadcast", GM_RXF_BC_OK }, + { "tx_multicast", GM_TXF_MC_OK }, + { "rx_multicast", GM_RXF_MC_OK }, + { "tx_unicast", GM_TXF_UC_OK }, + { "rx_unicast", GM_RXF_UC_OK }, + { "tx_mac_pause", GM_TXF_MPAUSE }, + { "rx_mac_pause", GM_RXF_MPAUSE }, + { "collisions", GM_TXF_COL }, + { "late_collision",GM_TXF_LAT_COL }, + { "aborted", GM_TXF_ABO_COL }, + { "single_collisions", GM_TXF_SNG_COL }, + { "multi_collisions", GM_TXF_MUL_COL }, + + { "rx_short", GM_RXF_SHT }, + { "rx_runt", GM_RXE_FRAG }, + { "rx_64_byte_packets", GM_RXF_64B }, + { "rx_65_to_127_byte_packets", GM_RXF_127B }, + { "rx_128_to_255_byte_packets", GM_RXF_255B }, + { "rx_256_to_511_byte_packets", GM_RXF_511B }, + { "rx_512_to_1023_byte_packets", GM_RXF_1023B }, + { "rx_1024_to_1518_byte_packets", GM_RXF_1518B }, + { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ }, + { "rx_too_long", GM_RXF_LNG_ERR }, + { "rx_fifo_overflow", GM_RXE_FIFO_OV }, + { "rx_jabber", GM_RXF_JAB_PKT }, + { "rx_fcs_error", GM_RXF_FCS_ERR }, + + { "tx_64_byte_packets", GM_TXF_64B }, + { "tx_65_to_127_byte_packets", GM_TXF_127B }, + { "tx_128_to_255_byte_packets", GM_TXF_255B }, + { "tx_256_to_511_byte_packets", GM_TXF_511B }, + { "tx_512_to_1023_byte_packets", GM_TXF_1023B }, + { "tx_1024_to_1518_byte_packets", GM_TXF_1518B }, + { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ }, + { "tx_fifo_underrun", GM_TXE_FIFO_UR }, +}; + +static u32 
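+/* A note on the table above: the first two entries are 64-bit octet
+ * counters read via get_stats64() starting at the _LO offset (see
+ * sky2_phy_stats() below); the remaining entries are plain 32-bit MIB
+ * counters read individually.
+ */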
sky2_get_msglevel(struct net_device *netdev) +{ + struct sky2_port *sky2 = netdev_priv(netdev); + return sky2->msg_enable; +} + +static int sky2_nway_reset(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED)) + return -EINVAL; + + sky2_phy_reinit(sky2); + sky2_set_multicast(dev); + + return 0; +} + +static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + int i; + + data[0] = get_stats64(hw, port, GM_TXO_OK_LO); + data[1] = get_stats64(hw, port, GM_RXO_OK_LO); + + for (i = 2; i < count; i++) + data[i] = get_stats32(hw, port, sky2_stats[i].offset); +} + +static void sky2_set_msglevel(struct net_device *netdev, u32 value) +{ + struct sky2_port *sky2 = netdev_priv(netdev); + sky2->msg_enable = value; +} + +static int sky2_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(sky2_stats); + default: + return -EOPNOTSUPP; + } +} + +static void sky2_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats)); +} + +static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(sky2_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + sky2_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static int sky2_set_mac_address(struct net_device *dev, void *p) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + const struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_1 + port * 8, + dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port * 8, + dev->dev_addr, ETH_ALEN); + + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); + + return 0; +} + +static inline void sky2_add_filter(u8 filter[8], const u8 *addr) +{ + u32 bit; + + bit = ether_crc(ETH_ALEN, addr) & 63; + filter[bit >> 3] |= 1 << (bit & 7); +} + +static void sky2_set_multicast(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + struct netdev_hw_addr *ha; + u16 reg; + u8 filter[8]; + int rx_pause; + static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; + + rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH); + memset(filter, 0, sizeof(filter)); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA; + + if (dev->flags & IFF_PROMISC) /* promiscuous */ + reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + else if (dev->flags & IFF_ALLMULTI) + memset(filter, 0xff, sizeof(filter)); + else if (netdev_mc_empty(dev) && !rx_pause) + reg &= ~GM_RXCR_MCF_ENA; + else { + reg |= GM_RXCR_MCF_ENA; + + if (rx_pause) + sky2_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + sky2_add_filter(filter, ha->addr); + } + + gma_write16(hw, port, GM_MC_ADDR_H1, + (u16) filter[0] | ((u16) filter[1] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H2, + (u16) filter[2] | ((u16) filter[3] << 8)); + gma_write16(hw, port, 
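+		    /* Sketch of the filter math used above: a (hypothetical)
+		     * address hashes to one of 64 bits,
+		     *   bit = ether_crc(ETH_ALEN, addr) & 63;
+		     *   filter[bit >> 3] |= 1 << (bit & 7);
+		     * and the 8 filter bytes are then packed little-endian
+		     * into the four 16-bit GM_MC_ADDR_H1..H4 registers.
+		     */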
GM_MC_ADDR_H3, + (u16) filter[4] | ((u16) filter[5] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H4, + (u16) filter[6] | ((u16) filter[7] << 8)); + + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + unsigned int start; + u64 _bytes, _packets; + + do { + start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp); + _bytes = sky2->rx_stats.bytes; + _packets = sky2->rx_stats.packets; + } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start)); + + stats->rx_packets = _packets; + stats->rx_bytes = _bytes; + + do { + start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp); + _bytes = sky2->tx_stats.bytes; + _packets = sky2->tx_stats.packets; + } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start)); + + stats->tx_packets = _packets; + stats->tx_bytes = _bytes; + + stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK) + + get_stats32(hw, port, GM_RXF_BC_OK); + + stats->collisions = get_stats32(hw, port, GM_TXF_COL); + + stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR); + stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR); + stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT) + + get_stats32(hw, port, GM_RXE_FRAG); + stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV); + + stats->rx_dropped = dev->stats.rx_dropped; + stats->rx_fifo_errors = dev->stats.rx_fifo_errors; + stats->tx_fifo_errors = dev->stats.tx_fifo_errors; + + return stats; +} + +/* Can have one global because blinking is controlled by + * ethtool and that is always under RTNL mutex + */ +static void sky2_led(struct sky2_port *sky2, enum led_mode mode) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + + spin_lock_bh(&sky2->phy_lock); + if (hw->chip_id == CHIP_ID_YUKON_EC_U || + hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_SUPR) { + u16 pg; + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); + + switch (mode) { + case MO_LED_OFF: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(8) | + PHY_M_LEDC_INIT_CTRL(8) | + PHY_M_LEDC_STA1_CTRL(8) | + PHY_M_LEDC_STA0_CTRL(8)); + break; + case MO_LED_ON: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(9) | + PHY_M_LEDC_INIT_CTRL(9) | + PHY_M_LEDC_STA1_CTRL(9) | + PHY_M_LEDC_STA0_CTRL(9)); + break; + case MO_LED_BLINK: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(0xa) | + PHY_M_LEDC_INIT_CTRL(0xa) | + PHY_M_LEDC_STA1_CTRL(0xa) | + PHY_M_LEDC_STA0_CTRL(0xa)); + break; + case MO_LED_NORM: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(1) | + PHY_M_LEDC_INIT_CTRL(8) | + PHY_M_LEDC_STA1_CTRL(7) | + PHY_M_LEDC_STA0_CTRL(7)); + } + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + } else + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(mode) | + PHY_M_LED_MO_10(mode) | + PHY_M_LED_MO_100(mode) | + PHY_M_LED_MO_1000(mode) | + PHY_M_LED_MO_RX(mode) | + PHY_M_LED_MO_TX(mode)); + + spin_unlock_bh(&sky2->phy_lock); +} + +/* blink LED's for finding board */ +static int sky2_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + case ETHTOOL_ID_INACTIVE: + sky2_led(sky2, MO_LED_NORM); + break; + case ETHTOOL_ID_ON: + 
sky2_led(sky2, MO_LED_ON);
+		break;
+	case ETHTOOL_ID_OFF:
+		sky2_led(sky2, MO_LED_OFF);
+		break;
+	}
+
+	return 0;
+}
+
+static void sky2_get_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	switch (sky2->flow_mode) {
+	case FC_NONE:
+		ecmd->tx_pause = ecmd->rx_pause = 0;
+		break;
+	case FC_TX:
+		ecmd->tx_pause = 1, ecmd->rx_pause = 0;
+		break;
+	case FC_RX:
+		ecmd->tx_pause = 0, ecmd->rx_pause = 1;
+		break;
+	case FC_BOTH:
+		ecmd->tx_pause = ecmd->rx_pause = 1;
+	}
+
+	ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE)
+		? AUTONEG_ENABLE : AUTONEG_DISABLE;
+}
+
+static int sky2_set_pauseparam(struct net_device *dev,
+			       struct ethtool_pauseparam *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE)
+		sky2->flags |= SKY2_FLAG_AUTO_PAUSE;
+	else
+		sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE;
+
+	sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
+
+	if (netif_running(dev))
+		sky2_phy_reinit(sky2);
+
+	return 0;
+}
+
+static int sky2_get_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+
+	if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
+		ecmd->tx_coalesce_usecs = 0;
+	else {
+		u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
+		ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
+	}
+	ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);
+
+	if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
+		ecmd->rx_coalesce_usecs = 0;
+	else {
+		u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
+		ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
+	}
+	ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);
+
+	if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
+		ecmd->rx_coalesce_usecs_irq = 0;
+	else {
+		u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
+		ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
+	}
+
+	ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);
+
+	return 0;
+}
+
+/* Note: this affects both ports */
+static int sky2_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
+
+	if (ecmd->tx_coalesce_usecs > tmax ||
+	    ecmd->rx_coalesce_usecs > tmax ||
+	    ecmd->rx_coalesce_usecs_irq > tmax)
+		return -EINVAL;
+
+	if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1)
+		return -EINVAL;
+	if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
+		return -EINVAL;
+	if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
+		return -EINVAL;
+
+	if (ecmd->tx_coalesce_usecs == 0)
+		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
+	else {
+		sky2_write32(hw, STAT_TX_TIMER_INI,
+			     sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
+		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+	}
+	sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);
+
+	if (ecmd->rx_coalesce_usecs == 0)
+		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
+	else {
+		sky2_write32(hw, STAT_LEV_TIMER_INI,
+			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
+		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
+	}
+	sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);
+
+	if (ecmd->rx_coalesce_usecs_irq == 0)
+		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
+	else {
+		sky2_write32(hw, STAT_ISR_TIMER_INI,
+			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
+		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
+	}
+	sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
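+	/* For reference: tmax is sky2_clk2us(hw, 0x0ffffff), i.e.
+	 * 16777215 clocks divided by the core MHz -- roughly 134217 us
+	 * (~134 ms) on a 125 MHz chip -- which bounds all three
+	 * coalescing timers checked above.
+	 */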
+	return 0;
+}
+
+static void sky2_get_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *ering)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	ering->rx_max_pending = RX_MAX_PENDING;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+	ering->tx_max_pending = TX_MAX_PENDING;
+
+	ering->rx_pending = sky2->rx_pending;
+	ering->rx_mini_pending = 0;
+	ering->rx_jumbo_pending = 0;
+	ering->tx_pending = sky2->tx_pending;
+}
+
+static int sky2_set_ringparam(struct net_device *dev,
+			      struct ethtool_ringparam *ering)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (ering->rx_pending > RX_MAX_PENDING ||
+	    ering->rx_pending < 8 ||
+	    ering->tx_pending < TX_MIN_PENDING ||
+	    ering->tx_pending > TX_MAX_PENDING)
+		return -EINVAL;
+
+	sky2_detach(dev);
+
+	sky2->rx_pending = ering->rx_pending;
+	sky2->tx_pending = ering->tx_pending;
+	sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1);
+
+	return sky2_reattach(dev);
+}
+
+static int sky2_get_regs_len(struct net_device *dev)
+{
+	return 0x4000;
+}
+
+static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
+{
+	/* This complicated switch statement is to make sure we only
+	 * access regions that are unreserved.
+	 * Some blocks are only valid on dual port cards.
+	 */
+	switch (b) {
+	/* second port */
+	case 5:		/* Tx Arbiter 2 */
+	case 9:		/* RX2 */
+	case 14 ... 15:	/* TX2 */
+	case 17: case 19: /* Ram Buffer 2 */
+	case 22 ... 23: /* Tx Ram Buffer 2 */
+	case 25:	/* Rx MAC Fifo 2 */
+	case 27:	/* Tx MAC Fifo 2 */
+	case 31:	/* GPHY 2 */
+	case 40 ... 47: /* Pattern Ram 2 */
+	case 52: case 54: /* TCP Segmentation 2 */
+	case 112 ... 116: /* GMAC 2 */
+		return hw->ports > 1;
+
+	case 0:		/* Control */
+	case 2:		/* Mac address */
+	case 4:		/* Tx Arbiter 1 */
+	case 7:		/* PCI express reg */
+	case 8:		/* RX1 */
+	case 12 ... 13: /* TX1 */
+	case 16: case 18:/* Rx Ram Buffer 1 */
+	case 20 ... 21: /* Tx Ram Buffer 1 */
+	case 24:	/* Rx MAC Fifo 1 */
+	case 26:	/* Tx MAC Fifo 1 */
+	case 28 ... 29: /* Descriptor and status unit */
+	case 30:	/* GPHY 1*/
+	case 32 ... 39: /* Pattern Ram 1 */
+	case 48: case 50: /* TCP Segmentation 1 */
+	case 56 ... 60:	/* PCI space */
+	case 80 ...
84: /* GMAC 1 */ + return 1; + + default: + return 0; + } +} + +/* + * Returns copy of control register region + * Note: ethtool_get_regs always provides full size (16k) buffer + */ +static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + const void __iomem *io = sky2->hw->regs; + unsigned int b; + + regs->version = 1; + + for (b = 0; b < 128; b++) { + /* skip poisonous diagnostic ram region in block 3 */ + if (b == 3) + memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10); + else if (sky2_reg_access_ok(sky2->hw, b)) + memcpy_fromio(p, io, 128); + else + memset(p, 0, 128); + + p += 128; + io += 128; + } +} + +static int sky2_get_eeprom_len(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + u16 reg2; + + reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); + return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); +} + +static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy) +{ + unsigned long start = jiffies; + + while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) { + /* Can take up to 10.6 ms for write */ + if (time_after(jiffies, start + HZ/4)) { + dev_err(&hw->pdev->dev, "VPD cycle timed out\n"); + return -ETIMEDOUT; + } + mdelay(1); + } + + return 0; +} + +static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data, + u16 offset, size_t length) +{ + int rc = 0; + + while (length > 0) { + u32 val; + + sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); + rc = sky2_vpd_wait(hw, cap, 0); + if (rc) + break; + + val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); + + memcpy(data, &val, min(sizeof(val), length)); + offset += sizeof(u32); + data += sizeof(u32); + length -= sizeof(u32); + } + + return rc; +} + +static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data, + u16 offset, unsigned int length) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < length; i += sizeof(u32)) { + u32 val = *(u32 *)(data + i); + + sky2_pci_write32(hw, cap + PCI_VPD_DATA, val); + sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); + + rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F); + if (rc) + break; + } + return rc; +} + +static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct sky2_port *sky2 = netdev_priv(dev); + int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); + + if (!cap) + return -EINVAL; + + eeprom->magic = SKY2_EEPROM_MAGIC; + + return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len); +} + +static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct sky2_port *sky2 = netdev_priv(dev); + int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); + + if (!cap) + return -EINVAL; + + if (eeprom->magic != SKY2_EEPROM_MAGIC) + return -EINVAL; + + /* Partial writes not supported */ + if ((eeprom->offset & 3) || (eeprom->len & 3)) + return -EINVAL; + + return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); +} + +static u32 sky2_fix_features(struct net_device *dev, u32 features) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + const struct sky2_hw *hw = sky2->hw; + + /* In order to do Jumbo packets on these chips, need to turn off the + * transmit store/forward. Therefore checksum offload won't work. 
+ */ + if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) { + netdev_info(dev, "checksum offload not possible with jumbo frames\n"); + features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM); + } + + /* Some hardware requires receive checksum for RSS to work. */ + if ( (features & NETIF_F_RXHASH) && + !(features & NETIF_F_RXCSUM) && + (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) { + netdev_info(dev, "receive hashing forces receive checksum\n"); + features |= NETIF_F_RXCSUM; + } + + return features; +} + +static int sky2_set_features(struct net_device *dev, u32 features) +{ + struct sky2_port *sky2 = netdev_priv(dev); + u32 changed = dev->features ^ features; + + if (changed & NETIF_F_RXCSUM) { + u32 on = features & NETIF_F_RXCSUM; + sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), + on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); + } + + if (changed & NETIF_F_RXHASH) + rx_set_rss(dev, features); + + if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) + sky2_vlan_mode(dev, features); + + return 0; +} + +static const struct ethtool_ops sky2_ethtool_ops = { + .get_settings = sky2_get_settings, + .set_settings = sky2_set_settings, + .get_drvinfo = sky2_get_drvinfo, + .get_wol = sky2_get_wol, + .set_wol = sky2_set_wol, + .get_msglevel = sky2_get_msglevel, + .set_msglevel = sky2_set_msglevel, + .nway_reset = sky2_nway_reset, + .get_regs_len = sky2_get_regs_len, + .get_regs = sky2_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = sky2_get_eeprom_len, + .get_eeprom = sky2_get_eeprom, + .set_eeprom = sky2_set_eeprom, + .get_strings = sky2_get_strings, + .get_coalesce = sky2_get_coalesce, + .set_coalesce = sky2_set_coalesce, + .get_ringparam = sky2_get_ringparam, + .set_ringparam = sky2_set_ringparam, + .get_pauseparam = sky2_get_pauseparam, + .set_pauseparam = sky2_set_pauseparam, + .set_phys_id = sky2_set_phys_id, + .get_sset_count = sky2_get_sset_count, + .get_ethtool_stats = sky2_get_ethtool_stats, +}; + +#ifdef CONFIG_SKY2_DEBUG + +static struct dentry *sky2_debug; + + +/* + * Read and parse the first part of Vital Product Data + */ +#define VPD_SIZE 128 +#define VPD_MAGIC 0x82 + +static const struct vpd_tag { + char tag[2]; + char *label; +} vpd_tags[] = { + { "PN", "Part Number" }, + { "EC", "Engineering Level" }, + { "MN", "Manufacturer" }, + { "SN", "Serial Number" }, + { "YA", "Asset Tag" }, + { "VL", "First Error Log Message" }, + { "VF", "Second Error Log Message" }, + { "VB", "Boot Agent ROM Configuration" }, + { "VE", "EFI UNDI Configuration" }, +}; + +static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw) +{ + size_t vpd_size; + loff_t offs; + u8 len; + unsigned char *buf; + u16 reg2; + + reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); + vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); + + seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev)); + buf = kmalloc(vpd_size, GFP_KERNEL); + if (!buf) { + seq_puts(seq, "no memory!\n"); + return; + } + + if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) { + seq_puts(seq, "VPD read failed\n"); + goto out; + } + + if (buf[0] != VPD_MAGIC) { + seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]); + goto out; + } + len = buf[1]; + if (len == 0 || len > vpd_size - 4) { + seq_printf(seq, "Invalid id length: %d\n", len); + goto out; + } + + seq_printf(seq, "%.*s\n", len, buf + 3); + offs = len + 3; + + while (offs < vpd_size - 4) { + int i; + + if (!memcmp("RW", buf + offs, 2)) /* end marker */ + break; + len = buf[offs + 2]; + if (offs + len + 3 >= vpd_size) + break; + + for (i = 0; i < 
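+		    /* Each keyword record scanned here is two tag bytes,
+		     * a one-byte length, then the data itself -- hence the
+		     * buf[offs + 2] length fetch above and the offs + 3
+		     * data offset used below.
+		     */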
ARRAY_SIZE(vpd_tags); i++) { + if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) { + seq_printf(seq, " %s: %.*s\n", + vpd_tags[i].label, len, buf + offs + 3); + break; + } + } + offs += len + 3; + } +out: + kfree(buf); +} + +static int sky2_debug_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + const struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + unsigned idx, last; + int sop; + + sky2_show_vpd(seq, hw); + + seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n", + sky2_read32(hw, B0_ISRC), + sky2_read32(hw, B0_IMSK), + sky2_read32(hw, B0_Y2_SP_ICR)); + + if (!netif_running(dev)) { + seq_printf(seq, "network not running\n"); + return 0; + } + + napi_disable(&hw->napi); + last = sky2_read16(hw, STAT_PUT_IDX); + + seq_printf(seq, "Status ring %u\n", hw->st_size); + if (hw->st_idx == last) + seq_puts(seq, "Status ring (empty)\n"); + else { + seq_puts(seq, "Status ring\n"); + for (idx = hw->st_idx; idx != last && idx < hw->st_size; + idx = RING_NEXT(idx, hw->st_size)) { + const struct sky2_status_le *le = hw->st_le + idx; + seq_printf(seq, "[%d] %#x %d %#x\n", + idx, le->opcode, le->length, le->status); + } + seq_puts(seq, "\n"); + } + + seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n", + sky2->tx_cons, sky2->tx_prod, + sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX), + sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE))); + + /* Dump contents of tx ring */ + sop = 1; + for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size; + idx = RING_NEXT(idx, sky2->tx_ring_size)) { + const struct sky2_tx_le *le = sky2->tx_le + idx; + u32 a = le32_to_cpu(le->addr); + + if (sop) + seq_printf(seq, "%u:", idx); + sop = 0; + + switch (le->opcode & ~HW_OWNER) { + case OP_ADDR64: + seq_printf(seq, " %#x:", a); + break; + case OP_LRGLEN: + seq_printf(seq, " mtu=%d", a); + break; + case OP_VLAN: + seq_printf(seq, " vlan=%d", be16_to_cpu(le->length)); + break; + case OP_TCPLISW: + seq_printf(seq, " csum=%#x", a); + break; + case OP_LARGESEND: + seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length)); + break; + case OP_PACKET: + seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length)); + break; + case OP_BUFFER: + seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length)); + break; + default: + seq_printf(seq, " op=%#x,%#x(%d)", le->opcode, + a, le16_to_cpu(le->length)); + } + + if (le->ctrl & EOP) { + seq_putc(seq, '\n'); + sop = 1; + } + } + + seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n", + sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)), + sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)), + sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX))); + + sky2_read32(hw, B0_Y2_SP_LISR); + napi_enable(&hw->napi); + return 0; +} + +static int sky2_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, sky2_debug_show, inode->i_private); +} + +static const struct file_operations sky2_debug_fops = { + .owner = THIS_MODULE, + .open = sky2_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Use network device events to create/remove/rename + * debugfs file entries + */ +static int sky2_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = ptr; + struct sky2_port *sky2 = netdev_priv(dev); + + if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGENAME: + if 
(sky2->debugfs) { + sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, + sky2_debug, dev->name); + } + break; + + case NETDEV_GOING_DOWN: + if (sky2->debugfs) { + netdev_printk(KERN_DEBUG, dev, "remove debugfs\n"); + debugfs_remove(sky2->debugfs); + sky2->debugfs = NULL; + } + break; + + case NETDEV_UP: + sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO, + sky2_debug, dev, + &sky2_debug_fops); + if (IS_ERR(sky2->debugfs)) + sky2->debugfs = NULL; + } + + return NOTIFY_DONE; +} + +static struct notifier_block sky2_notifier = { + .notifier_call = sky2_device_event, +}; + + +static __init void sky2_debug_init(void) +{ + struct dentry *ent; + + ent = debugfs_create_dir("sky2", NULL); + if (!ent || IS_ERR(ent)) + return; + + sky2_debug = ent; + register_netdevice_notifier(&sky2_notifier); +} + +static __exit void sky2_debug_cleanup(void) +{ + if (sky2_debug) { + unregister_netdevice_notifier(&sky2_notifier); + debugfs_remove(sky2_debug); + sky2_debug = NULL; + } +} + +#else +#define sky2_debug_init() +#define sky2_debug_cleanup() +#endif + +/* Two copies of network device operations to handle special case of + not allowing netpoll on second port */ +static const struct net_device_ops sky2_netdev_ops[2] = { + { + .ndo_open = sky2_up, + .ndo_stop = sky2_down, + .ndo_start_xmit = sky2_xmit_frame, + .ndo_do_ioctl = sky2_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = sky2_set_mac_address, + .ndo_set_multicast_list = sky2_set_multicast, + .ndo_change_mtu = sky2_change_mtu, + .ndo_fix_features = sky2_fix_features, + .ndo_set_features = sky2_set_features, + .ndo_tx_timeout = sky2_tx_timeout, + .ndo_get_stats64 = sky2_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sky2_netpoll, +#endif + }, + { + .ndo_open = sky2_up, + .ndo_stop = sky2_down, + .ndo_start_xmit = sky2_xmit_frame, + .ndo_do_ioctl = sky2_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = sky2_set_mac_address, + .ndo_set_multicast_list = sky2_set_multicast, + .ndo_change_mtu = sky2_change_mtu, + .ndo_fix_features = sky2_fix_features, + .ndo_set_features = sky2_set_features, + .ndo_tx_timeout = sky2_tx_timeout, + .ndo_get_stats64 = sky2_get_stats, + }, +}; + +/* Initialize network device */ +static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, + unsigned port, + int highmem, int wol) +{ + struct sky2_port *sky2; + struct net_device *dev = alloc_etherdev(sizeof(*sky2)); + + if (!dev) { + dev_err(&hw->pdev->dev, "etherdev alloc failed\n"); + return NULL; + } + + SET_NETDEV_DEV(dev, &hw->pdev->dev); + dev->irq = hw->pdev->irq; + SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops); + dev->watchdog_timeo = TX_WATCHDOG; + dev->netdev_ops = &sky2_netdev_ops[port]; + + sky2 = netdev_priv(dev); + sky2->netdev = dev; + sky2->hw = hw; + sky2->msg_enable = netif_msg_init(debug, default_msg); + + /* Auto speed and flow control */ + sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE; + if (hw->chip_id != CHIP_ID_YUKON_XL) + dev->hw_features |= NETIF_F_RXCSUM; + + sky2->flow_mode = FC_BOTH; + + sky2->duplex = -1; + sky2->speed = -1; + sky2->advertising = sky2_supported_modes(hw); + sky2->wol = wol; + + spin_lock_init(&sky2->phy_lock); + + sky2->tx_pending = TX_DEF_PENDING; + sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1); + sky2->rx_pending = RX_DEF_PENDING; + + hw->dev[port] = dev; + + sky2->port = port; + + dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; + + if (highmem) + dev->features |= NETIF_F_HIGHDMA; + + /* Enable receive 
hashing unless hardware is known broken */
+	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+		dev->hw_features |= NETIF_F_RXHASH;
+
+	if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
+		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+		dev->vlan_features |= SKY2_VLAN_OFFLOADS;
+	}
+
+	dev->features |= dev->hw_features;
+
+	/* read the mac address */
+	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+	return dev;
+}
+
+static void __devinit sky2_show_addr(struct net_device *dev)
+{
+	const struct sky2_port *sky2 = netdev_priv(dev);
+
+	netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
+}
+
+/* Handle software interrupt used during MSI test */
+static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
+{
+	struct sky2_hw *hw = dev_id;
+	u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+
+	if (status == 0)
+		return IRQ_NONE;
+
+	if (status & Y2_IS_IRQ_SW) {
+		hw->flags |= SKY2_HW_USE_MSI;
+		wake_up(&hw->msi_wait);
+		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+	}
+	sky2_write32(hw, B0_Y2_SP_ICR, 2);
+
+	return IRQ_HANDLED;
+}
+
+/* Test interrupt path by forcing a software IRQ */
+static int __devinit sky2_test_msi(struct sky2_hw *hw)
+{
+	struct pci_dev *pdev = hw->pdev;
+	int err;
+
+	init_waitqueue_head(&hw->msi_wait);
+
+	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
+
+	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
+	if (err) {
+		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
+		return err;
+	}
+
+	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
+	sky2_read8(hw, B0_CTST);
+
+	wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);
+
+	if (!(hw->flags & SKY2_HW_USE_MSI)) {
+		/* MSI test failed, go back to INTx mode */
+		dev_info(&pdev->dev, "No interrupt generated using MSI, "
+			 "switching to INTx mode.\n");
+
+		err = -EOPNOTSUPP;
+		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+	}
+
+	sky2_write32(hw, B0_IMSK, 0);
+	sky2_read32(hw, B0_IMSK);
+
+	free_irq(pdev->irq, hw);
+
+	return err;
+}
+
+/* This driver supports the Yukon-2 chipset only */
+static const char *sky2_name(u8 chipid, char *buf, int sz)
+{
+	const char *name[] = {
+		"XL",		/* 0xb3 */
+		"EC Ultra",	/* 0xb4 */
+		"Extreme",	/* 0xb5 */
+		"EC",		/* 0xb6 */
+		"FE",		/* 0xb7 */
+		"FE+",		/* 0xb8 */
+		"Supreme",	/* 0xb9 */
+		"UL 2",		/* 0xba */
+		"Unknown",	/* 0xbb */
+		"Optima",	/* 0xbc */
+		"Optima Prime",	/* 0xbd */
+		"Optima 2",	/* 0xbe */
+	};
+
+	if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2)
+		strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
+	else
+		snprintf(buf, sz, "(chip %#x)", chipid);
+	return buf;
+}
+
+static int __devinit sky2_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	struct net_device *dev;
+	struct sky2_hw *hw;
+	int err, using_dac = 0, wol_default;
+	u32 reg;
+	char buf1[16];
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		goto err_out;
+	}
+
+	/* Get configuration information.
+	 * Note: use regular PCI config access only once, to test for HW
+	 * issues; all other PCI access goes through shared memory, for
+	 * speed and to avoid MMCONFIG problems.
+ err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+ if (err) {
+ dev_err(&pdev->dev, "PCI read config failed\n");
+ goto err_out;
+ }
+
+ if (~reg == 0) {
+ dev_err(&pdev->dev, "PCI configuration read error\n");
+ goto err_out;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+ goto err_out_disable;
+ }
+
+ pci_set_master(pdev);
+
+ if (sizeof(dma_addr_t) > sizeof(u32) &&
+     !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) {
+ using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
+ "for consistent allocations\n");
+ goto err_out_free_regions;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_out_free_regions;
+ }
+ }
+
+
+#ifdef __BIG_ENDIAN
+ /* The sk98lin vendor driver uses hardware byte swapping but
+ * this driver uses software swapping.
+ */
+ reg &= ~PCI_REV_DESC;
+ err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
+ if (err) {
+ dev_err(&pdev->dev, "PCI write config failed\n");
+ goto err_out_free_regions;
+ }
+#endif
+
+ wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
+
+ err = -ENOMEM;
+
+ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+      + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
+ if (!hw) {
+ dev_err(&pdev->dev, "cannot allocate hardware struct\n");
+ goto err_out_free_regions;
+ }
+
+ hw->pdev = pdev;
+ sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
+
+ hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+ if (!hw->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ goto err_out_free_hw;
+ }
+
+ err = sky2_init(hw);
+ if (err)
+ goto err_out_iounmap;
+
+ /* ring for status responses */
+ hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
+ hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ &hw->st_dma);
+ if (!hw->st_le)
+ goto err_out_reset;
+
+ dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
+ sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
+
+ sky2_reset(hw);
+
+ dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_free_pci;
+ }
+
+ if (!disable_msi && pci_enable_msi(pdev) == 0) {
+ err = sky2_test_msi(hw);
+ if (err == -EOPNOTSUPP)
+ pci_disable_msi(pdev);
+ else if (err)
+ goto err_out_free_netdev;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register net device\n");
+ goto err_out_free_netdev;
+ }
+
+ netif_carrier_off(dev);
+
+ netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+
+ err = request_irq(pdev->irq, sky2_intr,
+ (hw->flags & SKY2_HW_USE_MSI) ?
0 : IRQF_SHARED, + hw->irq_name, hw); + if (err) { + dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); + goto err_out_unregister; + } + sky2_write32(hw, B0_IMSK, Y2_IS_BASE); + napi_enable(&hw->napi); + + sky2_show_addr(dev); + + if (hw->ports > 1) { + struct net_device *dev1; + + err = -ENOMEM; + dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default); + if (dev1 && (err = register_netdev(dev1)) == 0) + sky2_show_addr(dev1); + else { + dev_warn(&pdev->dev, + "register of second port failed (%d)\n", err); + hw->dev[1] = NULL; + hw->ports = 1; + if (dev1) + free_netdev(dev1); + } + } + + setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw); + INIT_WORK(&hw->restart_work, sky2_restart); + + pci_set_drvdata(pdev, hw); + pdev->d3_delay = 150; + + return 0; + +err_out_unregister: + if (hw->flags & SKY2_HW_USE_MSI) + pci_disable_msi(pdev); + unregister_netdev(dev); +err_out_free_netdev: + free_netdev(dev); +err_out_free_pci: + pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), + hw->st_le, hw->st_dma); +err_out_reset: + sky2_write8(hw, B0_CTST, CS_RST_SET); +err_out_iounmap: + iounmap(hw->regs); +err_out_free_hw: + kfree(hw); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out: + pci_set_drvdata(pdev, NULL); + return err; +} + +static void __devexit sky2_remove(struct pci_dev *pdev) +{ + struct sky2_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return; + + del_timer_sync(&hw->watchdog_timer); + cancel_work_sync(&hw->restart_work); + + for (i = hw->ports-1; i >= 0; --i) + unregister_netdev(hw->dev[i]); + + sky2_write32(hw, B0_IMSK, 0); + + sky2_power_aux(hw); + + sky2_write8(hw, B0_CTST, CS_RST_SET); + sky2_read8(hw, B0_CTST); + + free_irq(pdev->irq, hw); + if (hw->flags & SKY2_HW_USE_MSI) + pci_disable_msi(pdev); + pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), + hw->st_le, hw->st_dma); + pci_release_regions(pdev); + pci_disable_device(pdev); + + for (i = hw->ports-1; i >= 0; --i) + free_netdev(hw->dev[i]); + + iounmap(hw->regs); + kfree(hw); + + pci_set_drvdata(pdev, NULL); +} + +static int sky2_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sky2_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return 0; + + del_timer_sync(&hw->watchdog_timer); + cancel_work_sync(&hw->restart_work); + + rtnl_lock(); + + sky2_all_down(hw); + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (sky2->wol) + sky2_wol_init(sky2); + } + + sky2_power_aux(hw); + rtnl_unlock(); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sky2_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sky2_hw *hw = pci_get_drvdata(pdev); + int err; + + if (!hw) + return 0; + + /* Re-enable all clocks */ + err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0); + if (err) { + dev_err(&pdev->dev, "PCI write config failed\n"); + goto out; + } + + rtnl_lock(); + sky2_reset(hw); + sky2_all_up(hw); + rtnl_unlock(); + + return 0; +out: + + dev_err(&pdev->dev, "resume failed (%d)\n", err); + pci_disable_device(pdev); + return err; +} + +static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); +#define SKY2_PM_OPS (&sky2_pm_ops) + +#else + +#define SKY2_PM_OPS NULL +#endif + +static void sky2_shutdown(struct pci_dev *pdev) +{ + sky2_suspend(&pdev->dev); + pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); + pci_set_power_state(pdev, PCI_D3hot); +} + +static 
struct pci_driver sky2_driver = { + .name = DRV_NAME, + .id_table = sky2_id_table, + .probe = sky2_probe, + .remove = __devexit_p(sky2_remove), + .shutdown = sky2_shutdown, + .driver.pm = SKY2_PM_OPS, +}; + +static int __init sky2_init_module(void) +{ + pr_info("driver version " DRV_VERSION "\n"); + + sky2_debug_init(); + return pci_register_driver(&sky2_driver); +} + +static void __exit sky2_cleanup_module(void) +{ + pci_unregister_driver(&sky2_driver); + sky2_debug_cleanup(); +} + +module_init(sky2_init_module); +module_exit(sky2_cleanup_module); + +MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver"); +MODULE_AUTHOR("Stephen Hemminger "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h new file mode 100644 index 000000000000..0af31b8b5f10 --- /dev/null +++ b/drivers/net/ethernet/marvell/sky2.h @@ -0,0 +1,2427 @@ +/* + * Definitions for the new Marvell Yukon 2 driver. + */ +#ifndef _SKY2_H +#define _SKY2_H + +#define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */ + +/* PCI config registers */ +enum { + PCI_DEV_REG1 = 0x40, + PCI_DEV_REG2 = 0x44, + PCI_DEV_STATUS = 0x7c, + PCI_DEV_REG3 = 0x80, + PCI_DEV_REG4 = 0x84, + PCI_DEV_REG5 = 0x88, + PCI_CFG_REG_0 = 0x90, + PCI_CFG_REG_1 = 0x94, + + PSM_CONFIG_REG0 = 0x98, + PSM_CONFIG_REG1 = 0x9C, + PSM_CONFIG_REG2 = 0x160, + PSM_CONFIG_REG3 = 0x164, + PSM_CONFIG_REG4 = 0x168, + +}; + +/* Yukon-2 */ +enum pci_dev_reg_1 { + PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */ + PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */ + PCI_SW_PWR_ON_RST= 1<<30, /* SW Power on Reset (Yukon-EX) */ + PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */ + PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */ + PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */ + PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */ + PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */ + + PCI_PHY_LNK_TIM_MSK= 3L<<8,/* Bit 9.. 8: GPHY Link Trigger Timer */ + PCI_ENA_L1_EVENT = 1<<7, /* Enable PEX L1 Event */ + PCI_ENA_GPHY_LNK = 1<<6, /* Enable PEX L1 on GPHY Link down */ + PCI_FORCE_PEX_L1 = 1<<5, /* Force to PEX L1 */ +}; + +enum pci_dev_reg_2 { + PCI_VPD_WR_THR = 0xffL<<24, /* Bit 31..24: VPD Write Threshold */ + PCI_DEV_SEL = 0x7fL<<17, /* Bit 23..17: EEPROM Device Select */ + PCI_VPD_ROM_SZ = 7L<<14, /* Bit 16..14: VPD ROM Size */ + + PCI_PATCH_DIR = 0xfL<<8, /* Bit 11.. 8: Ext Patches dir 3..0 */ + PCI_EXT_PATCHS = 0xfL<<4, /* Bit 7.. 4: Extended Patches 3..0 */ + PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */ + PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */ + + PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ +}; + +/* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */ +enum pci_dev_reg_3 { + P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */ + P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */ + P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */ + P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */ + P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */ + P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */ + P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */ + P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */ + P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */ + P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. 
*/ + P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */ + P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */ + P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. Link1 GMAC */ + P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */ + P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */ + P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */ + P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */ + P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */ + P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */ + P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */ + PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS | + P_CLK_COR_REGS_D0_DIS | + P_CLK_COR_LNK1_D0_DIS | + P_CLK_MAC_LNK1_D0_DIS | + P_CLK_PCI_MST_ARB_DIS | + P_CLK_COR_COMMON_DIS | + P_CLK_COR_LNK1_BMU_DIS, +}; + +/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ +enum pci_dev_reg_4 { + /* (Link Training & Status State Machine) */ + P_PEX_LTSSM_STAT_MSK = 0x7fL<<25, /* Bit 31..25: PEX LTSSM Mask */ +#define P_PEX_LTSSM_STAT(x) ((x << 25) & P_PEX_LTSSM_STAT_MSK) + P_PEX_LTSSM_L1_STAT = 0x34, + P_PEX_LTSSM_DET_STAT = 0x01, + P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */ + /* (Active State Power Management) */ + P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */ + P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */ + P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */ + P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */ + + P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */ + P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */ + P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */ + P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */ + P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */ + P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN + | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY, +}; + +/* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */ +enum pci_dev_reg_5 { + /* Bit 31..27: for A3 & later */ + P_CTL_DIV_CORE_CLK_ENA = 1<<31, /* Divide Core Clock Enable */ + P_CTL_SRESET_VMAIN_AV = 1<<30, /* Soft Reset for Vmain_av De-Glitch */ + P_CTL_BYPASS_VMAIN_AV = 1<<29, /* Bypass En. for Vmain_av De-Glitch */ + P_CTL_TIM_VMAIN_AV_MSK = 3<<27, /* Bit 28..27: Timer Vmain_av Mask */ + /* Bit 26..16: Release Clock on Event */ + P_REL_PCIE_RST_DE_ASS = 1<<26, /* PCIe Reset De-Asserted */ + P_REL_GPHY_REC_PACKET = 1<<25, /* GPHY Received Packet */ + P_REL_INT_FIFO_N_EMPTY = 1<<24, /* Internal FIFO Not Empty */ + P_REL_MAIN_PWR_AVAIL = 1<<23, /* Main Power Available */ + P_REL_CLKRUN_REQ_REL = 1<<22, /* CLKRUN Request Release */ + P_REL_PCIE_RESET_ASS = 1<<21, /* PCIe Reset Asserted */ + P_REL_PME_ASSERTED = 1<<20, /* PME Asserted */ + P_REL_PCIE_EXIT_L1_ST = 1<<19, /* PCIe Exit L1 State */ + P_REL_LOADER_NOT_FIN = 1<<18, /* EPROM Loader Not Finished */ + P_REL_PCIE_RX_EX_IDLE = 1<<17, /* PCIe Rx Exit Electrical Idle State */ + P_REL_GPHY_LINK_UP = 1<<16, /* GPHY Link Up */ + + /* Bit 10.. 
0: Mask for Gate Clock */ + P_GAT_PCIE_RST_ASSERTED = 1<<10,/* PCIe Reset Asserted */ + P_GAT_GPHY_N_REC_PACKET = 1<<9, /* GPHY Not Received Packet */ + P_GAT_INT_FIFO_EMPTY = 1<<8, /* Internal FIFO Empty */ + P_GAT_MAIN_PWR_N_AVAIL = 1<<7, /* Main Power Not Available */ + P_GAT_CLKRUN_REQ_REL = 1<<6, /* CLKRUN Not Requested */ + P_GAT_PCIE_RESET_ASS = 1<<5, /* PCIe Reset Asserted */ + P_GAT_PME_DE_ASSERTED = 1<<4, /* PME De-Asserted */ + P_GAT_PCIE_ENTER_L1_ST = 1<<3, /* PCIe Enter L1 State */ + P_GAT_LOADER_FINISHED = 1<<2, /* EPROM Loader Finished */ + P_GAT_PCIE_RX_EL_IDLE = 1<<1, /* PCIe Rx Electrical Idle State */ + P_GAT_GPHY_LINK_DOWN = 1<<0, /* GPHY Link Down */ + + PCIE_OUR5_EVENT_CLK_D3_SET = P_REL_GPHY_REC_PACKET | + P_REL_INT_FIFO_N_EMPTY | + P_REL_PCIE_EXIT_L1_ST | + P_REL_PCIE_RX_EX_IDLE | + P_GAT_GPHY_N_REC_PACKET | + P_GAT_INT_FIFO_EMPTY | + P_GAT_PCIE_ENTER_L1_ST | + P_GAT_PCIE_RX_EL_IDLE, +}; + +/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */ +enum pci_cfg_reg1 { + P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */ + /* Bit 23..21: Release Clock on Event */ + P_CF1_REL_LDR_NOT_FIN = 1<<23, /* EEPROM Loader Not Finished */ + P_CF1_REL_VMAIN_AVLBL = 1<<22, /* Vmain available */ + P_CF1_REL_PCIE_RESET = 1<<21, /* PCI-E reset */ + /* Bit 20..18: Gate Clock on Event */ + P_CF1_GAT_LDR_NOT_FIN = 1<<20, /* EEPROM Loader Finished */ + P_CF1_GAT_PCIE_RX_IDLE = 1<<19, /* PCI-E Rx Electrical idle */ + P_CF1_GAT_PCIE_RESET = 1<<18, /* PCI-E Reset */ + P_CF1_PRST_PHY_CLKREQ = 1<<17, /* Enable PCI-E rst & PM2PHY gen. CLKREQ */ + P_CF1_PCIE_RST_CLKREQ = 1<<16, /* Enable PCI-E rst generate CLKREQ */ + + P_CF1_ENA_CFG_LDR_DONE = 1<<8, /* Enable core level Config loader done */ + + P_CF1_ENA_TXBMU_RD_IDLE = 1<<1, /* Enable TX BMU Read IDLE for ASPM */ + P_CF1_ENA_TXBMU_WR_IDLE = 1<<0, /* Enable TX BMU Write IDLE for ASPM */ + + PCIE_CFG1_EVENT_CLK_D3_SET = P_CF1_DIS_REL_EVT_RST | + P_CF1_REL_LDR_NOT_FIN | + P_CF1_REL_VMAIN_AVLBL | + P_CF1_REL_PCIE_RESET | + P_CF1_GAT_LDR_NOT_FIN | + P_CF1_GAT_PCIE_RESET | + P_CF1_PRST_PHY_CLKREQ | + P_CF1_ENA_CFG_LDR_DONE | + P_CF1_ENA_TXBMU_RD_IDLE | + P_CF1_ENA_TXBMU_WR_IDLE, +}; + +/* Yukon-Optima */ +enum { + PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */ + + PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */ + PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */ + + PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */ + + PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */ + PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */ + PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */ + PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */ + + PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */ + + PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */ + PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */ +}; + +/* Yukon-Supreme */ +enum { + PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */ + + PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */ + PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */ + PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */ + PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */ + PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */ + PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */ + PSM_CONFIG_REG1_UART_FC_DSR_VAL = 
1<<23, /* Default value for UART_DSR_n */
+ PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */
+ PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */
+ PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
+ PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */
+ PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */
+ PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */
+ PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */
+ PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
+ PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
+ PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
+ PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */
+ PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */
+
+ PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */
+ PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */
+ PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */
+ PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */
+ PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */
+ PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */
+};
+
+/* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */
+enum {
+ /* PHY Link Detect Timer */
+ PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
+ PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
+
+ PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */
+ PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
+};
+
+
+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+         PCI_STATUS_SIG_SYSTEM_ERROR | \
+         PCI_STATUS_REC_MASTER_ABORT | \
+         PCI_STATUS_REC_TARGET_ABORT | \
+         PCI_STATUS_PARITY)
+
+enum csr_regs {
+ B0_RAP = 0x0000,
+ B0_CTST = 0x0004,
+
+ B0_POWER_CTRL = 0x0007,
+ B0_ISRC = 0x0008,
+ B0_IMSK = 0x000c,
+ B0_HWE_ISRC = 0x0010,
+ B0_HWE_IMSK = 0x0014,
+
+ /* Special ISR registers (Yukon-2 only) */
+ B0_Y2_SP_ISRC2 = 0x001c,
+ B0_Y2_SP_ISRC3 = 0x0020,
+ B0_Y2_SP_EISR = 0x0024,
+ B0_Y2_SP_LISR = 0x0028,
+ B0_Y2_SP_ICR = 0x002c,
+
+ B2_MAC_1 = 0x0100,
+ B2_MAC_2 = 0x0108,
+ B2_MAC_3 = 0x0110,
+ B2_CONN_TYP = 0x0118,
+ B2_PMD_TYP = 0x0119,
+ B2_MAC_CFG = 0x011a,
+ B2_CHIP_ID = 0x011b,
+ B2_E_0 = 0x011c,
+
+ B2_Y2_CLK_GATE = 0x011d,
+ B2_Y2_HW_RES = 0x011e,
+ B2_E_3 = 0x011f,
+ B2_Y2_CLK_CTRL = 0x0120,
+
+ B2_TI_INI = 0x0130,
+ B2_TI_VAL = 0x0134,
+ B2_TI_CTRL = 0x0138,
+ B2_TI_TEST = 0x0139,
+
+ B2_TST_CTRL1 = 0x0158,
+ B2_TST_CTRL2 = 0x0159,
+ B2_GP_IO = 0x015c,
+
+ B2_I2C_CTRL = 0x0160,
+ B2_I2C_DATA = 0x0164,
+ B2_I2C_IRQ = 0x0168,
+ B2_I2C_SW = 0x016c,
+
+ Y2_PEX_PHY_DATA = 0x0170,
+ Y2_PEX_PHY_ADDR = 0x0172,
+
+ B3_RAM_ADDR = 0x0180,
+ B3_RAM_DATA_LO = 0x0184,
+ B3_RAM_DATA_HI = 0x0188,
+
+/* RAM Interface Registers */
+/* Yukon-2: use RAM_BUFFER() to access the RAM buffer */
+/*
+ * The HW-Spec. calls these registers Timeout Value 0..11. But these names are
+ * not usable in SW. Please notice these are NOT real timeouts, these are
+ * the number of qWords transferred continuously.
+ */
+#define RAM_BUFFER(port, reg) (reg | (port <<6))
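+/* For example, RAM_BUFFER(1, B3_RI_WTO_R1) = 0x0190 | (1 << 6) = 0x01d0,
+ * i.e. the port 1 copy of a register lives 0x40 above the port 0 copy.
+ */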
+ B3_RI_WTO_R1 = 0x0190,
+ B3_RI_WTO_XA1 = 0x0191,
+ B3_RI_WTO_XS1 = 0x0192,
+ B3_RI_RTO_R1 = 0x0193,
+ B3_RI_RTO_XA1 = 0x0194,
+ B3_RI_RTO_XS1 = 0x0195,
+ B3_RI_WTO_R2 = 0x0196,
+ B3_RI_WTO_XA2 = 0x0197,
+ B3_RI_WTO_XS2 = 0x0198,
+ B3_RI_RTO_R2 = 0x0199,
+ B3_RI_RTO_XA2 = 0x019a,
+ B3_RI_RTO_XS2 = 0x019b,
+ B3_RI_TO_VAL = 0x019c,
+ B3_RI_CTRL = 0x01a0,
+ B3_RI_TEST = 0x01a2,
+ B3_MA_TOINI_RX1 = 0x01b0,
+ B3_MA_TOINI_RX2 = 0x01b1,
+ B3_MA_TOINI_TX1 = 0x01b2,
+ B3_MA_TOINI_TX2 = 0x01b3,
+ B3_MA_TOVAL_RX1 = 0x01b4,
+ B3_MA_TOVAL_RX2 = 0x01b5,
+ B3_MA_TOVAL_TX1 = 0x01b6,
+ B3_MA_TOVAL_TX2 = 0x01b7,
+ B3_MA_TO_CTRL = 0x01b8,
+ B3_MA_TO_TEST = 0x01ba,
+ B3_MA_RCINI_RX1 = 0x01c0,
+ B3_MA_RCINI_RX2 = 0x01c1,
+ B3_MA_RCINI_TX1 = 0x01c2,
+ B3_MA_RCINI_TX2 = 0x01c3,
+ B3_MA_RCVAL_RX1 = 0x01c4,
+ B3_MA_RCVAL_RX2 = 0x01c5,
+ B3_MA_RCVAL_TX1 = 0x01c6,
+ B3_MA_RCVAL_TX2 = 0x01c7,
+ B3_MA_RC_CTRL = 0x01c8,
+ B3_MA_RC_TEST = 0x01ca,
+ B3_PA_TOINI_RX1 = 0x01d0,
+ B3_PA_TOINI_RX2 = 0x01d4,
+ B3_PA_TOINI_TX1 = 0x01d8,
+ B3_PA_TOINI_TX2 = 0x01dc,
+ B3_PA_TOVAL_RX1 = 0x01e0,
+ B3_PA_TOVAL_RX2 = 0x01e4,
+ B3_PA_TOVAL_TX1 = 0x01e8,
+ B3_PA_TOVAL_TX2 = 0x01ec,
+ B3_PA_CTRL = 0x01f0,
+ B3_PA_TEST = 0x01f2,
+
+ Y2_CFG_SPC = 0x1c00, /* PCI config space region */
+ Y2_CFG_AER = 0x1d00, /* PCI Advanced Error Report region */
+};
+
+/* B0_CTST 24 bit Control/Status register */
+enum {
+ Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
+ Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
+ Y2_HW_WOL_ON = 1<<15,/* HW WOL On  (Yukon-EC Ultra A1 only) */
+ Y2_HW_WOL_OFF = 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
+ Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
+ Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
+ Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable  (YUKON-2 only) */
+ Y2_CLK_RUN_DIS = 1<<10,/* CLK_RUN Disable (YUKON-2 only) */
+ Y2_LED_STAT_ON = 1<<9, /* Status LED On  (YUKON-2 only) */
+ Y2_LED_STAT_OFF = 1<<8, /* Status LED Off (YUKON-2 only) */
+
+ CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
+ CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
+ CS_STOP_DONE = 1<<5, /* Stop Master is finished */
+ CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
+ CS_MRST_CLR = 1<<3, /* Clear Master reset */
+ CS_MRST_SET = 1<<2, /* Set Master reset */
+ CS_RST_CLR = 1<<1, /* Clear Software reset */
+ CS_RST_SET = 1, /* Set   Software reset */
+};
+
+/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
+enum {
+ PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
+ PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
+ PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
+ PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
+ PC_VAUX_ON = 1<<3, /* Switch VAUX On */
+ PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
+ PC_VCC_ON = 1<<1, /* Switch VCC On */
+ PC_VCC_OFF = 1<<0, /* Switch VCC Off */
+};
+
+/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
+
+/* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */
+/* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */
+/* B0_Y2_SP_EISR 32 bit Enter ISR Reg */
+/* B0_Y2_SP_LISR 32 bit Leave ISR Reg */
+enum {
+ Y2_IS_HW_ERR = 1<<31, /* Interrupt HW Error */
+ Y2_IS_STAT_BMU = 1<<30, /* Status BMU Interrupt */
+ Y2_IS_ASF = 1<<29, /* ASF subsystem Interrupt */
+ Y2_IS_CPU_TO = 1<<28, /* CPU Timeout */
+ Y2_IS_POLL_CHK = 1<<27, /* Check IRQ from polling unit */
+ Y2_IS_TWSI_RDY = 1<<26, /* IRQ on end of TWSI Tx
*/ + Y2_IS_IRQ_SW = 1<<25, /* SW forced IRQ */ + Y2_IS_TIMINT = 1<<24, /* IRQ from Timer */ + + Y2_IS_IRQ_PHY2 = 1<<12, /* Interrupt from PHY 2 */ + Y2_IS_IRQ_MAC2 = 1<<11, /* Interrupt from MAC 2 */ + Y2_IS_CHK_RX2 = 1<<10, /* Descriptor error Rx 2 */ + Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */ + Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */ + + Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */ + Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */ + Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */ + + Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */ + Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */ + Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */ + Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */ + Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */ + + Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU, + Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 + | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, + Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 + | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, + Y2_IS_ERROR = Y2_IS_HW_ERR | + Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 | + Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, +}; + +/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ +enum { + IS_ERR_MSK = 0x00003fff,/* All Error bits */ + + IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ + IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ + IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */ + IS_IRQ_STAT = 1<<10, /* IRQ status exception */ + IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */ + IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */ + IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */ + IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */ + IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */ + IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */ + IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */ + IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ + IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ + IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ +}; + +/* Hardware error interrupt mask for Yukon 2 */ +enum { + Y2_IS_TIST_OV = 1<<29,/* Time Stamp Timer overflow interrupt */ + Y2_IS_SENSOR = 1<<28, /* Sensor interrupt */ + Y2_IS_MST_ERR = 1<<27, /* Master error interrupt */ + Y2_IS_IRQ_STAT = 1<<26, /* Status exception interrupt */ + Y2_IS_PCI_EXP = 1<<25, /* PCI-Express interrupt */ + Y2_IS_PCI_NEXP = 1<<24, /* PCI-Express error similar to PCI error */ + /* Link 2 */ + Y2_IS_PAR_RD2 = 1<<13, /* Read RAM parity error interrupt */ + Y2_IS_PAR_WR2 = 1<<12, /* Write RAM parity error interrupt */ + Y2_IS_PAR_MAC2 = 1<<11, /* MAC hardware fault interrupt */ + Y2_IS_PAR_RX2 = 1<<10, /* Parity Error Rx Queue 2 */ + Y2_IS_TCP_TXS2 = 1<<9, /* TCP length mismatch sync Tx queue IRQ */ + Y2_IS_TCP_TXA2 = 1<<8, /* TCP length mismatch async Tx queue IRQ */ + /* Link 1 */ + Y2_IS_PAR_RD1 = 1<<5, /* Read RAM parity error interrupt */ + Y2_IS_PAR_WR1 = 1<<4, /* Write RAM parity error interrupt */ + Y2_IS_PAR_MAC1 = 1<<3, /* MAC hardware fault interrupt */ + Y2_IS_PAR_RX1 = 1<<2, /* Parity Error Rx Queue 1 */ + Y2_IS_TCP_TXS1 = 1<<1, /* TCP length mismatch sync Tx queue IRQ */ + Y2_IS_TCP_TXA1 = 1<<0, /* TCP length mismatch async Tx queue IRQ */ + + Y2_HWE_L1_MASK = Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 | + Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1, + Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 | + Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2, + + Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | 
Y2_IS_MST_ERR | Y2_IS_IRQ_STAT | + Y2_HWE_L1_MASK | Y2_HWE_L2_MASK, +}; + +/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */ +enum { + DPT_START = 1<<1, + DPT_STOP = 1<<0, +}; + +/* B2_TST_CTRL1 8 bit Test Control Register 1 */ +enum { + TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */ + TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */ + TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */ + TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */ + TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */ + TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */ + TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */ + TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */ +}; + +/* B2_GPIO */ +enum { + GLB_GPIO_CLK_DEB_ENA = 1<<31, /* Clock Debug Enable */ + GLB_GPIO_CLK_DBG_MSK = 0xf<<26, /* Clock Debug */ + + GLB_GPIO_INT_RST_D3_DIS = 1<<15, /* Disable Internal Reset After D3 to D0 */ + GLB_GPIO_LED_PAD_SPEED_UP = 1<<14, /* LED PAD Speed Up */ + GLB_GPIO_STAT_RACE_DIS = 1<<13, /* Status Race Disable */ + GLB_GPIO_TEST_SEL_MSK = 3<<11, /* Testmode Select */ + GLB_GPIO_TEST_SEL_BASE = 1<<11, + GLB_GPIO_RAND_ENA = 1<<10, /* Random Enable */ + GLB_GPIO_RAND_BIT_1 = 1<<9, /* Random Bit 1 */ +}; + +/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ +enum { + CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */ + /* Bit 3.. 2: reserved */ + CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */ + CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/ +}; + +/* B2_CHIP_ID 8 bit Chip Identification Number */ +enum { + CHIP_ID_YUKON_XL = 0xb3, /* YUKON-2 XL */ + CHIP_ID_YUKON_EC_U = 0xb4, /* YUKON-2 EC Ultra */ + CHIP_ID_YUKON_EX = 0xb5, /* YUKON-2 Extreme */ + CHIP_ID_YUKON_EC = 0xb6, /* YUKON-2 EC */ + CHIP_ID_YUKON_FE = 0xb7, /* YUKON-2 FE */ + CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */ + CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */ + CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */ + CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */ + CHIP_ID_YUKON_PRM = 0xbd, /* YUKON-2 Optima Prime */ + CHIP_ID_YUKON_OP_2 = 0xbe, /* YUKON-2 Optima 2 */ +}; + +enum yukon_xl_rev { + CHIP_REV_YU_XL_A0 = 0, + CHIP_REV_YU_XL_A1 = 1, + CHIP_REV_YU_XL_A2 = 2, + CHIP_REV_YU_XL_A3 = 3, +}; + +enum yukon_ec_rev { + CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ + CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ + CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. 
for Yukon-EC A3 */ +}; +enum yukon_ec_u_rev { + CHIP_REV_YU_EC_U_A0 = 1, + CHIP_REV_YU_EC_U_A1 = 2, + CHIP_REV_YU_EC_U_B0 = 3, + CHIP_REV_YU_EC_U_B1 = 5, +}; +enum yukon_fe_rev { + CHIP_REV_YU_FE_A1 = 1, + CHIP_REV_YU_FE_A2 = 2, +}; +enum yukon_fe_p_rev { + CHIP_REV_YU_FE2_A0 = 0, +}; +enum yukon_ex_rev { + CHIP_REV_YU_EX_A0 = 1, + CHIP_REV_YU_EX_B0 = 2, +}; +enum yukon_supr_rev { + CHIP_REV_YU_SU_A0 = 0, + CHIP_REV_YU_SU_B0 = 1, + CHIP_REV_YU_SU_B1 = 3, +}; + + +/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ +enum { + Y2_STATUS_LNK2_INAC = 1<<7, /* Status Link 2 inactive (0 = active) */ + Y2_CLK_GAT_LNK2_DIS = 1<<6, /* Disable clock gating Link 2 */ + Y2_COR_CLK_LNK2_DIS = 1<<5, /* Disable Core clock Link 2 */ + Y2_PCI_CLK_LNK2_DIS = 1<<4, /* Disable PCI clock Link 2 */ + Y2_STATUS_LNK1_INAC = 1<<3, /* Status Link 1 inactive (0 = active) */ + Y2_CLK_GAT_LNK1_DIS = 1<<2, /* Disable clock gating Link 1 */ + Y2_COR_CLK_LNK1_DIS = 1<<1, /* Disable Core clock Link 1 */ + Y2_PCI_CLK_LNK1_DIS = 1<<0, /* Disable PCI clock Link 1 */ +}; + +/* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */ +enum { + CFG_LED_MODE_MSK = 7<<2, /* Bit 4.. 2: LED Mode Mask */ + CFG_LINK_2_AVAIL = 1<<1, /* Link 2 available */ + CFG_LINK_1_AVAIL = 1<<0, /* Link 1 available */ +}; +#define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2) +#define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL) + + +/* B2_Y2_CLK_CTRL 32 bit Clock Frequency Control Register (Yukon-2/EC) */ +enum { + Y2_CLK_DIV_VAL_MSK = 0xff<<16,/* Bit 23..16: Clock Divisor Value */ +#define Y2_CLK_DIV_VAL(x) (((x)<<16) & Y2_CLK_DIV_VAL_MSK) + Y2_CLK_DIV_VAL2_MSK = 7<<21, /* Bit 23..21: Clock Divisor Value */ + Y2_CLK_SELECT2_MSK = 0x1f<<16,/* Bit 20..16: Clock Select */ +#define Y2_CLK_DIV_VAL_2(x) (((x)<<21) & Y2_CLK_DIV_VAL2_MSK) +#define Y2_CLK_SEL_VAL_2(x) (((x)<<16) & Y2_CLK_SELECT2_MSK) + Y2_CLK_DIV_ENA = 1<<1, /* Enable Core Clock Division */ + Y2_CLK_DIV_DIS = 1<<0, /* Disable Core Clock Division */ +}; + +/* B2_TI_CTRL 8 bit Timer control */ +/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ +enum { + TIM_START = 1<<2, /* Start Timer */ + TIM_STOP = 1<<1, /* Stop Timer */ + TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */ +}; + +/* B2_TI_TEST 8 Bit Timer Test */ +/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ +/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ +enum { + TIM_T_ON = 1<<2, /* Test mode on */ + TIM_T_OFF = 1<<1, /* Test mode off */ + TIM_T_STEP = 1<<0, /* Test step */ +}; + +/* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */ +enum { + PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */ + PEX_DB_ACCESS = 1<<30, /* Access to debug register */ +}; + +/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ + /* Bit 31..19: reserved */ +#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 
0: RAM Address Range */
+/* RAM Interface Registers */
+
+/* B3_RI_CTRL 16 bit RAM Interface Control Register */
+enum {
+ RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
+ RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
+
+ RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
+ RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
+};
+
+#define SK_RI_TO_53 36  /* RAM interface timeout */
+
+
+/* Port related registers: FIFO and Arbiter */
+#define SK_REG(port,reg) (((port)<<7)+(reg))
+
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
+/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
+/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
+/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
+
+#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
+
+/* TXA_CTRL 8 bit Tx Arbiter Control Register */
+enum {
+ TXA_ENA_FSYNC = 1<<7, /* Enable  force of sync Tx queue */
+ TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
+ TXA_ENA_ALLOC = 1<<5, /* Enable  alloc of free bandwidth */
+ TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
+ TXA_START_RC = 1<<3, /* Start sync Rate Control */
+ TXA_STOP_RC = 1<<2, /* Stop  sync Rate Control */
+ TXA_ENA_ARB = 1<<1, /* Enable  Tx Arbiter */
+ TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
+};
+
+/*
+ * Bank 4 - 5
+ */
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+enum {
+ TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
+ TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
+ TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
+ TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
+ TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
+ TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
+ TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
+
+ RSS_KEY = 0x0220, /* RSS Key setup */
+ RSS_CFG = 0x0248, /* RSS Configuration */
+};
+
+enum {
+ HASH_TCP_IPV6_EX_CTRL = 1<<5,
+ HASH_IPV6_EX_CTRL = 1<<4,
+ HASH_TCP_IPV6_CTRL = 1<<3,
+ HASH_IPV6_CTRL = 1<<2,
+ HASH_TCP_IPV4_CTRL = 1<<1,
+ HASH_IPV4_CTRL = 1<<0,
+
+ HASH_ALL = 0x3f,
+};
+
+enum {
+ B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
+ B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
+ B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
+ B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
+ B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
+ B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
+ B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
+ B8_TA2_REGS = 0x0780,/* Transmit async queue 2 */
+ B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
+};
+
+/* Queue Register Offsets, use Q_ADDR() to access */
+enum {
+ B8_Q_REGS = 0x0400, /* base of Queue registers */
+ Q_D = 0x00, /* 8*32 bit Current Descriptor */
+ Q_VLAN = 0x20, /* 16 bit Current VLAN Tag */
+ Q_DONE = 0x24, /* 16 bit Done Index */
+ Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
+ Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
+ Q_BC = 0x30, /* 32 bit Current Byte Counter */
+ Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
+ Q_TEST = 0x38, /* 32 bit Test/Control Register */
+
+/* Yukon-2 */
+ Q_WM = 0x40, /* 16 bit FIFO Watermark */
+ Q_AL = 0x42, /* 8 bit FIFO Alignment */
+ Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */
+ Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */
+ Q_RP = 0x48, /* 8 bit FIFO Read Pointer */
+ Q_RL = 0x4a, /* 8 bit FIFO Read Level */
+ Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */
+ Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */
+ Q_WL = 0x4e, /* 8 bit FIFO Write Level */
+ Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */
+};
+#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
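+/* For example, the BMU control/status register of receive queue 1 is at
+ * Q_ADDR(Q_R1, Q_CSR) = 0x0400 + 0x0000 + 0x34 = 0x0434.
+ */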
+/* Q_TEST 32 bit Test Register */
+enum {
+ /* Transmit */
+ F_TX_CHK_AUTO_OFF = 1<<31, /* Tx checksum auto calc off (Yukon EX) */
+ F_TX_CHK_AUTO_ON  = 1<<30, /* Tx checksum auto calc on  (Yukon EX) */
+
+ /* Receive */
+ F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
+
+ /* Hardware testbits not used */
+};
+
+/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+enum {
+ Y2_B8_PREF_REGS = 0x0450,
+
+ PREF_UNIT_CTRL = 0x00, /* 32 bit Control register */
+ PREF_UNIT_LAST_IDX = 0x04, /* 16 bit Last Index */
+ PREF_UNIT_ADDR_LO = 0x08, /* 32 bit List start addr, low part */
+ PREF_UNIT_ADDR_HI = 0x0c, /* 32 bit List start addr, high part*/
+ PREF_UNIT_GET_IDX = 0x10, /* 16 bit Get Index */
+ PREF_UNIT_PUT_IDX = 0x14, /* 16 bit Put Index */
+ PREF_UNIT_FIFO_WP = 0x20, /* 8 bit FIFO write pointer */
+ PREF_UNIT_FIFO_RP = 0x24, /* 8 bit FIFO read pointer */
+ PREF_UNIT_FIFO_WM = 0x28, /* 8 bit FIFO watermark */
+ PREF_UNIT_FIFO_LEV = 0x2c, /* 8 bit FIFO level */
+
+ PREF_UNIT_MASK_IDX = 0x0fff,
+};
+#define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg))
+
+/* RAM Buffer Register Offsets */
+enum {
+
+ RB_START = 0x00,/* 32 bit RAM Buffer Start Address */
+ RB_END = 0x04,/* 32 bit RAM Buffer End Address */
+ RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
+ RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
+ RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
+ RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
+ RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */
+ RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
+ /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
+ RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
+ RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
+ RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
+ RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
+ RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
+};
+
+/* Receive and Transmit Queues */
+enum {
+ Q_R1 = 0x0000, /* Receive Queue 1 */
+ Q_R2 = 0x0080, /* Receive Queue 2 */
+ Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
+ Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
+ Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
+ Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
+};
+
+/* Different PHY Types */
+enum {
+ PHY_ADDR_MARV = 0,
+};
+
+#define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs))
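+/* For example, the control register of the RAM buffer serving receive
+ * queue 1 is at RB_ADDR(RB_CTRL, Q_R1) = 0x0800 + 0x0000 + 0x28 = 0x0828.
+ */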
+
+
+enum {
+ LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
+ LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
+ LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
+ LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
+
+ LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
+
+/* Receive GMAC FIFO (YUKON and Yukon-2) */
+
+ RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
+ RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
+ RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
+ RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
+ RX_GMF_FL_THR = 0x0c50,/* 16 bit Rx GMAC FIFO Flush Threshold */
+ RX_GMF_FL_CTRL = 0x0c52,/* 16 bit Rx GMAC FIFO Flush Control */
+ RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
+ RX_GMF_UP_THR = 0x0c58,/* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */
+ RX_GMF_LP_THR = 0x0c5a,/* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */
+ RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
+ RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
+
+ RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
+
+ RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
+
+ RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
+};
+
+
+/* Q_BC 32 bit Current Byte Counter */
+
+/* BMU Control Status Registers */
+/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
+/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
+/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
+/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
+/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
+/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
+/* Q_CSR 32 bit BMU Control/Status Register */
+
+/* Rx BMU Control / Status Registers (Yukon-2) */
+enum {
+ BMU_IDLE = 1<<31, /* BMU Idle State */
+ BMU_RX_TCP_PKT = 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */
+ BMU_RX_IP_PKT = 1<<29, /* Rx IP Packet (when RSS Hash enabled) */
+
+ BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable  Rx RSS Hash */
+ BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */
+ BMU_ENA_RX_CHKSUM = 1<<13, /* Enable  Rx TCP/IP Checksum Check */
+ BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */
+ BMU_CLR_IRQ_PAR = 1<<11, /* Clear IRQ on Parity errors (Rx) */
+ BMU_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment error (Tx) */
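+ /* Note that bit 11 is dual-purpose: on Rx queues it clears parity-error
+ * IRQs, on Tx queues it clears TCP segment-error IRQs.
+ */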
+ BMU_CLR_IRQ_CHK = 1<<10, /* Clear IRQ Check */
+ BMU_STOP = 1<<9, /* Stop  Rx/Tx Queue */
+ BMU_START = 1<<8, /* Start  Rx/Tx Queue */
+ BMU_FIFO_OP_ON = 1<<7, /* FIFO Operational On */
+ BMU_FIFO_OP_OFF = 1<<6, /* FIFO Operational Off */
+ BMU_FIFO_ENA = 1<<5, /* Enable FIFO */
+ BMU_FIFO_RST = 1<<4, /* Reset  FIFO */
+ BMU_OP_ON = 1<<3, /* BMU Operational On */
+ BMU_OP_OFF = 1<<2, /* BMU Operational Off */
+ BMU_RST_CLR = 1<<1, /* Clear BMU Reset (Enable) */
+ BMU_RST_SET = 1<<0, /* Set   BMU Reset */
+
+ BMU_CLR_RESET = BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR,
+ BMU_OPER_INIT = BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START |
+   BMU_FIFO_ENA | BMU_OP_ON,
+
+ BMU_WM_DEFAULT = 0x600,
+ BMU_WM_PEX = 0x80,
+};
+
+/* Tx BMU Control / Status Registers (Yukon-2) */
+ /* Bit 31: same as for Rx */
+enum {
+ BMU_TX_IPIDINCR_ON = 1<<13, /* Enable  IP ID Increment */
+ BMU_TX_IPIDINCR_OFF = 1<<12, /* Disable IP ID Increment */
+ BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
+};
+
+/* TBMU_TEST 0x06B8 Transmit BMU Test Register */
+enum {
+ TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
+ TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
+ TBMU_TEST_HOME_ADD_PAD_FIX1_EN = 1<<29, /* Home Address Padding FIX1 Enable */
+ TBMU_TEST_HOME_ADD_PAD_FIX1_DIS = 1<<28, /* Home Address Padding FIX1 Disable */
+ TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */
+ TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */
+ TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */
+ TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */
+
+ TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */
+ TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */
+ TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */
+
+ TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */
+ TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */
+ TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */
+
+ TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */
+ TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */
+ TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */
+
+ TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */
+ TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */
+ TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */
+
+ TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */
+ TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */
+ TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */
+
+ TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */
+ TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */
+ TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */
+};
+
+/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+/* PREF_UNIT_CTRL 32 bit Prefetch Control register */
+enum {
+ PREF_UNIT_OP_ON = 1<<3, /* prefetch unit operational */
+ PREF_UNIT_OP_OFF = 1<<2, /* prefetch unit not operational */
+ PREF_UNIT_RST_CLR = 1<<1, /* Clear Prefetch Unit Reset */
+ PREF_UNIT_RST_SET = 1<<0, /* Set   Prefetch Unit Reset */
+};
+
+/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
+/* RB_START 32 bit RAM Buffer Start Address */
+/* RB_END 32 bit RAM Buffer End Address */
+/* RB_WP 32 bit RAM Buffer Write Pointer */
+/* RB_RP 32 bit RAM Buffer
Read Pointer */ +/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ +/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ +/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ +/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ +/* RB_PC 32 bit RAM Buffer Packet Counter */ +/* RB_LEV 32 bit RAM Buffer Level Register */ + +#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */ +/* RB_TST2 8 bit RAM Buffer Test Register 2 */ +/* RB_TST1 8 bit RAM Buffer Test Register 1 */ + +/* RB_CTRL 8 bit RAM Buffer Control Register */ +enum { + RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */ + RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */ + RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */ + RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */ +}; + + +/* Transmit GMAC FIFO (YUKON only) */ +enum { + TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */ + TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ + TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */ + + TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */ + TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */ + TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */ + + TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ + TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ + TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ + + /* Threshold values for Yukon-EC Ultra and Extreme */ + ECU_AE_THR = 0x0070, /* Almost Empty Threshold */ + ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */ + ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */ +}; + +/* Descriptor Poll Timer Registers */ +enum { + B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */ + B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */ + B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */ + + B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */ +}; + +/* Time Stamp Timer Registers (YUKON only) */ +enum { + GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */ + GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */ + GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */ +}; + +/* Polling Unit Registers (Yukon-2 only) */ +enum { + POLL_CTRL = 0x0e20, /* 32 bit Polling Unit Control Reg */ + POLL_LAST_IDX = 0x0e24,/* 16 bit Polling Unit List Last Index */ + + POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit Poll. List Start Addr (low) */ + POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. 
List Start Addr (high) */ +}; + +enum { + SMB_CFG = 0x0e40, /* 32 bit SMBus Config Register */ + SMB_CSR = 0x0e44, /* 32 bit SMBus Control/Status Register */ +}; + +enum { + CPU_WDOG = 0x0e48, /* 32 bit Watchdog Register */ + CPU_CNTR = 0x0e4C, /* 32 bit Counter Register */ + CPU_TIM = 0x0e50,/* 32 bit Timer Compare Register */ + CPU_AHB_ADDR = 0x0e54, /* 32 bit CPU AHB Debug Register */ + CPU_AHB_WDATA = 0x0e58, /* 32 bit CPU AHB Debug Register */ + CPU_AHB_RDATA = 0x0e5C, /* 32 bit CPU AHB Debug Register */ + HCU_MAP_BASE = 0x0e60, /* 32 bit Reset Mapping Base */ + CPU_AHB_CTRL = 0x0e64, /* 32 bit CPU AHB Debug Register */ + HCU_CCSR = 0x0e68, /* 32 bit CPU Control and Status Register */ + HCU_HCSR = 0x0e6C, /* 32 bit Host Control and Status Register */ +}; + +/* ASF Subsystem Registers (Yukon-2 only) */ +enum { + B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */ + B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit ASF SMB Control/Status/Data */ + B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit ASF IRQ Vector Base */ + + B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit ASF Status and Command Reg */ + B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit ASF Host Communication Reg */ + B28_Y2_DATA_REG_1 = 0x0e70,/* 32 bit ASF/Host Data Register 1 */ + B28_Y2_DATA_REG_2 = 0x0e74,/* 32 bit ASF/Host Data Register 2 */ + B28_Y2_DATA_REG_3 = 0x0e78,/* 32 bit ASF/Host Data Register 3 */ + B28_Y2_DATA_REG_4 = 0x0e7c,/* 32 bit ASF/Host Data Register 4 */ +}; + +/* Status BMU Registers (Yukon-2 only)*/ +enum { + STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */ + STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */ + + STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit Status List Start Addr (low) */ + STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit Status List Start Addr (high) */ + STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */ + STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */ + STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */ + STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */ + STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */ + STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */ + +/* FIFO Control/Status Registers (Yukon-2 only)*/ + STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */ + STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */ + STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */ + STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */ + STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */ + STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */ + STAT_FIFO_ISR_WM= 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */ + +/* Level and ISR Timer Registers (Yukon-2 only)*/ + STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit Level Timer Init. Value Reg */ + STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit Level Timer Counter Reg */ + STAT_LEV_TIMER_CTRL= 0x0eb8,/* 8 bit Level Timer Control Reg */ + STAT_LEV_TIMER_TEST= 0x0eb9,/* 8 bit Level Timer Test Reg */ + STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */ + STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */ + STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */ + STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */ + STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. 
Value Reg */
+ STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */
+ STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */
+ STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */
+};
+
+enum {
+ LINKLED_OFF = 0x01,
+ LINKLED_ON = 0x02,
+ LINKLED_LINKSYNC_OFF = 0x04,
+ LINKLED_LINKSYNC_ON = 0x08,
+ LINKLED_BLINK_OFF = 0x10,
+ LINKLED_BLINK_ON = 0x20,
+};
+
+/* GMAC and GPHY Control Registers (YUKON only) */
+enum {
+ GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
+ GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
+ GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
+ GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
+ GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
+
+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
+ WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
+ WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
+ WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
+ WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
+ WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
+
+/* WOL Pattern Length Registers (YUKON only) */
+ WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
+ WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
+
+/* WOL Pattern Counter Registers (YUKON only) */
+ WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
+ WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
+};
+#define WOL_REGS(port, x) (x + (port)*0x80)
+
+enum {
+ WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
+ WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
+};
+#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
+
+enum {
+ BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
+ BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
+};
+
+/*
+ * Marvell PHY Registers, indirectly addressed over GMAC
+ */
+enum {
+ PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
+ PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
+ PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+ /* Marvell-specific registers */
+ PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
+ PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
+ PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
+ PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
+ PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
+ PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
+ PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
+ PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
+ PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
+ PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
+ PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
+ PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
+ PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
+ PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
+ PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec.
Stat Reg */ + PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */ + PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */ + PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */ + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ + PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */ + PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */ + PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */ + PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */ + PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ +}; + +enum { + PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ + PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ + PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */ + PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */ + PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */ + PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */ + PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */ + PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */ + PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */ + PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */ +}; + +enum { + PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */ + PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */ + PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */ +}; + +enum { + PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */ + + PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */ + PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */ + PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */ + PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */ + PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */ + PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */ + PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */ +}; + +enum { + PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */ + PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */ + PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */ +}; + +/* different Marvell PHY Ids */ +enum { + PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */ + + PHY_BCOM_ID1_A1 = 0x6041, + PHY_BCOM_ID1_B2 = 0x6043, + PHY_BCOM_ID1_C0 = 0x6044, + PHY_BCOM_ID1_C5 = 0x6047, + + PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ + PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ + PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ + PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ + PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */ + PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) 
*/
+};
+
+/* Advertisement register bits */
+enum {
+ PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
+ PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
+ PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
+
+ PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
+ PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
+ PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */
+ PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
+ PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
+ PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
+ PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
+ PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
+ PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
+ PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
+ PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
+   PHY_AN_100HALF | PHY_AN_100FULL,
+};
+
+/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+enum {
+ PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
+ PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
+ PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
+ PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
+ PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
+ PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
+ /* Bit 9..8: reserved */
+ PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
+};
+
+/** Marvell-Specific */
+enum {
+ PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
+ PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
+ PHY_M_AN_RF = 1<<13, /* Remote Fault */
+
+ PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
+ PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
+ PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
+ PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
+ PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
+ PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */
+ PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */
+ PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit  4.. 0: Selector Field Mask */
+};
+
+/* special defines for FIBER (88E1011S only) */
+enum {
+ PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
+ PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
+ PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */
+ PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */
+};
+
+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
+enum {
+ PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
+ PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
+ PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
+ PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
+};
+
+/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+enum {
+ PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
+ PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
+ PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
+ PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
+ PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
+ PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
+};
+
+/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
+enum {
+ PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
+ PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
+ PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
+ PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
+ PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit  9.. 8: Energy Detect Mask */
+ PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
+ PHY_M_PC_MDIX_MSK = 3<<5,/* Bit  6.. 5: MDI/MDIX Config. Mask */
+ PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
+ PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
+ PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
+ PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
+ PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
+};
+
+enum {
+ PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
+ PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
+};
+
+#define PHY_M_PC_MDI_XMODE(x) (((u16)(x)<<5) & PHY_M_PC_MDIX_MSK)
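+/* For example, PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) = 3<<5 = 0x0060, which
+ * selects automatic MDI/MDIX crossover in the 2-bit field at bits 6..5
+ * (see the enum below for the defined modes).
+ */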
+ +/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */ + PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */ + PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */ + PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */ + PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */ + PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */ +}; + +/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ +enum { + PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */ + PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */ + PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */ + PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */ + PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */ + PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */ + PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */ + PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */ + PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */ + PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */ + PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */ + PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */ +}; + +enum { + PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */ + PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ +}; + +#define PHY_M_PC_MDI_XMODE(x) (((u16)(x)<<5) & PHY_M_PC_MDIX_MSK) + +enum { + PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ + PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ + PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ +}; + +/* for Yukon-EC Ultra Gigabit Ethernet PHY (88E1149 only) */ +enum { + PHY_M_PC_COP_TX_DIS = 1<<3, /* Copper Transmitter Disable */ + PHY_M_PC_POW_D_ENA = 1<<2, /* Power Down Enable */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ + PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */ + PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Pulse (NLP) Check */ + PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */ + PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Pulse Generation */ + + PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */ + PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */ + + PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */ + PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */ +}; + +/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ +enum { + PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */ + PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */ + PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */ + PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */ + PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */ + PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */ + PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */ + PHY_M_PS_LINK_UP = 1<<10, /* Link Up */ + PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */ + PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */ + PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.)
*/ + PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */ + PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */ + PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */ + PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */ + PHY_M_PS_JABBER = 1<<0, /* Jabber */ +}; + +#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */ + PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps */ +}; + +enum { + PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */ + PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */ + PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */ + PHY_M_IS_AN_PR = 1<<12, /* Page Received */ + PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */ + PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */ + PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */ + PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */ + PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */ + PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */ + PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */ + PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */ + + PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */ + PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */ + PHY_M_IS_JABBER = 1<<0, /* Jabber */ + + PHY_M_DEF_MSK = PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE + | PHY_M_IS_DUP_CHANGE, + PHY_M_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL, +}; + + +/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ +enum { + PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */ + PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */ + + PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */ + PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */ + /* (88E1111 only) */ + PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */ + /* !!! Errata in spec. (1 = disable) */ + PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/ + PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */ + PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */ + PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */ + PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */ + PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */ + + PHY_M_10B_TE_ENABLE = 1<<7, /* 10Base-Te Enable (88E8079 and above) */ +}; +#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10 & PHY_M_EC_M_DSC_MSK) + /* 00=1x; 01=2x; 10=3x; 11=4x */ +#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8 & PHY_M_EC_S_DSC_MSK) + /* 00=dis; 01=1x; 10=2x; 11=3x */ +#define PHY_M_EC_DSC_2(x) ((u16)(x)<<9 & PHY_M_EC_M_DSC_MSK2) + /* 000=1x; 001=2x; 010=3x; 011=4x */ +#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4 & PHY_M_EC_MAC_S_MSK) + /* 01X=0; 110=2.5; 111=25 (MHz) */ + +/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ +enum { + PHY_M_PC_DIS_LINK_Pa = 1<<15,/* Disable Link Pulses */ + PHY_M_PC_DSC_MSK = 7<<12,/* Bit 14..12: Downshift Counter */ + PHY_M_PC_DOWN_S_ENA = 1<<11,/* Downshift Enable */ +}; +/* !!! Errata in spec. 
(1 = disable) */ + +#define PHY_M_PC_DSC(x) (((u16)(x)<<12) & PHY_M_PC_DSC_MSK) + /* 100=5x; 101=6x; 110=7x; 111=8x */ +enum { + MAC_TX_CLK_0_MHZ = 2, + MAC_TX_CLK_2_5_MHZ = 6, + MAC_TX_CLK_25_MHZ = 7, +}; + +/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ +enum { + PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */ + PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */ + PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */ + PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */ + PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */ + PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */ + PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */ + /* (88E1111 only) */ +}; + +enum { + PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */ + /* (88E1011 only) */ + PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */ + PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */ + PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */ + PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */ + PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */ +}; + +#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK) + +/***** PHY_MARV_PHY_STAT (page 3)16 bit r/w Polarity Control Reg. *****/ +enum { + PHY_M_POLC_LS1M_MSK = 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */ + PHY_M_POLC_IS0M_MSK = 0xf<<8, /* Bit 11.. 8: INIT,STAT0 Mix % Mask */ + PHY_M_POLC_LOS_MSK = 0x3<<6, /* Bit 7.. 6: LOS Pol. Ctrl. Mask */ + PHY_M_POLC_INIT_MSK = 0x3<<4, /* Bit 5.. 4: INIT Pol. Ctrl. Mask */ + PHY_M_POLC_STA1_MSK = 0x3<<2, /* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */ + PHY_M_POLC_STA0_MSK = 0x3, /* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */ +}; + +#define PHY_M_POLC_LS1_P_MIX(x) (((x)<<12) & PHY_M_POLC_LS1M_MSK) +#define PHY_M_POLC_IS0_P_MIX(x) (((x)<<8) & PHY_M_POLC_IS0M_MSK) +#define PHY_M_POLC_LOS_CTRL(x) (((x)<<6) & PHY_M_POLC_LOS_MSK) +#define PHY_M_POLC_INIT_CTRL(x) (((x)<<4) & PHY_M_POLC_INIT_MSK) +#define PHY_M_POLC_STA1_CTRL(x) (((x)<<2) & PHY_M_POLC_STA1_MSK) +#define PHY_M_POLC_STA0_CTRL(x) (((x)<<0) & PHY_M_POLC_STA0_MSK) + +enum { + PULS_NO_STR = 0,/* no pulse stretching */ + PULS_21MS = 1,/* 21 ms to 42 ms */ + PULS_42MS = 2,/* 42 ms to 84 ms */ + PULS_84MS = 3,/* 84 ms to 170 ms */ + PULS_170MS = 4,/* 170 ms to 340 ms */ + PULS_340MS = 5,/* 340 ms to 670 ms */ + PULS_670MS = 6,/* 670 ms to 1.3 s */ + PULS_1300MS = 7,/* 1.3 s to 2.7 s */ +}; + +#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK) + +enum { + BLINK_42MS = 0,/* 42 ms */ + BLINK_84MS = 1,/* 84 ms */ + BLINK_170MS = 2,/* 170 ms */ + BLINK_340MS = 3,/* 340 ms */ + BLINK_670MS = 4,/* 670 ms */ +}; + +/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ +#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */ + +#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */ +#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */ +#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */ +#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */ +#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */ +#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */ + +enum led_mode { + MO_LED_NORM = 0, + MO_LED_BLINK = 1, + MO_LED_OFF = 2, + MO_LED_ON = 3, +}; + +/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. 
PHY Specific Ctrl 2 *****/ +enum { + PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */ + PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */ + PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */ + PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */ + PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */ +}; + +/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ +enum { + PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */ + PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */ + PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */ + PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */ + PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */ + PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */ + PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */ + /* (88E1111 only) */ + + PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */ + PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */ + PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ + /* Bit 15..12: reserved (used internally) */ +enum { + PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */ + PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */ + PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */ +}; + +#define PHY_M_FELP_LED2_CTRL(x) (((u16)(x)<<8) & PHY_M_FELP_LED2_MSK) +#define PHY_M_FELP_LED1_CTRL(x) (((u16)(x)<<4) & PHY_M_FELP_LED1_MSK) +#define PHY_M_FELP_LED0_CTRL(x) (((u16)(x)<<0) & PHY_M_FELP_LED0_MSK) + +enum { + LED_PAR_CTRL_COLX = 0x00, + LED_PAR_CTRL_ERROR = 0x01, + LED_PAR_CTRL_DUPLEX = 0x02, + LED_PAR_CTRL_DP_COL = 0x03, + LED_PAR_CTRL_SPEED = 0x04, + LED_PAR_CTRL_LINK = 0x05, + LED_PAR_CTRL_TX = 0x06, + LED_PAR_CTRL_RX = 0x07, + LED_PAR_CTRL_ACT = 0x08, + LED_PAR_CTRL_LNK_RX = 0x09, + LED_PAR_CTRL_LNK_AC = 0x0a, + LED_PAR_CTRL_ACT_BL = 0x0b, + LED_PAR_CTRL_TX_BL = 0x0c, + LED_PAR_CTRL_RX_BL = 0x0d, + LED_PAR_CTRL_COL_BL = 0x0e, + LED_PAR_CTRL_INACT = 0x0f +}; + +/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/ +enum { + PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */ + PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */ + PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */ +}; + +/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ +/***** PHY_MARV_PHY_CTRL (page 1) 16 bit r/w Fiber Specific Ctrl *****/ +enum { + PHY_M_FIB_FORCE_LNK = 1<<10,/* Force Link Good */ + PHY_M_FIB_SIGD_POL = 1<<9, /* SIGDET Polarity */ + PHY_M_FIB_TX_DIS = 1<<3, /* Transmitter Disable */ +}; + +/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ +/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/ +enum { + PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */ + PHY_M_MAC_GMIF_PUP = 1<<3, /* GMII Power Up (88E1149 only) */ + PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */ + PHY_M_MAC_MD_COPPER = 5,/* Copper only */ + PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */ +}; +#define PHY_M_MAC_MODE_SEL(x) (((x)<<7) & PHY_M_MAC_MD_MSK) + +/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/ +enum { + PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */ + PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */ + PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */ + PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl.
Mask */ +}; + +#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK) +#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK) +#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK) +#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK) + +/* GMAC registers */ +/* Port Registers */ +enum { + GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */ + GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */ + GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */ + GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */ + GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */ + GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */ + GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */ +/* Source Address Registers */ + GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */ + GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */ + GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */ + GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */ + GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */ + GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */ + +/* Multicast Address Hash Registers */ + GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */ + GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */ + GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */ + GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */ + +/* Interrupt Source Registers */ + GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */ + GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */ + GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */ + +/* Interrupt Mask Registers */ + GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */ + GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */ + GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */ + +/* Serial Management Interface (SMI) Registers */ + GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ + GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ + GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ +/* MIB Counters */ + GM_MIB_CNT_BASE = 0x0100, /* Base Address of MIB Counters */ + GM_MIB_CNT_END = 0x025C, /* Last MIB counter */ +}; + + +/* + * MIB Counters base address definitions (low word) - + * use offset 4 for access to high word (32 bit r/o) + */ +enum { + GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ + GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ + GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ + GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ + GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. 
Error */ + + GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ + GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ + GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ + GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */ + GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ + GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */ + GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ + GM_RXF_127B = GM_MIB_CNT_BASE + 104,/* 65-127 Byte Rx Frame */ + GM_RXF_255B = GM_MIB_CNT_BASE + 112,/* 128-255 Byte Rx Frame */ + GM_RXF_511B = GM_MIB_CNT_BASE + 120,/* 256-511 Byte Rx Frame */ + GM_RXF_1023B = GM_MIB_CNT_BASE + 128,/* 512-1023 Byte Rx Frame */ + GM_RXF_1518B = GM_MIB_CNT_BASE + 136,/* 1024-1518 Byte Rx Frame */ + GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144,/* 1519-MaxSize Byte Rx Frame */ + GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152,/* Rx Frame too Long Error */ + GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160,/* Rx Jabber Packet Frame */ + + GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176,/* Rx FIFO overflow Event */ + GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192,/* Unicast Frames Xmitted OK */ + GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200,/* Broadcast Frames Xmitted OK */ + GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208,/* Pause MAC Ctrl Frames Xmitted */ + GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216,/* Multicast Frames Xmitted OK */ + GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224,/* Octets Transmitted OK Low */ + GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232,/* Octets Transmitted OK High */ + GM_TXF_64B = GM_MIB_CNT_BASE + 240,/* 64 Byte Tx Frame */ + GM_TXF_127B = GM_MIB_CNT_BASE + 248,/* 65-127 Byte Tx Frame */ + GM_TXF_255B = GM_MIB_CNT_BASE + 256,/* 128-255 Byte Tx Frame */ + GM_TXF_511B = GM_MIB_CNT_BASE + 264,/* 256-511 Byte Tx Frame */ + GM_TXF_1023B = GM_MIB_CNT_BASE + 272,/* 512-1023 Byte Tx Frame */ + GM_TXF_1518B = GM_MIB_CNT_BASE + 280,/* 1024-1518 Byte Tx Frame */ + GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288,/* 1519-MaxSize Byte Tx Frame */ + + GM_TXF_COL = GM_MIB_CNT_BASE + 304,/* Tx Collision */ + GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312,/* Tx Late Collision */ + GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320,/* Tx aborted due to Exces. Col. 
*/ + GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328,/* Tx Multiple Collision */ + GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336,/* Tx Single Collision */ + GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344,/* Tx FIFO Underrun Event */ +}; + +/* GMAC Bit Definitions */ +/* GM_GP_STAT 16 bit r/o General Purpose Status Register */ +enum { + GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */ + GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */ + GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */ + GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */ + GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */ + GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */ + GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */ + GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */ + + GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */ + GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ + GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */ + GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ + GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ +}; + +/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ +enum { + GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ + GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */ + GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */ + GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */ + GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */ + GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */ + GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */ + GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */ + GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */ + GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */ + GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */ + GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */ + GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */ + GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */ + GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */ +}; + +#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) + +/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ +enum { + GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ + GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ + GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ + GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ +}; + +#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) +#define TX_COL_DEF 0x04 + +/* GM_RX_CTRL 16 bit r/w Receive Control Register */ +enum { + GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ + GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */ + GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ + GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ +}; + +/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ +enum { + GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ + GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */ + GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */ + GM_TXPA_BO_LIM_MSK = 0x0f, /* Bit 3.. 
0: Backoff Limit Mask */ + + TX_JAM_LEN_DEF = 0x03, + TX_JAM_IPG_DEF = 0x0b, + TX_IPG_JAM_DEF = 0x1c, + TX_BOF_LIM_DEF = 0x04, +}; + +#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK) +#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK) +#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK) +#define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK) + + +/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ +enum { + GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ + GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */ + GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */ + GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */ + + GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */ + + GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ +}; + +#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) +#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK) + +#define DATA_BLIND_DEF 0x04 +#define IPG_DATA_DEF_1000 0x1e +#define IPG_DATA_DEF_10_100 0x18 + +/* GM_SMI_CTRL 16 bit r/w SMI Control Register */ +enum { + GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */ + GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */ + GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/ + GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ + GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ +}; + +#define GM_SMI_CT_PHY_AD(x) (((u16)(x)<<11) & GM_SMI_CT_PHY_A_MSK) +#define GM_SMI_CT_REG_AD(x) (((u16)(x)<<6) & GM_SMI_CT_REG_A_MSK) + +/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ +enum { + GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ + GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ +}; + +/* Receive Frame Status Encoding */ +enum { + GMR_FS_LEN = 0x7fff<<16, /* Bit 30..16: Rx Frame Length */ + GMR_FS_VLAN = 1<<13, /* VLAN Packet */ + GMR_FS_JABBER = 1<<12, /* Jabber Packet */ + GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */ + GMR_FS_MC = 1<<10, /* Multicast Packet */ + GMR_FS_BC = 1<<9, /* Broadcast Packet */ + GMR_FS_RX_OK = 1<<8, /* Receive OK (Good Packet) */ + GMR_FS_GOOD_FC = 1<<7, /* Good Flow-Control Packet */ + GMR_FS_BAD_FC = 1<<6, /* Bad Flow-Control Packet */ + GMR_FS_MII_ERR = 1<<5, /* MII Error */ + GMR_FS_LONG_ERR = 1<<4, /* Too Long Packet */ + GMR_FS_FRAGMENT = 1<<3, /* Fragment */ + + GMR_FS_CRC_ERR = 1<<1, /* CRC Error */ + GMR_FS_RX_FF_OV = 1<<0, /* Rx FIFO Overflow */ + + GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR | + GMR_FS_FRAGMENT | GMR_FS_LONG_ERR | + GMR_FS_MII_ERR | GMR_FS_BAD_FC | + GMR_FS_UN_SIZE | GMR_FS_JABBER, +}; + +/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ +enum { + RX_GCLKMAC_ENA = 1<<31, /* RX MAC Clock Gating Enable */ + RX_GCLKMAC_OFF = 1<<30, + + RX_STFW_DIS = 1<<29, /* RX Store and Forward Enable */ + RX_STFW_ENA = 1<<28, + + RX_TRUNC_ON = 1<<27, /* enable packet truncation */ + RX_TRUNC_OFF = 1<<26, /* disable packet truncation */ + RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */ + RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */ + + RX_MACSEC_FLUSH_ON = 1<<23, + RX_MACSEC_FLUSH_OFF = 1<<22, + RX_MACSEC_ASF_FLUSH_ON = 1<<21, + RX_MACSEC_ASF_FLUSH_OFF = 1<<20, + + GMF_RX_OVER_ON = 1<<19, /* enable flushing on receive overrun */ + GMF_RX_OVER_OFF = 1<<18, /* disable flushing on receive overrun */ + GMF_ASF_RX_OVER_ON = 1<<17, /* enable flushing of ASF when overrun */ + GMF_ASF_RX_OVER_OFF = 1<<16, /* disable flushing of ASF when overrun */ + + GMF_WP_TST_ON = 1<<14, /* Write 
Pointer Test On */ + GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ + GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ + + GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */ + GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */ + GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */ + GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */ + GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */ + GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */ + GMF_CLI_RX_C = 1<<4, /* Clear IRQ Rx Frame Complete */ + + GMF_OPER_ON = 1<<3, /* Operational Mode On */ + GMF_OPER_OFF = 1<<2, /* Operational Mode Off */ + GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */ + GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */ + + RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */ + + GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON, +}; + +/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */ +enum { + RX_IPV6_SA_MOB_ENA = 1<<9, /* IPv6 SA Mobility Support Enable */ + RX_IPV6_SA_MOB_DIS = 1<<8, /* IPv6 SA Mobility Support Disable */ + RX_IPV6_DA_MOB_ENA = 1<<7, /* IPv6 DA Mobility Support Enable */ + RX_IPV6_DA_MOB_DIS = 1<<6, /* IPv6 DA Mobility Support Disable */ + RX_PTR_SYNCDLY_ENA = 1<<5, /* Pointers Delay Synch Enable */ + RX_PTR_SYNCDLY_DIS = 1<<4, /* Pointers Delay Synch Disable */ + RX_ASF_NEWFLAG_ENA = 1<<3, /* RX ASF Flag New Logic Enable */ + RX_ASF_NEWFLAG_DIS = 1<<2, /* RX ASF Flag New Logic Disable */ + RX_FLSH_MISSPKT_ENA = 1<<1, /* RX Flush Miss-Packet Enable */ + RX_FLSH_MISSPKT_DIS = 1<<0, /* RX Flush Miss-Packet Disable */ +}; + +/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ +enum { + TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */ +}; + +/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ +enum { + TX_STFW_DIS = 1<<31,/* Disable Store & Forward */ + TX_STFW_ENA = 1<<30,/* Enable Store & Forward */ + + TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */ + TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */ + + TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */ + TX_PCI_JUM_DIS = 1<<22,/* PCI Jumbo Mode enable */ + + GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */ + GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */ + GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */ + + GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */ + GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */ + GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */ +}; + +/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ +enum { + GMT_ST_START = 1<<2, /* Start Time Stamp Timer */ + GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */ + GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */ +}; + +/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */ +enum { + Y2_ASF_OS_PRES = 1<<4, /* ASF operation system present */ + Y2_ASF_RESET = 1<<3, /* ASF system in reset state */ + Y2_ASF_RUNNING = 1<<2, /* ASF system operational */ + Y2_ASF_CLR_HSTI = 1<<1, /* Clear ASF IRQ */ + Y2_ASF_IRQ = 1<<0, /* Issue an IRQ to ASF system */ + + Y2_ASF_UC_STATE = 3<<2, /* ASF uC State */ + Y2_ASF_CLK_HALT = 0, /* ASF system clock stopped */ +}; + +/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */ +enum { + Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */ + Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */ +}; +/* HCU_CCSR CPU Control and Status Register */ +enum { + HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */ + HCU_CCSR_CPU_SLEEP = 1<<26, /* CPU sleep status */ + /* Clock Stretching Timeout */ + HCU_CCSR_CS_TO = 1<<25, + HCU_CCSR_WDOG = 1<<24, 
/* Watchdog Reset */ + + HCU_CCSR_CLR_IRQ_HOST = 1<<17, /* Clear IRQ_HOST */ + HCU_CCSR_SET_IRQ_HCU = 1<<16, /* Set IRQ_HCU */ + + HCU_CCSR_AHB_RST = 1<<9, /* Reset AHB bridge */ + HCU_CCSR_CPU_RST_MODE = 1<<8, /* CPU Reset Mode */ + + HCU_CCSR_SET_SYNC_CPU = 1<<5, + HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */ + HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3, + HCU_CCSR_OS_PRSNT = 1<<2, /* ASF OS Present */ +/* Microcontroller State */ + HCU_CCSR_UC_STATE_MSK = 3, + HCU_CCSR_UC_STATE_BASE = 1<<0, + HCU_CCSR_ASF_RESET = 0, + HCU_CCSR_ASF_HALTED = 1<<1, + HCU_CCSR_ASF_RUNNING = 1<<0, +}; + +/* HCU_HCSR Host Control and Status Register */ +enum { + HCU_HCSR_SET_IRQ_CPU = 1<<16, /* Set IRQ_CPU */ + + HCU_HCSR_CLR_IRQ_HCU = 1<<1, /* Clear IRQ_HCU */ + HCU_HCSR_SET_IRQ_HOST = 1<<0, /* Set IRQ_HOST */ +}; + +/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */ +enum { + SC_STAT_CLR_IRQ = 1<<4, /* Status Burst IRQ clear */ + SC_STAT_OP_ON = 1<<3, /* Operational Mode On */ + SC_STAT_OP_OFF = 1<<2, /* Operational Mode Off */ + SC_STAT_RST_CLR = 1<<1, /* Clear Status Unit Reset (Enable) */ + SC_STAT_RST_SET = 1<<0, /* Set Status Unit Reset */ +}; + +/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ +enum { + GMC_SET_RST = 1<<15,/* MAC SEC RST */ + GMC_SEC_RST_OFF = 1<<14,/* MAC SEC RSt OFF */ + GMC_BYP_MACSECRX_ON = 1<<13,/* Bypass macsec RX */ + GMC_BYP_MACSECRX_OFF= 1<<12,/* Bypass macsec RX off */ + GMC_BYP_MACSECTX_ON = 1<<11,/* Bypass macsec TX */ + GMC_BYP_MACSECTX_OFF= 1<<10,/* Bypass macsec TX off*/ + GMC_BYP_RETR_ON = 1<<9, /* Bypass retransmit FIFO On */ + GMC_BYP_RETR_OFF= 1<<8, /* Bypass retransmit FIFO Off */ + + GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */ + GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */ + GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */ + GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */ + GMC_PAUSE_ON = 1<<3, /* Pause On */ + GMC_PAUSE_OFF = 1<<2, /* Pause Off */ + GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */ + GMC_RST_SET = 1<<0, /* Set GMAC Reset */ +}; + +/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ +enum { + GPC_TX_PAUSE = 1<<30, /* Tx pause enabled (ro) */ + GPC_RX_PAUSE = 1<<29, /* Rx pause enabled (ro) */ + GPC_SPEED = 3<<27, /* PHY speed (ro) */ + GPC_LINK = 1<<26, /* Link up (ro) */ + GPC_DUPLEX = 1<<25, /* Duplex (ro) */ + GPC_CLOCK = 1<<24, /* 125Mhz clock stable (ro) */ + + GPC_PDOWN = 1<<23, /* Internal regulator 2.5 power down */ + GPC_TSTMODE = 1<<22, /* Test mode */ + GPC_REG18 = 1<<21, /* Reg18 Power down */ + GPC_REG12SEL = 3<<19, /* Reg12 power setting */ + GPC_REG18SEL = 3<<17, /* Reg18 power setting */ + GPC_SPILOCK = 1<<16, /* SPI lock (ASF) */ + + GPC_LEDMUX = 3<<14, /* LED Mux */ + GPC_INTPOL = 1<<13, /* Interrupt polarity */ + GPC_DETECT = 1<<12, /* Energy detect */ + GPC_1000HD = 1<<11, /* Enable 1000Mbit HD */ + GPC_SLAVE = 1<<10, /* Slave mode */ + GPC_PAUSE = 1<<9, /* Pause enable */ + GPC_LEDCTL = 3<<6, /* GPHY Leds */ + + GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ + GPC_RST_SET = 1<<0, /* Set GPHY Reset */ +}; + +/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ +/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ +enum { + GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */ + GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */ + GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */ + GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */ + GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ + GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ + +#define 
GMAC_DEF_MSK GM_IS_TX_FF_UR +}; + +/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ +enum { /* Bits 15.. 2: reserved */ + GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ + GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ +}; + + +/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ +enum { + WOL_CTL_LINK_CHG_OCC = 1<<15, + WOL_CTL_MAGIC_PKT_OCC = 1<<14, + WOL_CTL_PATTERN_OCC = 1<<13, + WOL_CTL_CLEAR_RESULT = 1<<12, + WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11, + WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10, + WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9, + WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8, + WOL_CTL_ENA_PME_ON_PATTERN = 1<<7, + WOL_CTL_DIS_PME_ON_PATTERN = 1<<6, + WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5, + WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4, + WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3, + WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2, + WOL_CTL_ENA_PATTERN_UNIT = 1<<1, + WOL_CTL_DIS_PATTERN_UNIT = 1<<0, +}; + + +/* Control flags */ +enum { + UDPTCP = 1<<0, + CALSUM = 1<<1, + WR_SUM = 1<<2, + INIT_SUM= 1<<3, + LOCK_SUM= 1<<4, + INS_VLAN= 1<<5, + EOP = 1<<7, +}; + +enum { + HW_OWNER = 1<<7, + OP_TCPWRITE = 0x11, + OP_TCPSTART = 0x12, + OP_TCPINIT = 0x14, + OP_TCPLCK = 0x18, + OP_TCPCHKSUM = OP_TCPSTART, + OP_TCPIS = OP_TCPINIT | OP_TCPSTART, + OP_TCPLW = OP_TCPLCK | OP_TCPWRITE, + OP_TCPLSW = OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE, + OP_TCPLISW = OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE, + + OP_ADDR64 = 0x21, + OP_VLAN = 0x22, + OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN, + OP_LRGLEN = 0x24, + OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN, + OP_MSS = 0x28, + OP_MSSVLAN = OP_MSS | OP_VLAN, + + OP_BUFFER = 0x40, + OP_PACKET = 0x41, + OP_LARGESEND = 0x43, + OP_LSOV2 = 0x45, + +/* YUKON-2 STATUS opcodes defines */ + OP_RXSTAT = 0x60, + OP_RXTIMESTAMP = 0x61, + OP_RXVLAN = 0x62, + OP_RXCHKS = 0x64, + OP_RXCHKSVLAN = OP_RXCHKS | OP_RXVLAN, + OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN, + OP_RSS_HASH = 0x65, + OP_TXINDEXLE = 0x68, + OP_MACSEC = 0x6c, + OP_PUTIDX = 0x70, +}; + +enum status_css { + CSS_TCPUDPCSOK = 1<<7, /* TCP / UDP checksum is ok */ + CSS_ISUDP = 1<<6, /* packet is a UDP packet */ + CSS_ISTCP = 1<<5, /* packet is a TCP packet */ + CSS_ISIPFRAG = 1<<4, /* packet is a TCP/UDP frag, CS calc not done */ + CSS_ISIPV6 = 1<<3, /* packet is a IPv6 packet */ + CSS_IPV4CSUMOK = 1<<2, /* IP v4: TCP header checksum is ok */ + CSS_ISIPV4 = 1<<1, /* packet is a IPv4 packet */ + CSS_LINK_BIT = 1<<0, /* port number (legacy) */ +}; + +/* Yukon 2 hardware interface */ +struct sky2_tx_le { + __le32 addr; + __le16 length; /* also vlan tag or checksum start */ + u8 ctrl; + u8 opcode; +} __packed; + +struct sky2_rx_le { + __le32 addr; + __le16 length; + u8 ctrl; + u8 opcode; +} __packed; + +struct sky2_status_le { + __le32 status; /* also checksum */ + __le16 length; /* also vlan tag */ + u8 css; + u8 opcode; +} __packed; + +struct tx_ring_info { + struct sk_buff *skb; + unsigned long flags; +#define TX_MAP_SINGLE 0x0001 +#define TX_MAP_PAGE 0x0002 + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct rx_ring_info { + struct sk_buff *skb; + dma_addr_t data_addr; + DEFINE_DMA_UNMAP_LEN(data_size); + dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; +}; + +enum flow_control { + FC_NONE = 0, + FC_TX = 1, + FC_RX = 2, + FC_BOTH = 3, +}; + +struct sky2_stats { + struct u64_stats_sync syncp; + u64 packets; + u64 bytes; +}; + +struct sky2_port { + struct sky2_hw *hw; + struct net_device *netdev; + unsigned port; + u32 msg_enable; + spinlock_t phy_lock; + + struct tx_ring_info *tx_ring; + struct sky2_tx_le *tx_le; + struct sky2_stats 
tx_stats; + + u16 tx_ring_size; + u16 tx_cons; /* next le to check */ + u16 tx_prod; /* next le to use */ + u16 tx_next; /* debug only */ + + u16 tx_pending; + u16 tx_last_mss; + u32 tx_last_upper; + u32 tx_tcpsum; + + struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp; + struct sky2_rx_le *rx_le; + struct sky2_stats rx_stats; + + u16 rx_next; /* next re to check */ + u16 rx_put; /* next le index to use */ + u16 rx_pending; + u16 rx_data_size; + u16 rx_nfrags; + u16 rx_tag; + + struct { + unsigned long last; + u32 mac_rp; + u8 mac_lev; + u8 fifo_rp; + u8 fifo_lev; + } check; + + dma_addr_t rx_le_map; + dma_addr_t tx_le_map; + + u16 advertising; /* ADVERTISED_ bits */ + u16 speed; /* SPEED_1000, SPEED_100, ... */ + u8 wol; /* WAKE_ bits */ + u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ + u16 flags; +#define SKY2_FLAG_AUTO_SPEED 0x0002 +#define SKY2_FLAG_AUTO_PAUSE 0x0004 + + enum flow_control flow_mode; + enum flow_control flow_status; + +#ifdef CONFIG_SKY2_DEBUG + struct dentry *debugfs; +#endif +}; + +struct sky2_hw { + void __iomem *regs; + struct pci_dev *pdev; + struct napi_struct napi; + struct net_device *dev[2]; + unsigned long flags; +#define SKY2_HW_USE_MSI 0x00000001 +#define SKY2_HW_FIBRE_PHY 0x00000002 +#define SKY2_HW_GIGABIT 0x00000004 +#define SKY2_HW_NEWER_PHY 0x00000008 +#define SKY2_HW_RAM_BUFFER 0x00000010 +#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ +#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ +#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ +#define SKY2_HW_RSS_BROKEN 0x00000100 +#define SKY2_HW_VLAN_BROKEN 0x00000200 +#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */ + + u8 chip_id; + u8 chip_rev; + u8 pmd_type; + u8 ports; + + struct sky2_status_le *st_le; + u32 st_size; + u32 st_idx; + dma_addr_t st_dma; + + struct timer_list watchdog_timer; + struct work_struct restart_work; + wait_queue_head_t msi_wait; + + char irq_name[0]; +}; + +static inline int sky2_is_copper(const struct sky2_hw *hw) +{ + return !(hw->flags & SKY2_HW_FIBRE_PHY); +} + +/* Register accessor for memory mapped device */ +static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg) +{ + return readl(hw->regs + reg); +} + +static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg) +{ + return readw(hw->regs + reg); +} + +static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg) +{ + return readb(hw->regs + reg); +} + +static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val) +{ + writel(val, hw->regs + reg); +} + +static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val) +{ + writew(val, hw->regs + reg); +} + +static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val) +{ + writeb(val, hw->regs + reg); +} + +/* Yukon PHY related registers */ +#define SK_GMAC_REG(port,reg) \ + (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg)) +#define GM_PHY_RETRIES 100 + +static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg) +{ + return sky2_read16(hw, SK_GMAC_REG(port,reg)); +} + +static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + unsigned base = SK_GMAC_REG(port, reg); + return (u32) sky2_read16(hw, base) + | (u32) sky2_read16(hw, base+4) << 16; +} + +static inline u64 gma_read64(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + unsigned base = SK_GMAC_REG(port, reg); + + return (u64) sky2_read16(hw, base) + | (u64) sky2_read16(hw, base+4) << 16 + | (u64) 
sky2_read16(hw, base+8) << 32 + | (u64) sky2_read16(hw, base+12) << 48; +} + +/* There is no way to atomically read 32 bit values from PHY, so retry */ +static inline u32 get_stats32(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + u32 val; + + do { + val = gma_read32(hw, port, reg); + } while (gma_read32(hw, port, reg) != val); + + return val; +} + +static inline u64 get_stats64(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + u64 val; + + do { + val = gma_read64(hw, port, reg); + } while (gma_read64(hw, port, reg) != val); + + return val; +} + +static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v) +{ + sky2_write16(hw, SK_GMAC_REG(port,r), v); +} + +static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg, + const u8 *addr) +{ + gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8)); + gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); + gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); +} + +/* PCI config space access */ +static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read32(hw, Y2_CFG_SPC + reg); +} + +static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read16(hw, Y2_CFG_SPC + reg); +} + +static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val) +{ + sky2_write32(hw, Y2_CFG_SPC + reg, val); +} + +static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val) +{ + sky2_write16(hw, Y2_CFG_SPC + reg, val); +} +#endif -- cgit v1.2.1 From 01789349ee52e4a3faf376f1485303d9723c4f1f Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Aug 2011 06:29:00 +0000 Subject: net: introduce IFF_UNICAST_FLT private flag Use IFF_UNICAST_FLT to find out if the driver handles unicast address filtering. In case it does not, promisc mode is entered. The patch also fixes the following drivers: stmmac, niu (support uc filtering yet propagated ndo_set_multicast_list); bna, benet, pxa168_eth, ks8851, ks8851_mll, ksz884x (have ndo_set_rx_mode set but do not support uc filtering) Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 259699983ca5..1e2c9f072bfd 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2923,6 +2923,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; + dev->priv_flags |= IFF_UNICAST_FLT; + SET_NETDEV_DEV(dev, &pdev->dev); if (mp->shared->win_protect) -- cgit v1.2.1
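The fallback described above takes roughly the following shape in the core rx-mode path (a simplified sketch, not the exact net/core/dev.c code of the period):

	if (!netdev_uc_empty(dev) && !(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* the device cannot filter secondary unicast addresses in
		 * hardware, so accept all frames and discard in software */
		__dev_set_promiscuity(dev, 1);
	}

A driver that does filter unicast addresses in hardware, such as mv643xx_eth above, opts out of the fallback by setting IFF_UNICAST_FLT in priv_flags.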
From afc4b13df143122f99a0eb10bfefb216c2806de0 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Aug 2011 06:29:01 +0000 Subject: net: remove use of ndo_set_multicast_list in drivers replace it by ndo_set_rx_mode Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/skge.c | 2 +- drivers/net/ethernet/marvell/sky2.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 98ec614c5690..34622b038094 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3762,7 +3762,7 @@ static const struct net_device_ops skge_netdev_ops = { .ndo_tx_timeout = skge_tx_timeout, .ndo_change_mtu = skge_change_mtu, .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = skge_set_multicast, + .ndo_set_rx_mode = skge_set_multicast, .ndo_set_mac_address = skge_set_mac_address, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = skge_netpoll, diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 57339da76326..3ff0a1292933 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4612,7 +4612,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = { .ndo_do_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = sky2_set_mac_address, - .ndo_set_multicast_list = sky2_set_multicast, + .ndo_set_rx_mode = sky2_set_multicast, .ndo_change_mtu = sky2_change_mtu, .ndo_fix_features = sky2_fix_features, .ndo_set_features = sky2_set_features, @@ -4629,7 +4629,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = { .ndo_do_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = sky2_set_mac_address, - .ndo_set_multicast_list = sky2_set_multicast, + .ndo_set_rx_mode = sky2_set_multicast, .ndo_change_mtu = sky2_change_mtu, .ndo_fix_features = sky2_fix_features, .ndo_set_features = sky2_set_features, -- cgit v1.2.1 From 88f07484ccdf08e58dc462ed1ac7eb2e84d88a17 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Tue, 23 Aug 2011 01:29:52 -0700 Subject: drivers/net/ethernet/*: Enabled vendor Kconfig options Based on finds from Stephen Rothwell, where current defconfigs enable an ethernet driver but it is not compiled due to the newly added NET_VENDOR_* component of Kconfig. This patch enables all the "new" Kconfig options so that current defconfigs will continue to compile the expected drivers. In addition, enabling all the new Kconfig options does not add any unexpected options. CC: Stephen Rothwell Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/marvell/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index e525408367b6..0029934748bc 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -4,6 +4,7 @@ config NET_VENDOR_MARVELL bool "Marvell devices" + default y depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET ---help--- If you have a network (Ethernet) card belonging to this class, say Y -- cgit v1.2.1 From f106358b468bcbdff0a54fa96aeb5527cb2debbb Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 31 Aug 2011 00:46:57 +0000 Subject: mv643xx: convert to SKB paged frag API. Signed-off-by: Ian Campbell Cc: Lennert Buytenhek Cc: netdev@vger.kernel.org Signed-off-by: David S.
Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1e2c9f072bfd..7325737fe93b 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -752,10 +752,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) desc->l4i_chk = 0; desc->byte_cnt = this_frag->size; - desc->buf_ptr = dma_map_page(mp->dev->dev.parent, - this_frag->page, - this_frag->page_offset, - this_frag->size, DMA_TO_DEVICE); + desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, + this_frag, 0, + this_frag->size, + DMA_TO_DEVICE); } } -- cgit v1.2.1 From 516733c2bbb76404faa201e1595361be6ab58119 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 21 Sep 2011 21:53:17 +0000 Subject: skge: convert to SKB paged frag API. Signed-off-by: Ian Campbell Cc: Stephen Hemminger Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/skge.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 34622b038094..88e5856e06db 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -2758,8 +2758,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - map = pci_map_page(hw->pdev, frag->page, frag->page_offset, - frag->size, PCI_DMA_TODEVICE); + map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, + frag->size, PCI_DMA_TODEVICE); e = e->next; e->skb = skb; -- cgit v1.2.1 From 950a5a4fdbfbea90feda70bab3178eafecc64d0b Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 21 Sep 2011 21:53:18 +0000 Subject: sky2: convert to SKB paged frag API. Signed-off-by: Ian Campbell Cc: Stephen Hemminger Cc: netdev@vger.kernel.org Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/sky2.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 3ff0a1292933..ef2dc021d09c 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1226,10 +1226,9 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - re->frag_addr[i] = pci_map_page(pdev, frag->page, - frag->page_offset, - frag->size, - PCI_DMA_FROMDEVICE); + re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0, + frag->size, + PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(pdev, re->frag_addr[i])) goto map_page_error; @@ -1910,8 +1909,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, - frag->size, PCI_DMA_TODEVICE); + mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0, + frag->size, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(hw->pdev, mapping)) goto mapping_unwind; @@ -2449,7 +2448,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, if (length == 0) { /* don't need this page */ - __free_page(frag->page); + __skb_frag_unref(frag); --skb_shinfo(skb)->nr_frags; } else { size = min(length, (unsigned) PAGE_SIZE); -- cgit v1.2.1 From 0bdb0bd0139f3b6afa252de1487e3ce82a494db9 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 23 Sep 2011 11:13:40 +0000 Subject: sky2: manage irq better on single port card Most sky2 hardware only has a single port, although some variations of the chip support two interfaces. For the single port case, use the standard Ethernet driver convention of allocating IRQ when device is brought up rather than at probe time. Also, change the error handling of dual port cards so that if second port can not be brought up, then just fail. No point in continuing, since the failure is most certainly because of out of memory. The dual port sky2 device has a single irq and a single status ring, therefore it has a single NAPI object shared by both ports. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/sky2.c | 85 +++++++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index ef2dc021d09c..338b10c6f52e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -148,6 +148,7 @@ static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 }; static void sky2_set_multicast(struct net_device *dev); +static irqreturn_t sky2_intr(int irq, void *dev_id); /* Access to PHY via serial interconnect */ static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) @@ -1715,6 +1716,27 @@ static void sky2_hw_up(struct sky2_port *sky2) sky2_rx_start(sky2); } +/* Setup device IRQ and enable napi to process */ +static int sky2_setup_irq(struct sky2_hw *hw, const char *name) +{ + struct pci_dev *pdev = hw->pdev; + int err; + + err = request_irq(pdev->irq, sky2_intr, + (hw->flags & SKY2_HW_USE_MSI) ? 
0 : IRQF_SHARED, + name, hw); + if (err) + dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); + else { + napi_enable(&hw->napi); + sky2_write32(hw, B0_IMSK, Y2_IS_BASE); + sky2_read32(hw, B0_IMSK); + } + + return err; +} + + /* Bring up network interface. */ static int sky2_up(struct net_device *dev) { @@ -1730,6 +1752,10 @@ static int sky2_up(struct net_device *dev) if (err) goto err_out; + /* With single port, IRQ is setup when device is brought up */ + if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name))) + goto err_out; + sky2_hw_up(sky2); /* Enable interrupts from phy/mac for port */ @@ -2091,8 +2117,13 @@ static int sky2_down(struct net_device *dev) sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]); sky2_read32(hw, B0_IMSK); - synchronize_irq(hw->pdev->irq); - napi_synchronize(&hw->napi); + if (hw->ports == 1) { + napi_disable(&hw->napi); + free_irq(hw->pdev->irq, hw); + } else { + synchronize_irq(hw->pdev->irq); + napi_synchronize(&hw->napi); + } sky2_hw_down(sky2); @@ -4798,7 +4829,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz) static int __devinit sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct net_device *dev; + struct net_device *dev, *dev1; struct sky2_hw *hw; int err, using_dac = 0, wol_default; u32 reg; @@ -4924,33 +4955,26 @@ static int __devinit sky2_probe(struct pci_dev *pdev, netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); - err = request_irq(pdev->irq, sky2_intr, - (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, - hw->irq_name, hw); - if (err) { - dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); - goto err_out_unregister; - } - sky2_write32(hw, B0_IMSK, Y2_IS_BASE); - napi_enable(&hw->napi); - sky2_show_addr(dev); if (hw->ports > 1) { - struct net_device *dev1; - - err = -ENOMEM; dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default); - if (dev1 && (err = register_netdev(dev1)) == 0) - sky2_show_addr(dev1); - else { - dev_warn(&pdev->dev, - "register of second port failed (%d)\n", err); - hw->dev[1] = NULL; - hw->ports = 1; - if (dev1) - free_netdev(dev1); + if (!dev1) { + err = -ENOMEM; + goto err_out_unregister; } + + err = register_netdev(dev1); + if (err) { + dev_err(&pdev->dev, "cannot register second net device\n"); + goto err_out_free_dev1; + } + + err = sky2_setup_irq(hw, hw->irq_name); + if (err) + goto err_out_unregister_dev1; + + sky2_show_addr(dev1); } setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw); @@ -4961,6 +4985,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev, return 0; +err_out_unregister_dev1: + unregister_netdev(dev1); +err_out_free_dev1: + free_netdev(dev1); err_out_unregister: if (hw->flags & SKY2_HW_USE_MSI) pci_disable_msi(pdev); @@ -5000,13 +5028,18 @@ static void __devexit sky2_remove(struct pci_dev *pdev) unregister_netdev(hw->dev[i]); sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); sky2_power_aux(hw); sky2_write8(hw, B0_CTST, CS_RST_SET); sky2_read8(hw, B0_CTST); - free_irq(pdev->irq, hw); + if (hw->ports > 1) { + napi_disable(&hw->napi); + free_irq(pdev->irq, hw); + } + if (hw->flags & SKY2_HW_USE_MSI) pci_disable_msi(pdev); pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), -- cgit v1.2.1
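This change and the skge change that follows adopt the same convention; in outline it looks like this (a schematic sketch with hypothetical names, not code from either driver):

	static int foo_open(struct net_device *dev)	/* ndo_open */
	{
		struct foo_priv *p = netdev_priv(dev);

		/* claim the interrupt only while the interface is up;
		 * foo_intr is the driver's interrupt handler */
		return request_irq(p->pdev->irq, foo_intr, IRQF_SHARED,
				   dev->name, p);
	}

	static int foo_stop(struct net_device *dev)	/* ndo_stop */
	{
		struct foo_priv *p = netdev_priv(dev);

		free_irq(p->pdev->irq, p);
		return 0;
	}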
From a9e9fd7182332d0cf5f3e601df3e71dd431b70d7 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 27 Sep 2011 13:41:37 -0400 Subject: skge: handle irq better on single port card Most boards with SysKonnect/Marvell Ethernet have only a single port. For the single port case, use the standard Ethernet driver convention of allocating the IRQ when the device is brought up rather than at probe time. This patch also adds some additional reads after writes to avoid any PCI posting problems when setting the IRQ mask. The error handling of dual port cards is also changed. If the second port cannot be brought up, then just fail. No point in continuing, since the failure is most certainly because of out of memory. It is worth noting that the dual port skge device has a single irq but two separate status rings and therefore has two NAPI objects, one for each port. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/skge.c | 72 ++++++++++++++++++++++++++----------- 1 file changed, 52 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 88e5856e06db..a0a647154245 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -113,6 +113,7 @@ static void yukon_init(struct skge_hw *hw, int port); static void genesis_mac_init(struct skge_hw *hw, int port); static void genesis_link_up(struct skge_port *skge); static void skge_set_multicast(struct net_device *dev); +static irqreturn_t skge_intr(int irq, void *dev_id); /* Avoid conditionals by using array */ static const int txqaddr[] = { Q_XA1, Q_XA2 }; @@ -2568,6 +2569,16 @@ static int skge_up(struct net_device *dev) if (err) goto free_rx_ring; + if (hw->ports == 1) { + err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED, + dev->name, hw); + if (err) { + netdev_err(dev, "Unable to allocate interrupt %d error: %d\n", + hw->pdev->irq, err); + goto free_tx_ring; + } + } + + /* Initialize MAC */ spin_lock_bh(&hw->phy_lock); if (is_genesis(hw)) @@ -2595,11 +2606,14 @@ static int skge_up(struct net_device *dev) spin_lock_irq(&hw->hw_lock); hw->intr_mask |= portmask[port]; skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); spin_unlock_irq(&hw->hw_lock); napi_enable(&skge->napi); return 0; + free_tx_ring: + kfree(skge->tx_ring.start); free_rx_ring: skge_rx_clean(skge); kfree(skge->rx_ring.start); @@ -2640,9 +2654,13 @@ static int skge_down(struct net_device *dev) spin_lock_irq(&hw->hw_lock); hw->intr_mask &= ~portmask[port]; - skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask); + skge_read32(hw, B0_IMSK); spin_unlock_irq(&hw->hw_lock); + if (hw->ports == 1) + free_irq(hw->pdev->irq, hw); + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); if (is_genesis(hw)) genesis_stop(skge); @@ -3603,7 +3621,8 @@ static int skge_reset(struct skge_hw *hw) skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); skge_write32(hw, B2_IRQM_CTRL, TIM_START); - skge_write32(hw, B0_IMSK, hw->intr_mask); + /* Leave irq disabled until first port is brought up.
From 5d6bcdfe38ce883946aebf751a64695471ce1ab5 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Thu, 6 Oct 2011 11:10:48 +0100 Subject: net: use DMA_x_DEVICE and dma_mapping_error with skb_frag_dma_map When I converted some drivers from pci_map_page to skb_frag_dma_map, I neglected to convert PCI_DMA_xDEVICE into DMA_x_DEVICE and pci_dma_mapping_error into dma_mapping_error. Signed-off-by: Ian Campbell Signed-off-by: David S.
Miller --- drivers/net/ethernet/marvell/skge.c | 2 +- drivers/net/ethernet/marvell/sky2.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index a0a647154245..32db4c877ff1 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -2777,7 +2777,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, - frag->size, PCI_DMA_TODEVICE); + frag->size, DMA_TO_DEVICE); e = e->next; e->skb = skb; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 338b10c6f52e..a3ce9b6d36af 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1229,9 +1229,9 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); - if (pci_dma_mapping_error(pdev, re->frag_addr[i])) + if (dma_mapping_error(&pdev->dev, re->frag_addr[i])) goto map_page_error; } return 0; @@ -1936,9 +1936,9 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0, - frag->size, PCI_DMA_TODEVICE); + frag->size, DMA_TO_DEVICE); - if (pci_dma_mapping_error(hw->pdev, mapping)) + if (dma_mapping_error(&hw->pdev->dev, mapping)) goto mapping_unwind; upper = upper_32_bits(mapping); -- cgit v1.2.1
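The pattern being converged on above is the generic DMA API: map a fragment with skb_frag_dma_map() against the underlying struct device, check the result with dma_mapping_error(), and express direction as DMA_TO_DEVICE/DMA_FROM_DEVICE rather than the legacy PCI_DMA_* constants. A hedged sketch of a transmit-side fragment-mapping loop (ring storage and the unwind of earlier mappings are elided; names are illustrative):

/* Sketch: map every fragment of an skb for transmit with the
 * generic DMA API. 'dev' would be &pdev->dev in a PCI driver.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int example_map_tx_frags(struct device *dev, struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t map;

		map = skb_frag_dma_map(dev, frag, 0, frag->size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, map))
			return -EIO; /* caller unwinds earlier mappings */
		/* ... store 'map' in the tx ring descriptor ... */
	}
	return 0;
}

Note that dma_mapping_error() takes the struct device, not the pci_dev; mixing pci_dma_mapping_error() with skb_frag_dma_map() is exactly the mismatch the patch above corrects.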
From 8b0c11679fd37522d8d34a76101319a085d80912 Mon Sep 17 00:00:00 2001 From: Rick Jones Date: Fri, 7 Oct 2011 19:13:28 -0400 Subject: net: Remove unnecessary driver assignments of ethtool_ringparam fields to zero Per comments from Ben Hutchings on a previous patch, sweep the floors a little, removing unnecessary assignments of zero to fields of struct ethtool_ringparam in driver code supporting ethtool -g. Signed-off-by: Rick Jones Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 4 ---- drivers/net/ethernet/marvell/skge.c | 4 ---- drivers/net/ethernet/marvell/sky2.c | 4 ---- 3 files changed, 12 deletions(-) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 7325737fe93b..f6821aa5ffbf 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1547,13 +1547,9 @@ mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) er->rx_max_pending = 4096; er->tx_max_pending = 4096; - er->rx_mini_max_pending = 0; - er->rx_jumbo_max_pending = 0; er->rx_pending = mp->rx_ring_size; er->tx_pending = mp->tx_ring_size; - er->rx_mini_pending = 0; - er->rx_jumbo_pending = 0; } static int diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 32db4c877ff1..297730359b79 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -497,13 +497,9 @@ static void skge_get_ring_param(struct net_device *dev, p->rx_max_pending = MAX_RX_RING_SIZE; p->tx_max_pending = MAX_TX_RING_SIZE; - p->rx_mini_max_pending = 0; - p->rx_jumbo_max_pending = 0; p->rx_pending = skge->rx_ring.count; p->tx_pending = skge->tx_ring.count; - p->rx_mini_pending = 0; - p->rx_jumbo_pending = 0; } static int skge_set_ring_param(struct net_device *dev, diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index a3ce9b6d36af..6895e3be260c 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4088,13 +4088,9 @@ static void sky2_get_ringparam(struct net_device *dev, struct sky2_port *sky2 = netdev_priv(dev); ering->rx_max_pending = RX_MAX_PENDING; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = 0; ering->tx_max_pending = TX_MAX_PENDING; ering->rx_pending = sky2->rx_pending; - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = 0; ering->tx_pending = sky2->tx_pending; } -- cgit v1.2.1 From 7ae60b3f3b297b7f04025c93f1cb2275c3a1dfcd Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 13 Oct 2011 17:12:46 -0400 Subject: sky2: fix skb truesize underestimation sky2 allocates a page per skb fragment. We must account for PAGE_SIZE increments in skb->truesize, not the actual frag length. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/sky2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 6895e3be260c..92634907bf8d 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2486,7 +2486,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, frag->size = size; skb->data_len += size; - skb->truesize += size; + skb->truesize += PAGE_SIZE; skb->len += size; length -= size; } -- cgit v1.2.1
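The one-line truesize fix above encodes a general rule: skb->truesize must reflect the memory actually consumed, so a receive path that attaches a whole page as a fragment should charge PAGE_SIZE even when only part of the page carries data. A sketch of the accounting, with illustrative names:

/* Sketch: add a received page as an skb fragment. A whole page was
 * allocated, so truesize grows by PAGE_SIZE, not by 'size'.
 */
#include <linux/skbuff.h>

static void example_add_rx_page(struct sk_buff *skb, struct page *page,
				unsigned int size)
{
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += PAGE_SIZE; /* not 'size' */
}

Underestimating truesize lets a socket hold more pages than its receive-buffer accounting admits, which is the problem the patch title refers to.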
From 9e903e085262ffbf1fc44a17ac06058aca03524a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 18 Oct 2011 21:00:24 +0000 Subject: net: add skb frag size accessors To ease skb->truesize sanitization, it's better to be able to localize all references to skb frag sizes. Define accessors: skb_frag_size() to fetch the frag size, and skb_frag_size_{set|add|sub}() to manipulate it. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 9 +++++---- drivers/net/ethernet/marvell/skge.c | 8 ++++---- drivers/net/ethernet/marvell/sky2.c | 16 ++++++++-------- 3 files changed, 17 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet/marvell') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index f6821aa5ffbf..194a03113802 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -713,8 +713,9 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) int frag; for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { - skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; - if (fragp->size <= 8 && fragp->page_offset & 7) + const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; + + if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7) return 1; } @@ -751,10 +752,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) } desc->l4i_chk = 0; - desc->byte_cnt = this_frag->size; + desc->byte_cnt = skb_frag_size(this_frag); desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, this_frag, 0, - this_frag->size, + skb_frag_size(this_frag), DMA_TO_DEVICE); } } diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 297730359b79..c7b60839ac99 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -2770,10 +2770,10 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, control |= BMU_STFWD; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); e = e->next; e->skb = skb; @@ -2783,9 +2783,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, tf->dma_lo = map; tf->dma_hi = (u64) map >> 32; dma_unmap_addr_set(e, mapaddr, map); - dma_unmap_len_set(e, maplen, frag->size); + dma_unmap_len_set(e, maplen, skb_frag_size(frag)); - tf->control = BMU_OWN | BMU_SW | control | frag->size; + tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag); } tf->control |= BMU_EOF | BMU_IRQ_EOF; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 92634907bf8d..7b083c438a14 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1225,10 +1225,10 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, dma_unmap_len_set(re, data_size, size); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0, - frag->size, + skb_frag_size(frag), DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, re->frag_addr[i])) @@ -1239,7 +1239,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, map_page_error: while (--i >= 0) { pci_unmap_page(pdev, re->frag_addr[i], - skb_shinfo(skb)->frags[i].size, + skb_frag_size(&skb_shinfo(skb)->frags[i]), PCI_DMA_FROMDEVICE); } @@ -1263,7 +1263,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) pci_unmap_page(pdev, re->frag_addr[i], - skb_shinfo(skb)->frags[i].size, + skb_frag_size(&skb_shinfo(skb)->frags[i]), PCI_DMA_FROMDEVICE); } @@ -1936,7 +1936,7 @@ static netdev_tx_t
sky2_xmit_frame(struct sk_buff *skb, const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0, - frag->size, DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(&hw->pdev->dev, mapping)) goto mapping_unwind; @@ -1952,11 +1952,11 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, re = sky2->tx_ring + slot; re->flags = TX_MAP_PAGE; dma_unmap_addr_set(re, mapaddr, mapping); - dma_unmap_len_set(re, maplen, frag->size); + dma_unmap_len_set(re, maplen, skb_frag_size(frag)); le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(lower_32_bits(mapping)); - le->length = cpu_to_le16(frag->size); + le->length = cpu_to_le16(skb_frag_size(frag)); le->ctrl = ctrl; le->opcode = OP_BUFFER | HW_OWNER; } @@ -2484,7 +2484,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, } else { size = min(length, (unsigned) PAGE_SIZE); - frag->size = size; + skb_frag_size_set(frag, size); skb->data_len += size; skb->truesize += PAGE_SIZE; skb->len += size; -- cgit v1.2.1
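The accessor conversion in this last patch makes the fragment-size representation opaque to drivers: any later change to the underlying field only has to touch the accessors, not every call site. A sketch of the resulting idiom (the function name is illustrative):

/* Sketch: total payload bytes held in an skb's page fragments,
 * read through skb_frag_size() rather than frag->size directly.
 */
#include <linux/skbuff.h>

static unsigned int example_frag_bytes(const struct sk_buff *skb)
{
	unsigned int i, total = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		total += skb_frag_size(&skb_shinfo(skb)->frags[i]);

	return total;
}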