Diffstat (limited to 'drivers/net/arm/ixp4xx_eth.c')
-rw-r--r--  drivers/net/arm/ixp4xx_eth.c  344
1 file changed, 176 insertions(+), 168 deletions(-)
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index e2d702b8b2e4..26af411fc428 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -30,12 +30,11 @@
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/platform_device.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
-#define DEBUG_QUEUES 0
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
@@ -59,7 +58,6 @@
#define NAPI_WEIGHT 16
#define MDIO_INTERVAL (3 * HZ)
#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
-#define MAX_MII_RESET_RETRIES 100 /* mdio_read() cycles, typically 4 */
#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
#define NPE_ID(port_id) ((port_id) >> 4)
@@ -164,15 +162,14 @@ struct port {
struct npe *npe;
struct net_device *netdev;
struct napi_struct napi;
- struct net_device_stats stat;
- struct mii_if_info mii;
- struct delayed_work mdio_thread;
+ struct phy_device *phydev;
struct eth_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
u32 desc_tab_phys;
int id; /* logical port ID */
- u16 mii_bmcr;
+ int speed, duplex;
+ u8 firmware[4];
};
/* NPE message structure */
@@ -243,19 +240,20 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
+struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;
-static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
- int write, u16 cmd)
+static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+ int write, u16 cmd)
{
int cycles = 0;
if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
- printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
- return 0;
+ printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
+ return -1;
}
if (write) {
@@ -274,107 +272,119 @@ static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
}
if (cycles == MAX_MDIO_RETRIES) {
- printk(KERN_ERR "%s: MII write failed\n", dev->name);
- return 0;
+ printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
+ phy_id);
+ return -1;
}
#if DEBUG_MDIO
- printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
- cycles);
+ printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
+ phy_id, write ? "write" : "read", cycles);
#endif
if (write)
return 0;
if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
- printk(KERN_ERR "%s: MII read failed\n", dev->name);
- return 0;
+#if DEBUG_MDIO
+ printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
+ phy_id);
+#endif
+ return 0xFFFF; /* don't return error */
}
return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
- (__raw_readl(&mdio_regs->mdio_status[1]) << 8);
+ ((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}
-static int mdio_read(struct net_device *dev, int phy_id, int location)
+static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
unsigned long flags;
- u16 val;
+ int ret;
spin_lock_irqsave(&mdio_lock, flags);
- val = mdio_cmd(dev, phy_id, location, 0, 0);
+ ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
spin_unlock_irqrestore(&mdio_lock, flags);
- return val;
+#if DEBUG_MDIO
+ printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
+ phy_id, location, ret);
+#endif
+ return ret;
}
-static void mdio_write(struct net_device *dev, int phy_id, int location,
- int val)
+static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+ u16 val)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&mdio_lock, flags);
- mdio_cmd(dev, phy_id, location, 1, val);
+ ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
spin_unlock_irqrestore(&mdio_lock, flags);
+#if DEBUG_MDIO
+ printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
+ bus->name, phy_id, location, val, ret);
+#endif
+ return ret;
}
-static void phy_reset(struct net_device *dev, int phy_id)
+static int ixp4xx_mdio_register(void)
{
- struct port *port = netdev_priv(dev);
- int cycles = 0;
+ int err;
- mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
+ if (!(mdio_bus = mdiobus_alloc()))
+ return -ENOMEM;
- while (cycles < MAX_MII_RESET_RETRIES) {
- if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
-#if DEBUG_MDIO
- printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
- dev->name, cycles);
-#endif
- return;
- }
- udelay(1);
- cycles++;
- }
+ /* All MII PHY accesses use NPE-B Ethernet registers */
+ spin_lock_init(&mdio_lock);
+ mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+ __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+
+ mdio_bus->name = "IXP4xx MII Bus";
+ mdio_bus->read = &ixp4xx_mdio_read;
+ mdio_bus->write = &ixp4xx_mdio_write;
+ strcpy(mdio_bus->id, "0");
- printk(KERN_ERR "%s: MII reset failed\n", dev->name);
+ if ((err = mdiobus_register(mdio_bus)))
+ mdiobus_free(mdio_bus);
+ return err;
}
-static void eth_set_duplex(struct port *port)
+static void ixp4xx_mdio_remove(void)
{
- if (port->mii.full_duplex)
- __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
- &port->regs->tx_control[0]);
- else
- __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
- &port->regs->tx_control[0]);
+ mdiobus_unregister(mdio_bus);
+ mdiobus_free(mdio_bus);
}
-static void phy_check_media(struct port *port, int init)
+static void ixp4xx_adjust_link(struct net_device *dev)
{
- if (mii_check_media(&port->mii, 1, init))
- eth_set_duplex(port);
- if (port->mii.force_media) { /* mii_check_media() doesn't work */
- struct net_device *dev = port->netdev;
- int cur_link = mii_link_ok(&port->mii);
- int prev_link = netif_carrier_ok(dev);
-
- if (!prev_link && cur_link) {
- printk(KERN_INFO "%s: link up\n", dev->name);
- netif_carrier_on(dev);
- } else if (prev_link && !cur_link) {
+ struct port *port = netdev_priv(dev);
+ struct phy_device *phydev = port->phydev;
+
+ if (!phydev->link) {
+ if (port->speed) {
+ port->speed = 0;
printk(KERN_INFO "%s: link down\n", dev->name);
- netif_carrier_off(dev);
}
+ return;
}
-}
+ if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+ return;
-static void mdio_thread(struct work_struct *work)
-{
- struct port *port = container_of(work, struct port, mdio_thread.work);
+ port->speed = phydev->speed;
+ port->duplex = phydev->duplex;
+
+ if (port->duplex)
+ __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+ &port->regs->tx_control[0]);
+ else
+ __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+ &port->regs->tx_control[0]);
- phy_check_media(port, 0);
- schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
+ printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+ dev->name, port->speed, port->duplex ? "full" : "half");
}
@@ -412,47 +422,13 @@ static inline void debug_desc(u32 phys, struct desc *desc)
#endif
}
-static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
-{
-#if DEBUG_QUEUES
- static struct {
- int queue;
- char *name;
- } names[] = {
- { TX_QUEUE(0x10), "TX#0 " },
- { TX_QUEUE(0x20), "TX#1 " },
- { TX_QUEUE(0x00), "TX#2 " },
- { RXFREE_QUEUE(0x10), "RX-free#0 " },
- { RXFREE_QUEUE(0x20), "RX-free#1 " },
- { RXFREE_QUEUE(0x00), "RX-free#2 " },
- { TXDONE_QUEUE, "TX-done " },
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(names); i++)
- if (names[i].queue == queue)
- break;
-
- printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
- i < ARRAY_SIZE(names) ? names[i].name : "",
- is_get ? "->" : "<-", phys);
-#endif
-}
-
-static inline u32 queue_get_entry(unsigned int queue)
-{
- u32 phys = qmgr_get_entry(queue);
- debug_queue(queue, 1, phys);
- return phys;
-}
-
static inline int queue_get_desc(unsigned int queue, struct port *port,
int is_tx)
{
u32 phys, tab_phys, n_desc;
struct desc *tab;
- if (!(phys = queue_get_entry(queue)))
+ if (!(phys = qmgr_get_entry(queue)))
return -1;
phys &= ~0x1F; /* mask out non-address bits */
@@ -468,7 +444,6 @@ static inline int queue_get_desc(unsigned int queue, struct port *port,
static inline void queue_put_desc(unsigned int queue, u32 phys,
struct desc *desc)
{
- debug_queue(queue, 0, phys);
debug_desc(phys, desc);
BUG_ON(phys & 0x1F);
qmgr_put_entry(queue, phys);
@@ -498,7 +473,7 @@ static void eth_rx_irq(void *pdev)
printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
qmgr_disable_irq(port->plat->rxq);
- netif_rx_schedule(dev, &port->napi);
+ netif_rx_schedule(&port->napi);
}
static int eth_poll(struct napi_struct *napi, int budget)
@@ -526,7 +501,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
dev->name);
#endif
- netif_rx_complete(dev, napi);
+ netif_rx_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_empty(rxq) &&
netif_rx_reschedule(dev, napi)) {
@@ -562,7 +537,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
#endif
if (!skb) {
- port->stat.rx_dropped++;
+ dev->stats.rx_dropped++;
/* put the desc back on RX-ready queue */
desc->buf_len = MAX_MRU;
desc->pkt_len = 0;
@@ -588,9 +563,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
debug_pkt(dev, "eth_poll", skb->data, skb->len);
skb->protocol = eth_type_trans(skb, dev);
- dev->last_rx = jiffies;
- port->stat.rx_packets++;
- port->stat.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
netif_receive_skb(skb);
/* put the new buffer on RX-free queue */
@@ -618,7 +592,7 @@ static void eth_txdone_irq(void *unused)
#if DEBUG_TX
printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
- while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
+ while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
u32 npe_id, n_desc;
struct port *port;
struct desc *desc;
@@ -635,8 +609,8 @@ static void eth_txdone_irq(void *unused)
debug_desc(phys, desc);
if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
- port->stat.tx_packets++;
- port->stat.tx_bytes += desc->pkt_len;
+ port->netdev->stats.tx_packets++;
+ port->netdev->stats.tx_bytes += desc->pkt_len;
dma_unmap_tx(port, desc);
#if DEBUG_TX
@@ -674,7 +648,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len > MAX_MRU)) {
dev_kfree_skb(skb);
- port->stat.tx_errors++;
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@@ -690,7 +664,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
bytes = ALIGN(offset + len, 4);
if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
dev_kfree_skb(skb);
- port->stat.tx_dropped++;
+ dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
@@ -704,7 +678,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
#else
kfree(mem);
#endif
- port->stat.tx_dropped++;
+ dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -747,12 +721,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
}
-static struct net_device_stats *eth_stats(struct net_device *dev)
-{
- struct port *port = netdev_priv(dev);
- return &port->stat;
-}
-
static void eth_set_mcast_list(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
@@ -786,41 +754,80 @@ static void eth_set_mcast_list(struct net_device *dev)
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct port *port = netdev_priv(dev);
- unsigned int duplex_chg;
- int err;
if (!netif_running(dev))
return -EINVAL;
- err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
- if (duplex_chg)
- eth_set_duplex(port);
- return err;
+ return phy_mii_ioctl(port->phydev, if_mii(req), cmd);
+}
+
+/* ethtool support */
+
+static void ixp4xx_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct port *port = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
+ port->firmware[0], port->firmware[1],
+ port->firmware[2], port->firmware[3]);
+ strcpy(info->bus_info, "internal");
}
+static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_ethtool_gset(port->phydev, cmd);
+}
+
+static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_ethtool_sset(port->phydev, cmd);
+}
+
+static int ixp4xx_nway_reset(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_start_aneg(port->phydev);
+}
+
+static struct ethtool_ops ixp4xx_ethtool_ops = {
+ .get_drvinfo = ixp4xx_get_drvinfo,
+ .get_settings = ixp4xx_get_settings,
+ .set_settings = ixp4xx_set_settings,
+ .nway_reset = ixp4xx_nway_reset,
+ .get_link = ethtool_op_get_link,
+};
+
static int request_queues(struct port *port)
{
int err;
- err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
+ err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
+ "%s:RX-free", port->netdev->name);
if (err)
return err;
- err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
+ err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
+ "%s:RX", port->netdev->name);
if (err)
goto rel_rxfree;
- err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
+ err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
+ "%s:TX", port->netdev->name);
if (err)
goto rel_rx;
- err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
+ err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
+ "%s:TX-ready", port->netdev->name);
if (err)
goto rel_tx;
/* TX-done queue handles skbs sent out by the NPEs */
if (!ports_open) {
- err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
+ err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
+ "%s:TX-done", DRV_NAME);
if (err)
goto rel_txready;
}
@@ -944,10 +951,12 @@ static int eth_open(struct net_device *dev)
npe_name(npe));
return -EIO;
}
+ port->firmware[0] = msg.byte4;
+ port->firmware[1] = msg.byte5;
+ port->firmware[2] = msg.byte6;
+ port->firmware[3] = msg.byte7;
}
- mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
-
memset(&msg, 0, sizeof(msg));
msg.cmd = NPE_VLAN_SETRXQOSENTRY;
msg.eth_id = port->id;
@@ -985,6 +994,9 @@ static int eth_open(struct net_device *dev)
return err;
}
+ port->speed = 0; /* force "link up" message */
+ phy_start(port->phydev);
+
for (i = 0; i < ETH_ALEN; i++)
__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
__raw_writel(0x08, &port->regs->random_seed);
@@ -1012,10 +1024,8 @@ static int eth_open(struct net_device *dev)
__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
napi_enable(&port->napi);
- phy_check_media(port, 1);
eth_set_mcast_list(dev);
netif_start_queue(dev);
- schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
eth_rx_irq, dev);
@@ -1026,7 +1036,7 @@ static int eth_open(struct net_device *dev)
}
ports_open++;
/* we may already have RX data, enables IRQ */
- netif_rx_schedule(dev, &port->napi);
+ netif_rx_schedule(&port->napi);
return 0;
}
@@ -1106,25 +1116,31 @@ static int eth_close(struct net_device *dev)
printk(KERN_CRIT "%s: unable to disable loopback\n",
dev->name);
- port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
- ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
- mdio_write(dev, port->plat->phy, MII_BMCR,
- port->mii_bmcr | BMCR_PDOWN);
+ phy_stop(port->phydev);
if (!ports_open)
qmgr_disable_irq(TXDONE_QUEUE);
- cancel_rearming_delayed_work(&port->mdio_thread);
destroy_queues(port);
release_queues(port);
return 0;
}
+static const struct net_device_ops ixp4xx_netdev_ops = {
+ .ndo_open = eth_open,
+ .ndo_stop = eth_close,
+ .ndo_start_xmit = eth_xmit,
+ .ndo_set_multicast_list = eth_set_mcast_list,
+ .ndo_do_ioctl = eth_ioctl,
+
+};
+
static int __devinit eth_init_one(struct platform_device *pdev)
{
struct port *port;
struct net_device *dev;
struct eth_plat_info *plat = pdev->dev.platform_data;
u32 regs_phys;
+ char phy_id[BUS_ID_SIZE];
int err;
if (!(dev = alloc_etherdev(sizeof(struct port))))
@@ -1153,12 +1169,8 @@ static int __devinit eth_init_one(struct platform_device *pdev)
goto err_free;
}
- dev->open = eth_open;
- dev->hard_start_xmit = eth_xmit;
- dev->stop = eth_close;
- dev->get_stats = eth_stats;
- dev->do_ioctl = eth_ioctl;
- dev->set_multicast_list = eth_set_mcast_list;
+ dev->netdev_ops = &ixp4xx_netdev_ops;
+ dev->ethtool_ops = &ixp4xx_ethtool_ops;
dev->tx_queue_len = 100;
netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
@@ -1191,22 +1203,19 @@ static int __devinit eth_init_one(struct platform_device *pdev)
__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
udelay(50);
- port->mii.dev = dev;
- port->mii.mdio_read = mdio_read;
- port->mii.mdio_write = mdio_write;
- port->mii.phy_id = plat->phy;
- port->mii.phy_id_mask = 0x1F;
- port->mii.reg_num_mask = 0x1F;
+ snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
+ port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(port->phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(port->phydev);
+ }
+
+ port->phydev->irq = PHY_POLL;
printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
npe_name(port->npe));
- phy_reset(dev, plat->phy);
- port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
- ~(BMCR_RESET | BMCR_PDOWN);
- mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
-
- INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
return 0;
err_unreg:
@@ -1232,7 +1241,7 @@ static int __devexit eth_remove_one(struct platform_device *pdev)
return 0;
}
-static struct platform_driver drv = {
+static struct platform_driver ixp4xx_eth_driver = {
.driver.name = DRV_NAME,
.probe = eth_init_one,
.remove = eth_remove_one,
@@ -1240,20 +1249,19 @@ static struct platform_driver drv = {
static int __init eth_init_module(void)
{
+ int err;
if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
return -ENOSYS;
- /* All MII PHY accesses use NPE-B Ethernet registers */
- spin_lock_init(&mdio_lock);
- mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
- __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
-
- return platform_driver_register(&drv);
+ if ((err = ixp4xx_mdio_register()))
+ return err;
+ return platform_driver_register(&ixp4xx_eth_driver);
}
static void __exit eth_cleanup_module(void)
{
- platform_driver_unregister(&drv);
+ platform_driver_unregister(&ixp4xx_eth_driver);
+ ixp4xx_mdio_remove();
}
MODULE_AUTHOR("Krzysztof Halasa");
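
As a reader aid, the following is a condensed, illustrative sketch of the phylib lifecycle this patch converts the driver to: register an mii_bus wrapping the MDIO register helpers at module init, attach each netdev to its PHY with phy_connect() at probe time, and let the adjust_link callback reprogram the MAC for the negotiated speed/duplex. The example_* names are hypothetical and error handling is abbreviated; only phylib calls that already appear in the diff (mdiobus_alloc/register/free, phy_connect, PHY_POLL) are used, with the 2.6.29-era signatures shown in the hunks above.

	/* Hedged sketch of the phylib pattern this patch adopts; example_*
	 * names are hypothetical, not part of the driver. */
	#include <linux/netdevice.h>
	#include <linux/phy.h>

	static struct mii_bus *example_mdio_bus;

	/* module init: allocate and register an MDIO bus backed by the
	 * driver's own register-level read/write helpers */
	static int example_mdio_register(int (*rd)(struct mii_bus *, int, int),
					 int (*wr)(struct mii_bus *, int, int, u16))
	{
		int err;

		if (!(example_mdio_bus = mdiobus_alloc()))
			return -ENOMEM;
		example_mdio_bus->name = "example MII bus";
		example_mdio_bus->read = rd;	/* e.g. ixp4xx_mdio_read */
		example_mdio_bus->write = wr;	/* e.g. ixp4xx_mdio_write */
		strcpy(example_mdio_bus->id, "0");
		if ((err = mdiobus_register(example_mdio_bus)))
			mdiobus_free(example_mdio_bus);
		return err;
	}

	/* phylib invokes this on every link/speed/duplex change; the MAC's
	 * duplex control bit is reprogrammed here (cf. ixp4xx_adjust_link) */
	static void example_adjust_link(struct net_device *dev)
	{
	}

	/* probe: bind the netdev to its PHY on bus "0" at address phy_addr */
	static int example_attach_phy(struct net_device *dev, int phy_addr)
	{
		char phy_id[BUS_ID_SIZE];
		struct phy_device *phydev;

		snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", phy_addr);
		phydev = phy_connect(dev, phy_id, &example_adjust_link, 0,
				     PHY_INTERFACE_MODE_MII);
		if (IS_ERR(phydev))
			return PTR_ERR(phydev);
		phydev->irq = PHY_POLL;		/* no PHY interrupt wired up */
		return 0;
	}

In this scheme, ndo_open calls phy_start() on the attached phy_device and ndo_stop calls phy_stop(), which is what lets the patch drop the old mdio_thread polling work and the manual BMCR reset/power-down handling.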