author		David Woodhouse <David.Woodhouse@intel.com>	2009-01-05 10:50:33 +0100
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-01-05 10:50:33 +0100
commit		353816f43d1fb340ff2d9a911dd5d0799c09f6a5 (patch)
tree		517290fd884d286fe2971137ac89f89e3567785a /drivers/net/niu.c
parent		160bbab3000dafccbe43688e48208cecf4deb879 (diff)
parent		fe0bdec68b77020281dc814805edfe594ae89e0f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:
	arch/arm/mach-pxa/corgi.c
	arch/arm/mach-pxa/poodle.c
	arch/arm/mach-pxa/spitz.c
Diffstat (limited to 'drivers/net/niu.c')
-rw-r--r--	drivers/net/niu.c	145
1 files changed, 115 insertions, 30 deletions
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 1b6f548c4411..0c0b752315ca 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -448,7 +448,7 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
struct niu_link_config *lp = &np->link_config;
u16 pll_cfg, pll_sts;
int max_retry = 100;
- u64 sig, mask, val;
+ u64 uninitialized_var(sig), mask, val;
u32 tx_cfg, rx_cfg;
unsigned long i;
int err;
@@ -547,7 +547,7 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
struct niu_link_config *lp = &np->link_config;
u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
int max_retry = 100;
- u64 sig, mask, val;
+ u64 uninitialized_var(sig), mask, val;
unsigned long i;
int err;
@@ -738,7 +738,7 @@ static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
static int esr_reset(struct niu *np)
{
- u32 reset;
+ u32 uninitialized_var(reset);
int err;
err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
@@ -3392,8 +3392,6 @@ static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
skb->protocol = eth_type_trans(skb, np->dev);
netif_receive_skb(skb);
- np->dev->last_rx = jiffies;
-
return num_rcr;
}
@@ -3529,6 +3527,57 @@ out:
}
}
+static inline void niu_sync_rx_discard_stats(struct niu *np,
+ struct rx_ring_info *rp,
+ const int limit)
+{
+ /* This elaborate scheme is needed for reading the RX discard
+ * counters, as they are only 16-bit and can overflow quickly,
+ * and because the overflow indication bit is not usable as
+ * the counter value does not wrap, but remains at max value
+ * 0xFFFF.
+ *
+ * In theory and in practice counters can be lost in between
+ * reading nr64() and clearing the counter nw64(). For this
+ * reason, the number of counter clearings nw64() is
+ * limited/reduced though the limit parameter.
+ */
+ int rx_channel = rp->rx_channel;
+ u32 misc, wred;
+
+ /* RXMISC (Receive Miscellaneous Discard Count), covers the
+ * following discard events: IPP (Input Port Process),
+ * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
+ * Block Ring) prefetch buffer is empty.
+ */
+ misc = nr64(RXMISC(rx_channel));
+ if (unlikely((misc & RXMISC_COUNT) > limit)) {
+ nw64(RXMISC(rx_channel), 0);
+ rp->rx_errors += misc & RXMISC_COUNT;
+
+ if (unlikely(misc & RXMISC_OFLOW))
+ dev_err(np->device, "rx-%d: Counter overflow "
+ "RXMISC discard\n", rx_channel);
+
+ niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n",
+ np->dev->name, rx_channel, misc, misc-limit);
+ }
+
+ /* WRED (Weighted Random Early Discard) by hardware */
+ wred = nr64(RED_DIS_CNT(rx_channel));
+ if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
+ nw64(RED_DIS_CNT(rx_channel), 0);
+ rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
+
+ if (unlikely(wred & RED_DIS_CNT_OFLOW))
+ dev_err(np->device, "rx-%d: Counter overflow "
+ "WRED discard\n", rx_channel);
+
+ niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n",
+ np->dev->name, rx_channel, wred, wred-limit);
+ }
+}
+
static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
{
int qlen, rcr_done = 0, work_done = 0;
@@ -3569,6 +3618,10 @@ static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
+ /* Only sync discards stats when qlen indicate potential for drops */
+ if (qlen > 10)
+ niu_sync_rx_discard_stats(np, rp, 0x7FFF);
+
return work_done;
}
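
The added niu_sync_rx_discard_stats() reads 16-bit discard counters that saturate at 0xFFFF rather than wrapping, and only clears them once they exceed a caller-supplied limit (0x7FFF on the hot path above, 0 from the stats paths further down) so that drops lost between the read and the clear are bounded. Below is a minimal user-space sketch of that accumulate-and-clear pattern; the names (hw_discard, sync_discards, and so on) are invented for illustration and this is not driver code.

/* User-space model of the saturating-counter scheme described above. */
#include <stdint.h>
#include <stdio.h>

static uint16_t hw_counter;	/* stand-in for RXMISC / RED_DIS_CNT */
static uint64_t sw_total;	/* software-accumulated discard count */

/* Simulate the hardware dropping n frames: the 16-bit counter saturates
 * at 0xFFFF instead of wrapping, as the comment above describes. */
static void hw_discard(unsigned int n)
{
	uint32_t v = (uint32_t)hw_counter + n;

	hw_counter = v > 0xFFFF ? 0xFFFF : (uint16_t)v;
}

/* Fold the hardware counter into the software total, but only clear it
 * (and risk losing drops that land between the read and the clear) when
 * the value exceeds the caller-chosen limit. */
static void sync_discards(unsigned int limit)
{
	uint16_t val = hw_counter;	/* nr64() analogue */

	if (val > limit) {
		hw_counter = 0;		/* nw64(..., 0) analogue */
		sw_total += val;
		if (val == 0xFFFF)
			fprintf(stderr, "counter saturated, some drops were lost\n");
	}
}

int main(void)
{
	hw_discard(20000);
	sync_discards(0x7FFF);	/* hot path: leave small counts alone */
	hw_discard(70000);
	sync_discards(0);	/* stats path: always fold the count in */
	printf("total discards: %llu\n", (unsigned long long)sw_total);
	return 0;
}
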
@@ -3616,7 +3669,7 @@ static int niu_poll(struct napi_struct *napi, int budget)
work_done = niu_poll_core(np, lp, budget);
if (work_done < budget) {
- netif_rx_complete(np->dev, napi);
+ netif_rx_complete(napi);
niu_ldg_rearm(np, lp, 1);
}
return work_done;
@@ -4035,12 +4088,12 @@ static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
u64 v0, u64 v1, u64 v2)
{
- if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) {
+ if (likely(netif_rx_schedule_prep(&lp->napi))) {
lp->v0 = v0;
lp->v1 = v1;
lp->v2 = v2;
__niu_fastpath_interrupt(np, lp->ldg_num, v0);
- __netif_rx_schedule(np->dev, &lp->napi);
+ __netif_rx_schedule(&lp->napi);
}
}
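
These hunks track the NAPI API change in which netif_rx_schedule_prep(), __netif_rx_schedule() and netif_rx_complete() drop their struct net_device argument and operate on the napi_struct alone. The sketch below is a rough user-space model of the prep/complete pairing: "prep" atomically claims a scheduled flag so only the first interrupt queues the poll, and "complete" releases it. struct fake_napi and its helpers are invented; this is not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_napi {
	atomic_bool sched;		/* analogue of the NAPI scheduled state */
};

static bool rx_schedule_prep(struct fake_napi *n)
{
	/* true only for the first caller, like netif_rx_schedule_prep() */
	return !atomic_exchange(&n->sched, true);
}

static void rx_complete(struct fake_napi *n)
{
	/* poll finished under budget: allow the next interrupt to schedule again */
	atomic_store(&n->sched, false);
}

int main(void)
{
	struct fake_napi napi = { .sched = false };

	printf("irq 1 schedules poll: %d\n", rx_schedule_prep(&napi));	/* 1 */
	printf("irq 2 schedules poll: %d\n", rx_schedule_prep(&napi));	/* 0 */
	rx_complete(&napi);
	printf("irq 3 schedules poll: %d\n", rx_schedule_prep(&napi));	/* 1 */
	return 0;
}
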
@@ -5849,17 +5902,42 @@ static void niu_stop_hw(struct niu *np)
niu_reset_rx_channels(np);
}
+static void niu_set_irq_name(struct niu *np)
+{
+ int port = np->port;
+ int i, j = 1;
+
+ sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
+
+ if (port == 0) {
+ sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
+ sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
+ j = 3;
+ }
+
+ for (i = 0; i < np->num_ldg - j; i++) {
+ if (i < np->num_rx_rings)
+ sprintf(np->irq_name[i+j], "%s-rx-%d",
+ np->dev->name, i);
+ else if (i < np->num_tx_rings + np->num_rx_rings)
+ sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
+ i - np->num_rx_rings);
+ }
+}
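
niu_set_irq_name() labels the MAC vector first, adds MIF and SYSERR only on port 0, and then names the remaining LDGs after their RX or TX ring. The stand-alone rendering below shows the resulting layout for a hypothetical port-0 device "eth2" with 8 LDGs, 3 RX rings and 2 TX rings; the device name and counts are made up, purely for illustration.

#include <stdio.h>

int main(void)
{
	const char *dev = "eth2";
	int num_ldg = 8, num_rx = 3, num_tx = 2, i, j = 3;	/* j = 3 on port 0 */

	printf("%s:MAC\n%s:MIF\n%s:SYSERR\n", dev, dev, dev);
	for (i = 0; i < num_ldg - j; i++) {
		if (i < num_rx)
			printf("%s-rx-%d\n", dev, i);
		else if (i < num_tx + num_rx)
			printf("%s-tx-%d\n", dev, i - num_rx);
	}
	return 0;
}
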
+
static int niu_request_irq(struct niu *np)
{
int i, j, err;
+ niu_set_irq_name(np);
+
err = 0;
for (i = 0; i < np->num_ldg; i++) {
struct niu_ldg *lp = &np->ldg[i];
err = request_irq(lp->irq, niu_interrupt,
IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- np->dev->name, lp);
+ np->irq_name[i], lp);
if (err)
goto out_free_irqs;
@@ -6050,15 +6128,17 @@ static void niu_get_rx_stats(struct niu *np)
for (i = 0; i < np->num_rx_rings; i++) {
struct rx_ring_info *rp = &np->rx_rings[i];
+ niu_sync_rx_discard_stats(np, rp, 0);
+
pkts += rp->rx_packets;
bytes += rp->rx_bytes;
dropped += rp->rx_dropped;
errors += rp->rx_errors;
}
- np->net_stats.rx_packets = pkts;
- np->net_stats.rx_bytes = bytes;
- np->net_stats.rx_dropped = dropped;
- np->net_stats.rx_errors = errors;
+ np->dev->stats.rx_packets = pkts;
+ np->dev->stats.rx_bytes = bytes;
+ np->dev->stats.rx_dropped = dropped;
+ np->dev->stats.rx_errors = errors;
}
static void niu_get_tx_stats(struct niu *np)
@@ -6074,9 +6154,9 @@ static void niu_get_tx_stats(struct niu *np)
bytes += rp->tx_bytes;
errors += rp->tx_errors;
}
- np->net_stats.tx_packets = pkts;
- np->net_stats.tx_bytes = bytes;
- np->net_stats.tx_errors = errors;
+ np->dev->stats.tx_packets = pkts;
+ np->dev->stats.tx_bytes = bytes;
+ np->dev->stats.tx_errors = errors;
}
static struct net_device_stats *niu_get_stats(struct net_device *dev)
@@ -6086,7 +6166,7 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
niu_get_rx_stats(np);
niu_get_tx_stats(np);
- return &np->net_stats;
+ return &dev->stats;
}
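
The stats hunks stop copying totals into the driver-private np->net_stats and instead fill in the stats block embedded in struct net_device, so niu_get_stats() can simply return &dev->stats. Below is a small user-space model of that aggregation; the types standing in for the ring and device structures are invented.

#include <stdio.h>

struct ring      { unsigned long packets, bytes; };
struct dev_stats { unsigned long rx_packets, rx_bytes; };
struct device    { struct dev_stats stats; };	/* core-owned, like net_device */

static void get_rx_stats(struct device *d, const struct ring *rings, int n)
{
	unsigned long pkts = 0, bytes = 0;
	int i;

	for (i = 0; i < n; i++) {
		pkts  += rings[i].packets;
		bytes += rings[i].bytes;
	}
	d->stats.rx_packets = pkts;	/* previously a driver-private copy */
	d->stats.rx_bytes   = bytes;
}

int main(void)
{
	struct ring rings[2] = { { 10, 1000 }, { 5, 500 } };
	struct device d = { { 0, 0 } };

	get_rx_stats(&d, rings, 2);
	printf("rx_packets=%lu rx_bytes=%lu\n", d.stats.rx_packets, d.stats.rx_bytes);
	return 0;
}
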
static void niu_load_hash_xmac(struct niu *np, u16 *hash)
@@ -6991,6 +7071,8 @@ static void niu_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < np->num_rx_rings; i++) {
struct rx_ring_info *rp = &np->rx_rings[i];
+ niu_sync_rx_discard_stats(np, rp, 0);
+
data[0] = rp->rx_channel;
data[1] = rp->rx_packets;
data[2] = rp->rx_bytes;
@@ -8824,7 +8906,7 @@ static u64 niu_pci_map_page(struct device *dev, struct page *page,
static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
size_t size, enum dma_data_direction direction)
{
- return dma_unmap_page(dev, dma_address, size, direction);
+ dma_unmap_page(dev, dma_address, size, direction);
}
static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
@@ -8891,28 +8973,31 @@ static struct net_device * __devinit niu_alloc_and_init(
return dev;
}
+static const struct net_device_ops niu_netdev_ops = {
+ .ndo_open = niu_open,
+ .ndo_stop = niu_close,
+ .ndo_start_xmit = niu_start_xmit,
+ .ndo_get_stats = niu_get_stats,
+ .ndo_set_multicast_list = niu_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = niu_set_mac_addr,
+ .ndo_do_ioctl = niu_ioctl,
+ .ndo_tx_timeout = niu_tx_timeout,
+ .ndo_change_mtu = niu_change_mtu,
+};
+
static void __devinit niu_assign_netdev_ops(struct net_device *dev)
{
- dev->open = niu_open;
- dev->stop = niu_close;
- dev->get_stats = niu_get_stats;
- dev->set_multicast_list = niu_set_rx_mode;
- dev->set_mac_address = niu_set_mac_addr;
- dev->do_ioctl = niu_ioctl;
- dev->tx_timeout = niu_tx_timeout;
- dev->hard_start_xmit = niu_start_xmit;
+ dev->netdev_ops = &niu_netdev_ops;
dev->ethtool_ops = &niu_ethtool_ops;
dev->watchdog_timeo = NIU_TX_TIMEOUT;
- dev->change_mtu = niu_change_mtu;
}
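
The conversion above replaces per-field function-pointer assignments on struct net_device with a single dev->netdev_ops pointer to one shared, const operations table. The fragment below shows the same ops-table pattern in a self-contained user-space form; struct fake_dev and its operations are invented for illustration.

#include <stdio.h>

struct fake_dev;

struct fake_dev_ops {
	int  (*open)(struct fake_dev *dev);
	void (*stop)(struct fake_dev *dev);
};

struct fake_dev {
	const char *name;
	const struct fake_dev_ops *ops;	/* one const table shared by all devices */
};

static int demo_open(struct fake_dev *dev)
{
	printf("%s: open\n", dev->name);
	return 0;
}

static void demo_stop(struct fake_dev *dev)
{
	printf("%s: stop\n", dev->name);
}

static const struct fake_dev_ops demo_ops = {
	.open = demo_open,
	.stop = demo_stop,
};

int main(void)
{
	struct fake_dev dev = { .name = "demo0", .ops = &demo_ops };

	dev.ops->open(&dev);
	dev.ops->stop(&dev);
	return 0;
}
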
static void __devinit niu_device_announce(struct niu *np)
{
struct net_device *dev = np->dev;
- DECLARE_MAC_BUF(mac);
- pr_info("%s: NIU Ethernet %s\n",
- dev->name, print_mac(mac, dev->dev_addr));
+ pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",