Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/char/synclink_gt.c | 2
-rw-r--r-- drivers/char/tlclk.c | 93
-rw-r--r-- drivers/edac/Kconfig | 102
-rw-r--r-- drivers/edac/Makefile | 18
-rw-r--r-- drivers/edac/amd76x_edac.c | 356
-rw-r--r-- drivers/edac/e752x_edac.c | 1071
-rw-r--r-- drivers/edac/e7xxx_edac.c | 558
-rw-r--r-- drivers/edac/edac_mc.c | 2209
-rw-r--r-- drivers/edac/edac_mc.h | 448
-rw-r--r-- drivers/edac/i82860_edac.c | 299
-rw-r--r-- drivers/edac/i82875p_edac.c | 532
-rw-r--r-- drivers/edac/r82600_edac.c | 407
-rw-r--r-- drivers/md/kcopyd.c | 1
-rw-r--r-- drivers/net/Kconfig | 10
-rw-r--r-- drivers/net/b44.c | 2
-rw-r--r-- drivers/net/cassini.c | 40
-rw-r--r-- drivers/net/e100.c | 142
-rw-r--r-- drivers/net/e1000/e1000.h | 51
-rw-r--r-- drivers/net/e1000/e1000_ethtool.c | 474
-rw-r--r-- drivers/net/e1000/e1000_hw.c | 71
-rw-r--r-- drivers/net/e1000/e1000_hw.h | 45
-rw-r--r-- drivers/net/e1000/e1000_main.c | 1612
-rw-r--r-- drivers/net/e1000/e1000_osdep.h | 2
-rw-r--r-- drivers/net/e1000/e1000_param.c | 58
-rw-r--r-- drivers/net/mv643xx_eth.c | 680
-rw-r--r-- drivers/net/skge.c | 20
-rw-r--r-- drivers/net/sky2.c | 219
-rw-r--r-- drivers/net/spider_net.c | 512
-rw-r--r-- drivers/net/spider_net.h | 75
-rw-r--r-- drivers/net/spider_net_ethtool.c | 19
-rw-r--r-- drivers/net/tg3.c | 82
-rw-r--r-- drivers/net/tg3.h | 1
-rw-r--r-- drivers/net/wireless/airo.c | 21
-rw-r--r-- drivers/net/wireless/atmel.c | 4
-rw-r--r-- drivers/net/wireless/hostap/Kconfig | 22
-rw-r--r-- drivers/net/wireless/hostap/Makefile | 3
-rw-r--r-- drivers/net/wireless/hostap/hostap.h | 37
-rw-r--r-- drivers/net/wireless/hostap/hostap_80211.h | 3
-rw-r--r-- drivers/net/wireless/hostap/hostap_80211_rx.c | 11
-rw-r--r-- drivers/net/wireless/hostap/hostap_80211_tx.c | 15
-rw-r--r-- drivers/net/wireless/hostap/hostap_ap.c | 36
-rw-r--r-- drivers/net/wireless/hostap/hostap_ap.h | 2
-rw-r--r-- drivers/net/wireless/hostap/hostap_common.h | 3
-rw-r--r-- drivers/net/wireless/hostap/hostap_config.h | 13
-rw-r--r-- drivers/net/wireless/hostap/hostap_info.c | 3
-rw-r--r-- drivers/net/wireless/hostap/hostap_ioctl.c | 12
-rw-r--r-- drivers/net/wireless/hostap/hostap_main.c | 60
-rw-r--r-- drivers/net/wireless/hostap/hostap_proc.c | 7
-rw-r--r-- drivers/net/wireless/hostap/hostap_wlan.h | 4
-rw-r--r-- drivers/net/wireless/ipw2100.c | 434
-rw-r--r-- drivers/net/wireless/ipw2200.c | 14
-rw-r--r-- drivers/net/wireless/prism54/isl_ioctl.c | 2
-rw-r--r-- drivers/net/wireless/prism54/islpci_eth.c | 2
-rw-r--r-- drivers/net/wireless/ray_cs.c | 2
-rw-r--r-- drivers/net/wireless/wavelan_cs.c | 2
-rw-r--r-- drivers/pci/quirks.c | 5
-rw-r--r-- drivers/scsi/ahci.c | 10
-rw-r--r-- drivers/scsi/ata_piix.c | 3
-rw-r--r-- drivers/scsi/libata-core.c | 73
-rw-r--r-- drivers/scsi/sata_promise.c | 16
-rw-r--r-- drivers/scsi/sata_svw.c | 1
-rw-r--r-- drivers/serial/8250.c | 13
-rw-r--r-- drivers/serial/8250_pci.c | 10
-rw-r--r-- drivers/serial/Kconfig | 2
-rw-r--r-- drivers/serial/at91_serial.c | 2
-rw-r--r-- drivers/serial/suncore.c | 34
-rw-r--r-- drivers/serial/sunsab.c | 7
-rw-r--r-- drivers/video/sbuslib.c | 9
-rw-r--r-- drivers/video/sbuslib.h | 2
71 files changed, 8917 insertions(+), 2196 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 283c089537bc..bddf431bbb72 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -68,4 +68,6 @@ source "drivers/infiniband/Kconfig"
source "drivers/sn/Kconfig"
+source "drivers/edac/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 7c45050ecd03..619dd964c51c 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_PHONE) += telephony/
obj-$(CONFIG_MD) += md/
obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_ISDN) += isdn/
+obj-$(CONFIG_EDAC) += edac/
obj-$(CONFIG_MCA) += mca/
obj-$(CONFIG_EISA) += eisa/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 07c9be6a6bbf..a85a60a93deb 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -2630,7 +2630,7 @@ static int get_interface(struct slgt_info *info, int __user *if_mode)
static int set_interface(struct slgt_info *info, int if_mode)
{
unsigned long flags;
- unsigned char val;
+ unsigned short val;
DBGINFO(("%s set_interface=%x\n", info->device_name, if_mode));
spin_lock_irqsave(&info->lock,flags);
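
The one-line fix above widens val because the interface-mode value can carry flag bits above bit 7; storing it in an unsigned char would silently truncate them before they reach the hardware. A minimal sketch of the truncation (the 0x1100 flag layout here is hypothetical, not taken from the driver):

	#include <stdio.h>

	int main(void)
	{
		int if_mode = 0x1100;            /* flags in bits 8 and 12 */
		unsigned char narrow = if_mode;  /* truncates to 0x00 */
		unsigned short wide = if_mode;   /* keeps 0x1100 */

		printf("narrow=0x%02x wide=0x%04x\n", narrow, wide);
		return 0;
	}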
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index bc56df8a3474..4c272189cd42 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h> /* printk() */
#include <linux/fs.h> /* everything... */
#include <linux/errno.h> /* error codes */
-#include <linux/delay.h> /* udelay */
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
@@ -156,6 +155,8 @@ This directory exports the following interfaces. Their operation is
documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4.
alarms :
current_ref :
+received_ref_clk3a :
+received_ref_clk3b :
enable_clk3a_output :
enable_clk3b_output :
enable_clka0_output :
@@ -165,7 +166,7 @@ enable_clkb1_output :
filter_select :
hardware_switching :
hardware_switching_mode :
-interrupt_switch :
+telclock_version :
mode_select :
refalign :
reset :
@@ -173,7 +174,6 @@ select_amcb1_transmit_clock :
select_amcb2_transmit_clock :
select_redundant_clock :
select_ref_frequency :
-test_mode :
All sysfs interfaces are integers in hex format, i.e. echo 99 > refalign
has the same effect as echo 0x99 > refalign.
@@ -226,7 +226,7 @@ static int tlclk_release(struct inode *inode, struct file *filp)
return 0;
}
-ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
+static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
if (count < sizeof(struct tlclk_alarms))
@@ -242,7 +242,7 @@ ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
return sizeof(struct tlclk_alarms);
}
-ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
+static ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
loff_t *f_pos)
{
return 0;
@@ -278,21 +278,21 @@ static ssize_t show_current_ref(struct device *d,
static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
-static ssize_t show_interrupt_switch(struct device *d,
+static ssize_t show_telclock_version(struct device *d,
struct device_attribute *attr, char *buf)
{
unsigned long ret_val;
unsigned long flags;
spin_lock_irqsave(&event_lock, flags);
- ret_val = inb(TLCLK_REG6);
+ ret_val = inb(TLCLK_REG5);
spin_unlock_irqrestore(&event_lock, flags);
return sprintf(buf, "0x%lX\n", ret_val);
}
-static DEVICE_ATTR(interrupt_switch, S_IRUGO,
- show_interrupt_switch, NULL);
+static DEVICE_ATTR(telclock_version, S_IRUGO,
+ show_telclock_version, NULL);
static ssize_t show_alarms(struct device *d,
struct device_attribute *attr, char *buf)
@@ -309,6 +309,50 @@ static ssize_t show_alarms(struct device *d,
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static ssize_t store_received_ref_clk3a(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xef, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3a, S_IWUGO, NULL,
+ store_received_ref_clk3a);
+
+
+static ssize_t store_received_ref_clk3b(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xef, val << 1);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3b, S_IWUGO, NULL,
+ store_received_ref_clk3b);
+
+
static ssize_t store_enable_clk3b_output(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -436,26 +480,6 @@ static ssize_t store_enable_clka0_output(struct device *d,
static DEVICE_ATTR(enable_clka0_output, S_IWUGO, NULL,
store_enable_clka0_output);
-static ssize_t store_test_mode(struct device *d,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- unsigned long flags;
- unsigned long tmp;
- unsigned char val;
-
- sscanf(buf, "%lX", &tmp);
- dev_dbg(d, "tmp = 0x%lX\n", tmp);
-
- val = (unsigned char)tmp;
- spin_lock_irqsave(&event_lock, flags);
- SET_PORT_BITS(TLCLK_REG4, 0xfd, 2);
- spin_unlock_irqrestore(&event_lock, flags);
-
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(test_mode, S_IWUGO, NULL, store_test_mode);
-
static ssize_t store_select_amcb2_transmit_clock(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -475,7 +499,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
switch (val) {
case CLK_8_592MHz:
- SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
break;
case CLK_11_184MHz:
SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
@@ -484,7 +508,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
break;
case CLK_44_736MHz:
- SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
break;
}
} else
@@ -653,9 +677,7 @@ static ssize_t store_refalign (struct device *d,
dev_dbg(d, "tmp = 0x%lX\n", tmp);
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
- udelay(2);
SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
- udelay(2);
SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
spin_unlock_irqrestore(&event_lock, flags);
@@ -706,15 +728,16 @@ static DEVICE_ATTR(reset, S_IWUGO, NULL, store_reset);
static struct attribute *tlclk_sysfs_entries[] = {
&dev_attr_current_ref.attr,
- &dev_attr_interrupt_switch.attr,
+ &dev_attr_telclock_version.attr,
&dev_attr_alarms.attr,
+ &dev_attr_received_ref_clk3a.attr,
+ &dev_attr_received_ref_clk3b.attr,
&dev_attr_enable_clk3a_output.attr,
&dev_attr_enable_clk3b_output.attr,
&dev_attr_enable_clkb1_output.attr,
&dev_attr_enable_clka1_output.attr,
&dev_attr_enable_clkb0_output.attr,
&dev_attr_enable_clka0_output.attr,
- &dev_attr_test_mode.attr,
&dev_attr_select_amcb1_transmit_clock.attr,
&dev_attr_select_amcb2_transmit_clock.attr,
&dev_attr_select_redundant_clock.attr,
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
new file mode 100644
index 000000000000..4819e7fc00dd
--- /dev/null
+++ b/drivers/edac/Kconfig
@@ -0,0 +1,102 @@
+#
+# EDAC Kconfig
+# Copyright (c) 2003 Linux Networx
+# Licensed and distributed under the GPL
+#
+# $Id: Kconfig,v 1.4.2.7 2005/07/08 22:05:38 dsp_llnl Exp $
+#
+
+menu 'EDAC - error detection and reporting (RAS)'
+
+config EDAC
+ tristate "EDAC core system error reporting"
+ depends on X86
+ default y
+ help
+ EDAC is designed to report errors in the core system.
+ These are low-level errors that are reported in the CPU or
+ supporting chipset: memory errors, cache errors, PCI errors,
+ thermal throttling, etc. If unsure, select 'Y'.
+
+
+comment "Reporting subsystems"
+ depends on EDAC
+
+config EDAC_DEBUG
+ bool "Debugging"
+ depends on EDAC
+ help
+ This turns on debugging information for the entire EDAC
+ subsystem. You can load the module with "debug_level=x";
+ there are currently four debug levels (x=0,1,2,3, from low
+ to high). Usually you should select 'N'.
+
+config EDAC_MM_EDAC
+ tristate "Main Memory EDAC (Error Detection And Correction) reporting"
+ depends on EDAC
+ default y
+ help
+ Some systems are able to detect and correct errors in main
+ memory. EDAC can report statistics on memory error
+ detection and correction (EDAC - or commonly referred to ECC
+ errors). EDAC will also try to decode where these errors
+ occurred so that a particular failing memory module can be
+ replaced. If unsure, select 'Y'.
+
+
+config EDAC_AMD76X
+ tristate "AMD 76x (760, 762, 768)"
+ depends on EDAC_MM_EDAC && PCI
+ help
+ Support for error detection and correction on the AMD 76x
+ series of chipsets used with the Athlon processor.
+
+config EDAC_E7XXX
+ tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
+ depends on EDAC_MM_EDAC && PCI
+ help
+ Support for error detection and correction on the Intel
+ E7205, E7500, E7501 and E7505 server chipsets.
+
+config EDAC_E752X
+ tristate "Intel e752x (e7520, e7525, e7320)"
+ depends on EDAC_MM_EDAC && PCI
+ help
+ Support for error detection and correction on the Intel
+ E7520, E7525, E7320 server chipsets.
+
+config EDAC_I82875P
+ tristate "Intel 82875p (D82875P, E7210)"
+ depends on EDAC_MM_EDAC && PCI
+ help
+ Support for error detection and correction on the Intel
+ D82875P and E7210 server chipsets.
+
+config EDAC_I82860
+ tristate "Intel 82860"
+ depends on EDAC_MM_EDAC && PCI
+ help
+ Support for error detection and correction on the Intel
+ 82860 chipset.
+
+config EDAC_R82600
+ tristate "Radisys 82600 embedded chipset"
+ depends on EDAC_MM_EDAC
+ help
+ Support for error detection and correction on the Radisys
+ 82600 embedded chipset.
+
+choice
+ prompt "Error detecting method"
+ depends on EDAC
+ default EDAC_POLL
+
+config EDAC_POLL
+ bool "Poll for errors"
+ depends on EDAC
+ help
+ Poll the chipset periodically to detect errors.
+
+endchoice
+
+endmenu
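
With only EDAC_POLL implemented, every registered controller is checked on a timer rather than by interrupt. A minimal userspace sketch of the idea, assuming a per-controller edac_check callback like the drivers below register (the struct and loop are illustrative only, not the kernel implementation):

	#include <stdio.h>
	#include <unistd.h>

	struct mem_ctl_info;
	typedef void (*edac_check_fn)(struct mem_ctl_info *mci);

	struct mem_ctl_info {
		const char *ctl_name;
		edac_check_fn edac_check;
	};

	static void fake_check(struct mem_ctl_info *mci)
	{
		printf("%s: polled for ECC errors\n", mci->ctl_name);
	}

	int main(void)
	{
		struct mem_ctl_info mci = {
			.ctl_name = "demo",
			.edac_check = fake_check,
		};
		int i;

		for (i = 0; i < 3; i++) {	/* kernel: roughly once a second */
			mci.edac_check(&mci);
			sleep(1);
		}
		return 0;
	}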
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
new file mode 100644
index 000000000000..93137fdab4b3
--- /dev/null
+++ b/drivers/edac/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux kernel EDAC drivers.
+#
+# Copyright 02 Jul 2003, Linux Networx (http://lnxi.com)
+# This file may be distributed under the terms of the
+# GNU General Public License.
+#
+# $Id: Makefile,v 1.4.2.3 2005/07/08 22:05:38 dsp_llnl Exp $
+
+
+obj-$(CONFIG_EDAC_MM_EDAC) += edac_mc.o
+obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
+obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
+obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
+obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
+obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
+obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
+
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
new file mode 100644
index 000000000000..2fcc8120b53c
--- /dev/null
+++ b/drivers/edac/amd76x_edac.c
@@ -0,0 +1,356 @@
+/*
+ * AMD 76x Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+
+#define AMD76X_NR_CSROWS 8
+#define AMD76X_NR_CHANS 1
+#define AMD76X_NR_DIMMS 4
+
+
+/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
+#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b)
+ *
+ * 31:16 reserved
+ * 15:14 SERR enabled: x1=ue 1x=ce
+ * 13 reserved
+ * 12 diag: disabled, enabled
+ * 11:10 mode: dis, EC, ECC, ECC+scrub
+ * 9:8 status: x1=ue 1x=ce
+ * 7:4 UE cs row
+ * 3:0 CE cs row
+ */
+#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b)
+ *
+ * 31:26 clock disable 5 - 0
+ * 25 SDRAM init
+ * 24 reserved
+ * 23 mode register service
+ * 22:21 suspend to RAM
+ * 20 burst refresh enable
+ * 19 refresh disable
+ * 18 reserved
+ * 17:16 cycles-per-refresh
+ * 15:8 reserved
+ * 7:0 x4 mode enable 7 - 0
+ */
+#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b)
+ *
+ * 31:23 chip-select base
+ * 22:16 reserved
+ * 15:7 chip-select mask
+ * 6:3 reserved
+ * 2:1 address mode
+ * 0 chip-select enable
+ */
+
+
+struct amd76x_error_info {
+ u32 ecc_mode_status;
+};
+
+
+enum amd76x_chips {
+ AMD761 = 0,
+ AMD762
+};
+
+
+struct amd76x_dev_info {
+ const char *ctl_name;
+};
+
+
+static const struct amd76x_dev_info amd76x_devs[] = {
+ [AMD761] = {.ctl_name = "AMD761"},
+ [AMD762] = {.ctl_name = "AMD762"},
+};
+
+
+/**
+ * amd76x_get_error_info - fetch error information
+ * @mci: Memory controller
+ * @info: Info to fill in
+ *
+ * Fetch and store the AMD76x ECC status. Clear pending status
+ * on the chip so that further errors will be reported
+ */
+
+static void amd76x_get_error_info (struct mem_ctl_info *mci,
+ struct amd76x_error_info *info)
+{
+ pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS,
+ &info->ecc_mode_status);
+
+ if (info->ecc_mode_status & BIT(8))
+ pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
+ (u32) BIT(8), (u32) BIT(8));
+
+ if (info->ecc_mode_status & BIT(9))
+ pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
+ (u32) BIT(9), (u32) BIT(9));
+}
+
+
+/**
+ * amd76x_process_error_info - Error check
+ * @mci: Memory controller
+ * @info: Previously fetched information from chip
+ * @handle_errors: 1 if we should do recovery
+ *
+ * Process the chip state and decide if an error has occurred.
+ * A return of 1 indicates an error. Also if handle_errors is true
+ * then attempt to handle and clean up after the error
+ */
+
+static int amd76x_process_error_info (struct mem_ctl_info *mci,
+ struct amd76x_error_info *info, int handle_errors)
+{
+ int error_found;
+ u32 row;
+
+ error_found = 0;
+
+ /*
+ * Check for an uncorrectable error
+ */
+ if (info->ecc_mode_status & BIT(8)) {
+ error_found = 1;
+
+ if (handle_errors) {
+ row = (info->ecc_mode_status >> 4) & 0xf;
+ edac_mc_handle_ue(mci,
+ mci->csrows[row].first_page, 0, row,
+ mci->ctl_name);
+ }
+ }
+
+ /*
+ * Check for a correctable error
+ */
+ if (info->ecc_mode_status & BIT(9)) {
+ error_found = 1;
+
+ if (handle_errors) {
+ row = info->ecc_mode_status & 0xf;
+ edac_mc_handle_ce(mci,
+ mci->csrows[row].first_page, 0, 0, row, 0,
+ mci->ctl_name);
+ }
+ }
+ return error_found;
+}
+
+/**
+ * amd76x_check - Poll the controller
+ * @mci: Memory controller
+ *
+ * Called by the poll handlers this function reads the status
+ * from the controller and checks for errors.
+ */
+
+static void amd76x_check(struct mem_ctl_info *mci)
+{
+ struct amd76x_error_info info;
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ amd76x_get_error_info(mci, &info);
+ amd76x_process_error_info(mci, &info, 1);
+}
+
+
+/**
+ * amd76x_probe1 - Perform set up for detected device
+ * @pdev: PCI device detected
+ * @dev_idx: Device type index
+ *
+ * We have found an AMD76x and now need to set up the memory
+ * controller status reporting. We configure and set up the
+ * memory controller reporting and claim the device.
+ */
+
+static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ struct mem_ctl_info *mci = NULL;
+ enum edac_type ems_modes[] = {
+ EDAC_NONE,
+ EDAC_EC,
+ EDAC_SECDED,
+ EDAC_SECDED
+ };
+ u32 ems;
+ u32 ems_mode;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
+ ems_mode = (ems >> 10) & 0x3;
+
+ mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
+
+ if (mci == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+ mci->pdev = pci_dev_get(pdev);
+ mci->mtype_cap = MEM_FLAG_RDDR;
+
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->edac_cap = ems_mode ?
+ (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
+
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.4.2.5 $";
+ mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
+ mci->edac_check = amd76x_check;
+ mci->ctl_page_to_phys = NULL;
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ struct csrow_info *csrow = &mci->csrows[index];
+ u32 mba;
+ u32 mba_base;
+ u32 mba_mask;
+ u32 dms;
+
+ /* find the DRAM Chip Select Base address and mask */
+ pci_read_config_dword(mci->pdev,
+ AMD76X_MEM_BASE_ADDR + (index * 4),
+ &mba);
+
+ if (!(mba & BIT(0)))
+ continue;
+
+ mba_base = mba & 0xff800000UL;
+ mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
+
+ pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS,
+ &dms);
+
+ csrow->first_page = mba_base >> PAGE_SHIFT;
+ csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->page_mask = mba_mask >> PAGE_SHIFT;
+ csrow->grain = csrow->nr_pages << PAGE_SHIFT;
+ csrow->mtype = MEM_RDDR;
+ csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+ csrow->edac_mode = ems_modes[ems_mode];
+ }
+
+ /* clear counters */
+ pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8),
+ (u32) (0x3 << 8));
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+fail:
+ if (mci) {
+ if(mci->pdev)
+ pci_dev_put(mci->pdev);
+ edac_mc_free(mci);
+ }
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit amd76x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* don't need to call pci_device_enable() */
+ return amd76x_probe1(pdev, ent->driver_data);
+}
+
+
+/**
+ * amd76x_remove_one - driver shutdown
+ * @pdev: PCI device being handed back
+ *
+ * Called when the driver is unloaded. Find the matching mci
+ * structure for the device then delete the mci and free the
+ * resources.
+ */
+
+static void __devexit amd76x_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+ return;
+ if (edac_mc_del_mc(mci))
+ return;
+ pci_dev_put(mci->pdev);
+ edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
+ {PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD762},
+ {PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD761},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
+
+
+static struct pci_driver amd76x_driver = {
+ .name = BS_MOD_STR,
+ .probe = amd76x_init_one,
+ .remove = __devexit_p(amd76x_remove_one),
+ .id_table = amd76x_pci_tbl,
+};
+
+static int __init amd76x_init(void)
+{
+ return pci_register_driver(&amd76x_driver);
+}
+
+static void __exit amd76x_exit(void)
+{
+ pci_unregister_driver(&amd76x_driver);
+}
+
+module_init(amd76x_init);
+module_exit(amd76x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
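
The csrow setup in amd76x_probe1() above turns each AMD76X_MEM_BASE_ADDR register into a page range: bits 31:23 give the chip-select base, bits 15:7 the mask, bit 0 the enable. A worked example with a made-up register value (a 64 MiB row based at 64 MiB):

	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned int mba = 0x04000381u;	/* hypothetical: enabled row */
		unsigned int mba_base = mba & 0xff800000u;
		unsigned int mba_mask = ((mba & 0xff80) << 16) | 0x7fffffu;

		printf("enabled    = %u\n", mba & 1);
		/* 0x4000 pages at page 0x4000 -> 64 MiB at 64 MiB */
		printf("first_page = 0x%x\n", mba_base >> PAGE_SHIFT);
		printf("nr_pages   = 0x%x\n", (mba_mask + 1) >> PAGE_SHIFT);
		return 0;
	}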
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
new file mode 100644
index 000000000000..770a5a633079
--- /dev/null
+++ b/drivers/edac/e752x_edac.c
@@ -0,0 +1,1071 @@
+/*
+ * Intel e752x Memory Controller kernel module
+ * (C) 2004 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * See "enum e752x_chips" below for supported chipsets
+ *
+ * Written by Tom Zimmerman
+ *
+ * Contributors:
+ * Thayne Harbaugh at realmsys.com (?)
+ * Wang Zhenyu at intel.com
+ * Dave Jiang at mvista.com
+ *
+ * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_7520_0
+#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
+#endif /* PCI_DEVICE_ID_INTEL_7520_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
+#define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
+#endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7525_0
+#define PCI_DEVICE_ID_INTEL_7525_0 0x359E
+#endif /* PCI_DEVICE_ID_INTEL_7525_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
+#define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
+#endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7320_0
+#define PCI_DEVICE_ID_INTEL_7320_0 0x3592
+#endif /* PCI_DEVICE_ID_INTEL_7320_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
+#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
+#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
+
+#define E752X_NR_CSROWS 8 /* number of csrows */
+
+
+/* E752X register addresses - device 0 function 0 */
+#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
+#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
+ /*
+ * 31:30 Device width row 7
+ * 01=x8 10=x4 11=x8 DDR2
+ * 27:26 Device width row 6
+ * 23:22 Device width row 5
+ * 19:18 Device width row 4
+ * 15:14 Device width row 3
+ * 11:10 Device width row 2
+ * 7:6 Device width row 1
+ * 3:2 Device width row 0
+ */
+#define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
+ /* FIXME:IS THIS RIGHT? */
+ /*
+ * 22 Number channels 0=1,1=2
+ * 19:18 DRB Granularity 32/64MB
+ */
+#define E752X_DRM 0x80 /* Dimm mapping register */
+#define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
+ /*
+ * 14:12 1 single A, 2 single B, 3 dual
+ */
+#define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
+#define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
+#define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
+#define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
+
+/* E752X register addresses - device 0 function 1 */
+#define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
+#define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
+#define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
+#define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
+#define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
+#define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
+#define E752X_SYSBUS_FERR 0x60 /* System bus first error reg (16b) */
+#define E752X_SYSBUS_NERR 0x62 /* System bus next error reg (16b) */
+#define E752X_SYSBUS_ERRMASK 0x64 /* System bus error mask reg (16b) */
+#define E752X_SYSBUS_SMICMD 0x6A /* System bus SMI command reg (16b) */
+#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
+#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
+#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
+#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI command reg (8b) */
+#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
+#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
+#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
+#define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
+#define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
+#define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 CE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM second correctable memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 CE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 UE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM first uncorrectable scrub memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 UE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
+ /* error syndrome register (16b) */
+#define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
+ /* error syndrome register (16b) */
+#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
+
+/* ICH5R register addresses - device 30 function 0 */
+#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
+#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
+#define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
+
+enum e752x_chips {
+ E7520 = 0,
+ E7525 = 1,
+ E7320 = 2
+};
+
+
+struct e752x_pvt {
+ struct pci_dev *bridge_ck;
+ struct pci_dev *dev_d0f0;
+ struct pci_dev *dev_d0f1;
+ u32 tolm;
+ u32 remapbase;
+ u32 remaplimit;
+ int mc_symmetric;
+ u8 map[8];
+ int map_type;
+ const struct e752x_dev_info *dev_info;
+};
+
+
+struct e752x_dev_info {
+ u16 err_dev;
+ const char *ctl_name;
+};
+
+struct e752x_error_info {
+ u32 ferr_global;
+ u32 nerr_global;
+ u8 hi_ferr;
+ u8 hi_nerr;
+ u16 sysbus_ferr;
+ u16 sysbus_nerr;
+ u8 buf_ferr;
+ u8 buf_nerr;
+ u16 dram_ferr;
+ u16 dram_nerr;
+ u32 dram_sec1_add;
+ u32 dram_sec2_add;
+ u16 dram_sec1_syndrome;
+ u16 dram_sec2_syndrome;
+ u32 dram_ded_add;
+ u32 dram_scrb_add;
+ u32 dram_retr_add;
+};
+
+static const struct e752x_dev_info e752x_devs[] = {
+ [E7520] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
+ .ctl_name = "E7520"},
+ [E7525] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
+ .ctl_name = "E7525"},
+ [E7320] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
+ .ctl_name = "E7320"},
+};
+
+
+static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
+ unsigned long page)
+{
+ u32 remap;
+ struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ if (page < pvt->tolm)
+ return page;
+ if ((page >= 0x100000) && (page < pvt->remapbase))
+ return page;
+ remap = (page - pvt->tolm) + pvt->remapbase;
+ if (remap < pvt->remaplimit)
+ return remap;
+ printk(KERN_ERR "Invalid page %lx - out of range\n", page);
+ return pvt->tolm - 1;
+}
+
+static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
+ u32 sec1_add, u16 sec1_syndrome)
+{
+ u32 page;
+ int row;
+ int channel;
+ int i;
+ struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* convert the addr to 4k page */
+ page = sec1_add >> (PAGE_SHIFT - 4);
+
+ /* FIXME - check for -1 */
+ if (pvt->mc_symmetric) {
+ /* chip select are bits 14 & 13 */
+ row = ((page >> 1) & 3);
+ printk(KERN_WARNING
+ "Test row %d Table %d %d %d %d %d %d %d %d\n",
+ row, pvt->map[0], pvt->map[1], pvt->map[2],
+ pvt->map[3], pvt->map[4], pvt->map[5],
+ pvt->map[6], pvt->map[7]);
+
+ /* test for channel remapping */
+ for (i = 0; i < 8; i++) {
+ if (pvt->map[i] == row)
+ break;
+ }
+ printk(KERN_WARNING "Test computed row %d\n", i);
+ if (i < 8)
+ row = i;
+ else
+ printk(KERN_WARNING
+ "MC%d: row %d not found in remap table\n",
+ mci->mc_idx, row);
+ } else
+ row = edac_mc_find_csrow_by_page(mci, page);
+ /* 0 = channel A, 1 = channel B */
+ channel = !(error_one & 1);
+
+ if (!pvt->map_type)
+ row = 7 - row;
+ edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel,
+ "e752x CE");
+}
+
+
+static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
+ u32 sec1_add, u16 sec1_syndrome, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
+}
+
+static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add,
+ u32 scrb_add)
+{
+ u32 error_2b, block_page;
+ int row;
+ struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ if (error_one & 0x0202) {
+ error_2b = ded_add;
+ /* convert to 4k address */
+ block_page = error_2b >> (PAGE_SHIFT - 4);
+ row = pvt->mc_symmetric ?
+ /* chip select are bits 14 & 13 */
+ ((block_page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, block_page);
+ edac_mc_handle_ue(mci, block_page, 0, row,
+ "e752x UE from Read");
+ }
+ if (error_one & 0x0404) {
+ error_2b = scrb_add;
+ /* convert to 4k address */
+ block_page = error_2b >> (PAGE_SHIFT - 4);
+ row = pvt->mc_symmetric ?
+ /* chip select are bits 14 & 13 */
+ ((block_page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, block_page);
+ edac_mc_handle_ue(mci, block_page, 0, row,
+ "e752x UE from Scruber");
+ }
+}
+
+static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
+ u32 ded_add, u32 scrb_add, int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_process_ue(mci, error_one, ded_add, scrb_add);
+}
+
+static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
+ int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (!handle_error)
+ return;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+}
+
+static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
+ u32 retry_add)
+{
+ u32 error_1b, page;
+ int row;
+ struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+ error_1b = retry_add;
+ page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
+ row = pvt->mc_symmetric ?
+ ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
+ edac_mc_find_csrow_by_page(mci, page);
+ printk(KERN_WARNING
+ "MC%d: CE page 0x%lx, row %d : Memory read retry\n",
+ mci->mc_idx, (long unsigned int) page, row);
+}
+
+static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
+ u32 retry_add, int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_process_ded_retry(mci, error, retry_add);
+}
+
+static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
+ int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ printk(KERN_WARNING "MC%d: Memory threshold CE\n",
+ mci->mc_idx);
+}
+
+static char *global_message[11] = {
+ "PCI Express C1", "PCI Express C", "PCI Express B1",
+ "PCI Express B", "PCI Express A1", "PCI Express A",
+ "DMA Controler", "HUB Interface", "System Bus",
+ "DRAM Controler", "Internal Buffer"
+};
+
+static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
+
+static void do_global_error(int fatal, u32 errors)
+{
+ int i;
+
+ for (i = 0; i < 11; i++) {
+ if (errors & (1 << i))
+ printk(KERN_WARNING "%sError %s\n",
+ fatal_message[fatal], global_message[i]);
+ }
+}
+
+static inline void global_error(int fatal, u32 errors, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_global_error(fatal, errors);
+}
+
+static char *hub_message[7] = {
+ "HI Address or Command Parity", "HI Illegal Access",
+ "HI Internal Parity", "Out of Range Access",
+ "HI Data Parity", "Enhanced Config Access",
+ "Hub Interface Target Abort"
+};
+
+static void do_hub_error(int fatal, u8 errors)
+{
+ int i;
+
+ for (i = 0; i < 7; i++) {
+ if (errors & (1 << i))
+ printk(KERN_WARNING "%sError %s\n",
+ fatal_message[fatal], hub_message[i]);
+ }
+}
+
+static inline void hub_error(int fatal, u8 errors, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_hub_error(fatal, errors);
+}
+
+static char *membuf_message[4] = {
+ "Internal PMWB to DRAM parity",
+ "Internal PMWB to System Bus Parity",
+ "Internal System Bus or IO to PMWB Parity",
+ "Internal DRAM to PMWB Parity"
+};
+
+static void do_membuf_error(u8 errors)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (errors & (1 << i))
+ printk(KERN_WARNING "Non-Fatal Error %s\n",
+ membuf_message[i]);
+ }
+}
+
+static inline void membuf_error(u8 errors, int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_membuf_error(errors);
+}
+
+
+static char *sysbus_message[10] = {
+ "Addr or Request Parity",
+ "Data Strobe Glitch",
+ "Addr Strobe Glitch",
+ "Data Parity",
+ "Addr Above TOM",
+ "Non DRAM Lock Error",
+ "MCERR", "BINIT",
+ "Memory Parity",
+ "IO Subsystem Parity"
+};
+
+
+static void do_sysbus_error(int fatal, u32 errors)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (errors & (1 << i))
+ printk(KERN_WARNING "%sError System Bus %s\n",
+ fatal_message[fatal], sysbus_message[i]);
+ }
+}
+
+static inline void sysbus_error(int fatal, u32 errors, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_sysbus_error(fatal, errors);
+}
+
+static void e752x_check_hub_interface (struct e752x_error_info *info,
+ int *error_found, int handle_error)
+{
+ u8 stat8;
+
+ /* pci_read_config_byte(dev, E752X_HI_FERR, &stat8); */
+ stat8 = info->hi_ferr;
+ if(stat8 & 0x7f) { /* Error, so process */
+ stat8 &= 0x7f;
+ if(stat8 & 0x2b)
+ hub_error(1, stat8 & 0x2b, error_found, handle_error);
+ if(stat8 & 0x54)
+ hub_error(0, stat8 & 0x54, error_found, handle_error);
+ }
+ /* pci_read_config_byte(dev, E752X_HI_NERR, &stat8); */
+ stat8 = info->hi_nerr;
+ if(stat8 & 0x7f) { /* Error, so process */
+ stat8 &= 0x7f;
+ if (stat8 & 0x2b)
+ hub_error(1, stat8 & 0x2b, error_found, handle_error);
+ if(stat8 & 0x54)
+ hub_error(0, stat8 & 0x54, error_found, handle_error);
+ }
+}
+
+static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found,
+ int handle_error)
+{
+ u32 stat32, error32;
+
+ /* pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32); */
+ stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
+
+ if (stat32 == 0)
+ return; /* no errors */
+
+ error32 = (stat32 >> 16) & 0x3ff;
+ stat32 = stat32 & 0x3ff;
+ if(stat32 & 0x083)
+ sysbus_error(1, stat32 & 0x083, error_found, handle_error);
+ if(stat32 & 0x37c)
+ sysbus_error(0, stat32 & 0x37c, error_found, handle_error);
+ if(error32 & 0x083)
+ sysbus_error(1, error32 & 0x083, error_found, handle_error);
+ if(error32 & 0x37c)
+ sysbus_error(0, error32 & 0x37c, error_found, handle_error);
+}
+
+static void e752x_check_membuf (struct e752x_error_info *info, int *error_found,
+ int handle_error)
+{
+ u8 stat8;
+
+ stat8 = info->buf_ferr;
+ if (stat8 & 0x0f) { /* Error, so process */
+ stat8 &= 0x0f;
+ membuf_error(stat8, error_found, handle_error);
+ }
+ stat8 = info->buf_nerr;
+ if (stat8 & 0x0f) { /* Error, so process */
+ stat8 &= 0x0f;
+ membuf_error(stat8, error_found, handle_error);
+ }
+}
+
+static void e752x_check_dram (struct mem_ctl_info *mci,
+ struct e752x_error_info *info, int *error_found, int handle_error)
+{
+ u16 error_one, error_next;
+
+ error_one = info->dram_ferr;
+ error_next = info->dram_nerr;
+
+ /* decode and report errors */
+ if(error_one & 0x0101) /* check first error correctable */
+ process_ce(mci, error_one, info->dram_sec1_add,
+ info->dram_sec1_syndrome, error_found,
+ handle_error);
+
+ if(error_next & 0x0101) /* check next error correctable */
+ process_ce(mci, error_next, info->dram_sec2_add,
+ info->dram_sec2_syndrome, error_found,
+ handle_error);
+
+ if(error_one & 0x4040)
+ process_ue_no_info_wr(mci, error_found, handle_error);
+
+ if(error_next & 0x4040)
+ process_ue_no_info_wr(mci, error_found, handle_error);
+
+ if(error_one & 0x2020)
+ process_ded_retry(mci, error_one, info->dram_retr_add,
+ error_found, handle_error);
+
+ if(error_next & 0x2020)
+ process_ded_retry(mci, error_next, info->dram_retr_add,
+ error_found, handle_error);
+
+ if(error_one & 0x0808)
+ process_threshold_ce(mci, error_one, error_found,
+ handle_error);
+
+ if(error_next & 0x0808)
+ process_threshold_ce(mci, error_next, error_found,
+ handle_error);
+
+ if(error_one & 0x0606)
+ process_ue(mci, error_one, info->dram_ded_add,
+ info->dram_scrb_add, error_found, handle_error);
+
+ if(error_next & 0x0606)
+ process_ue(mci, error_next, info->dram_ded_add,
+ info->dram_scrb_add, error_found, handle_error);
+}
+
+static void e752x_get_error_info (struct mem_ctl_info *mci,
+ struct e752x_error_info *info)
+{
+ struct pci_dev *dev;
+ struct e752x_pvt *pvt;
+
+ memset(info, 0, sizeof(*info));
+ pvt = (struct e752x_pvt *) mci->pvt_info;
+ dev = pvt->dev_d0f1;
+
+ pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
+
+ if (info->ferr_global) {
+ pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr);
+ pci_read_config_word(dev, E752X_SYSBUS_FERR,
+ &info->sysbus_ferr);
+ pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
+ pci_read_config_word(dev, E752X_DRAM_FERR,
+ &info->dram_ferr);
+ pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
+ &info->dram_sec1_add);
+ pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
+ &info->dram_sec1_syndrome);
+ pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
+ &info->dram_ded_add);
+ pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
+ &info->dram_scrb_add);
+ pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
+ &info->dram_retr_add);
+
+ if (info->hi_ferr & 0x7f)
+ pci_write_config_byte(dev, E752X_HI_FERR,
+ info->hi_ferr);
+
+ if (info->sysbus_ferr)
+ pci_write_config_word(dev, E752X_SYSBUS_FERR,
+ info->sysbus_ferr);
+
+ if (info->buf_ferr & 0x0f)
+ pci_write_config_byte(dev, E752X_BUF_FERR,
+ info->buf_ferr);
+
+ if (info->dram_ferr)
+ pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
+ info->dram_ferr, info->dram_ferr);
+
+ pci_write_config_dword(dev, E752X_FERR_GLOBAL,
+ info->ferr_global);
+ }
+
+ pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
+
+ if (info->nerr_global) {
+ pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr);
+ pci_read_config_word(dev, E752X_SYSBUS_NERR,
+ &info->sysbus_nerr);
+ pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
+ pci_read_config_word(dev, E752X_DRAM_NERR,
+ &info->dram_nerr);
+ pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
+ &info->dram_sec2_add);
+ pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
+ &info->dram_sec2_syndrome);
+
+ if (info->hi_nerr & 0x7f)
+ pci_write_config_byte(dev, E752X_HI_NERR,
+ info->hi_nerr);
+
+ if (info->sysbus_nerr)
+ pci_write_config_word(dev, E752X_SYSBUS_NERR,
+ info->sysbus_nerr);
+
+ if (info->buf_nerr & 0x0f)
+ pci_write_config_byte(dev, E752X_BUF_NERR,
+ info->buf_nerr);
+
+ if (info->dram_nerr)
+ pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
+ info->dram_nerr, info->dram_nerr);
+
+ pci_write_config_dword(dev, E752X_NERR_GLOBAL,
+ info->nerr_global);
+ }
+}
+
+static int e752x_process_error_info (struct mem_ctl_info *mci,
+ struct e752x_error_info *info, int handle_errors)
+{
+ u32 error32, stat32;
+ int error_found;
+
+ error_found = 0;
+ error32 = (info->ferr_global >> 18) & 0x3ff;
+ stat32 = (info->ferr_global >> 4) & 0x7ff;
+
+ if (error32)
+ global_error(1, error32, &error_found, handle_errors);
+
+ if (stat32)
+ global_error(0, stat32, &error_found, handle_errors);
+
+ error32 = (info->nerr_global >> 18) & 0x3ff;
+ stat32 = (info->nerr_global >> 4) & 0x7ff;
+
+ if (error32)
+ global_error(1, error32, &error_found, handle_errors);
+
+ if (stat32)
+ global_error(0, stat32, &error_found, handle_errors);
+
+ e752x_check_hub_interface(info, &error_found, handle_errors);
+ e752x_check_sysbus(info, &error_found, handle_errors);
+ e752x_check_membuf(info, &error_found, handle_errors);
+ e752x_check_dram(mci, info, &error_found, handle_errors);
+ return error_found;
+}
+
+static void e752x_check(struct mem_ctl_info *mci)
+{
+ struct e752x_error_info info;
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ e752x_get_error_info(mci, &info);
+ e752x_process_error_info(mci, &info, 1);
+}
+
+static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ u16 pci_data, stat;
+ u32 stat32;
+ u16 stat16;
+ u8 stat8;
+ struct mem_ctl_info *mci = NULL;
+ struct e752x_pvt *pvt = NULL;
+ u16 ddrcsr;
+ u32 drc;
+ int drc_chan; /* Number of channels 0=1chan,1=2chan */
+ int drc_drbg; /* DRB granularity 0=64mb,1=128mb */
+ int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+ u32 dra;
+ unsigned long last_cumul_size;
+ struct pci_dev *pres_dev;
+ struct pci_dev *dev = NULL;
+
+ debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
+ debugf0("Starting Probe1\n");
+
+ /* enable device 0 function 1 */
+ pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
+ stat8 |= (1 << 5);
+ pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
+
+ /* need to find out the number of channels */
+ pci_read_config_dword(pdev, E752X_DRC, &drc);
+ pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
+ /* FIXME: should check >>12 or 0xf, true for all? */
+ /* Dual channel = 1, Single channel = 0 */
+ drc_chan = (((ddrcsr >> 12) & 3) == 3);
+ drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
+ drc_ddim = (drc >> 20) & 0x3;
+
+ mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
+
+ if (mci == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+ mci->mtype_cap = MEM_FLAG_RDDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
+ EDAC_FLAG_S4ECD4ED;
+ /* FIXME - what if different memory types are in different csrows? */
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.5.2.11 $";
+ mci->pdev = pdev;
+
+ debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+ pvt = (struct e752x_pvt *) mci->pvt_info;
+ pvt->dev_info = &e752x_devs[dev_idx];
+ pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_info->err_dev,
+ pvt->bridge_ck);
+ if (pvt->bridge_ck == NULL)
+ pvt->bridge_ck = pci_scan_single_device(pdev->bus,
+ PCI_DEVFN(0, 1));
+ if (pvt->bridge_ck == NULL) {
+ printk(KERN_ERR "MC: error reporting device not found:"
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
+ goto fail;
+ }
+ pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
+
+ debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
+ mci->ctl_name = pvt->dev_info->ctl_name;
+ mci->edac_check = e752x_check;
+ mci->ctl_page_to_phys = ctl_page_to_phys;
+
+ /* find out the device types */
+ pci_read_config_dword(pdev, E752X_DRA, &dra);
+
+ /*
+ * The dram row boundary (DRB) reg values are boundary address for
+ * each DRAM row with a granularity of 64 or 128MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
+ */
+ for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+ u8 value;
+ u32 cumul_size;
+ /* mem_dev 0=x8, 1=x4 */
+ int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
+ struct csrow_info *csrow = &mci->csrows[index];
+
+ mem_dev = (mem_dev == 2);
+ pci_read_config_byte(mci->pdev, E752X_DRB + index, &value);
+ /* convert a 128 or 64 MiB DRB to a page size. */
+ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
+ debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+ __func__, index, cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+ csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ csrow->mtype = MEM_RDDR; /* only one type supported */
+ csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ csrow->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ csrow->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ csrow->edac_mode = EDAC_NONE;
+ }
+
+ /* Fill in the memory map table */
+ {
+ u8 value;
+ u8 last = 0;
+ u8 row = 0;
+ for (index = 0; index < 8; index += 2) {
+
+ pci_read_config_byte(mci->pdev, E752X_DRB + index,
+ &value);
+ /* test if there is a dimm in this slot */
+ if (value == last) {
+ /* no dimm in the slot, so flag it as empty */
+ pvt->map[index] = 0xff;
+ pvt->map[index + 1] = 0xff;
+ } else { /* there is a dimm in the slot */
+ pvt->map[index] = row;
+ row++;
+ last = value;
+ /* test the next value to see if the dimm is
+ double sided */
+ pci_read_config_byte(mci->pdev,
+ E752X_DRB + index + 1,
+ &value);
+ pvt->map[index + 1] = (value == last) ?
+ 0xff : /* the dimm is single sided,
+ so flag as empty */
+ row; /* this is a double sided dimm
+ to save the next row # */
+ row++;
+ last = value;
+ }
+ }
+ }
+
+ /* set the map type. 1 = normal, 0 = reversed */
+ pci_read_config_byte(mci->pdev, E752X_DRM, &stat8);
+ pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
+
+ mci->edac_cap |= EDAC_FLAG_NONE;
+
+ debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
+ __func__);
+ /* load the top of low memory, remap base, and remap limit vars */
+ pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data);
+ pvt->tolm = ((u32) pci_data) << 4;
+ pci_read_config_word(mci->pdev, E752X_REMAPBASE, &pci_data);
+ pvt->remapbase = ((u32) pci_data) << 14;
+ pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data);
+ pvt->remaplimit = ((u32) pci_data) << 14;
+ printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+ pvt->remapbase, pvt->remaplimit);
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n",
+ __func__);
+ goto fail;
+ }
+
+ /* Walk through the PCI table and clear errors */
+ switch (dev_idx) {
+ case E7520:
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_7520_0, NULL);
+ break;
+ case E7525:
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_7525_0, NULL);
+ break;
+ case E7320:
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_7320_0, NULL);
+ break;
+ }
+
+
+ pvt->dev_d0f0 = dev;
+ for (pres_dev = dev;
+ ((struct pci_dev *) pres_dev->global_list.next != dev);
+ pres_dev = (struct pci_dev *) pres_dev->global_list.next) {
+ pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32);
+ stat = (u16) (stat32 >> 16);
+ /* clear any error bits */
+ if (stat32 & ((1 << 6) + (1 << 8)))
+ pci_write_config_word(pres_dev, PCI_STATUS, stat);
+ }
+ /* find the error reporting device and clear errors */
+ dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
+ /* Turn off error disable & SMI in case the BIOS turned it on */
+ pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
+ pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
+ pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
+ pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
+ pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
+ pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
+ pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
+ pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
+ /* clear other MCH errors */
+ pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32);
+ pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32);
+ pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32);
+ pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32);
+ pci_read_config_byte(dev, E752X_HI_FERR, &stat8);
+ pci_write_config_byte(dev, E752X_HI_FERR, stat8);
+ pci_read_config_byte(dev, E752X_HI_NERR, &stat8);
+ pci_write_config_byte(dev, E752X_HI_NERR, stat8);
+ pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32);
+ pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32);
+ pci_read_config_byte(dev, E752X_BUF_FERR, &stat8);
+ pci_write_config_byte(dev, E752X_BUF_FERR, stat8);
+ pci_read_config_byte(dev, E752X_BUF_NERR, &stat8);
+ pci_write_config_byte(dev, E752X_BUF_NERR, stat8);
+ pci_read_config_word(dev, E752X_DRAM_FERR, &stat16);
+ pci_write_config_word(dev, E752X_DRAM_FERR, stat16);
+ pci_read_config_word(dev, E752X_DRAM_NERR, &stat16);
+ pci_write_config_word(dev, E752X_DRAM_NERR, stat16);
+
+ /* get this far and it's successful */
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+fail:
+ if (mci) {
+ if (pvt->dev_d0f0)
+ pci_dev_put(pvt->dev_d0f0);
+ if (pvt->dev_d0f1)
+ pci_dev_put(pvt->dev_d0f1);
+ if (pvt->bridge_ck)
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+ }
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit e752x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* wake up and enable device */
+ if(pci_enable_device(pdev) < 0)
+ return -EIO;
+ return e752x_probe1(pdev, ent->driver_data);
+}
+
+
+static void __devexit e752x_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct e752x_pvt *pvt;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+ return;
+
+ if (edac_mc_del_mc(mci))
+ return;
+
+ pvt = (struct e752x_pvt *) mci->pvt_info;
+ pci_dev_put(pvt->dev_d0f0);
+ pci_dev_put(pvt->dev_d0f1);
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
+ {PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7520},
+ {PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7525},
+ {PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7320},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
+
+
+static struct pci_driver e752x_driver = {
+ .name = BS_MOD_STR,
+ .probe = e752x_init_one,
+ .remove = __devexit_p(e752x_remove_one),
+ .id_table = e752x_pci_tbl,
+};
+
+
+static int __init e752x_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ pci_rc = pci_register_driver(&e752x_driver);
+ return (pci_rc < 0) ? pci_rc : 0;
+}
+
+
+static void __exit e752x_exit(void)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&e752x_driver);
+}
+
+
+module_init(e752x_init);
+module_exit(e752x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
+MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
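
ctl_page_to_phys() above undoes the chipset's memory remapping: pages below tolm (top of low memory) map 1:1, and the hole that the chipset pushed above 4 GB is folded back through the remap window. A standalone sketch with made-up tolm/remapbase/remaplimit page numbers (the driver reads the real ones from E752X_TOLM and friends):

	#include <stdio.h>

	static unsigned long tolm = 0xc0000;		/* 3 GB in 4k pages */
	static unsigned long remapbase = 0x110000;
	static unsigned long remaplimit = 0x150000;

	static unsigned long page_to_phys(unsigned long page)
	{
		unsigned long remap;

		if (page < tolm)
			return page;
		if (page >= 0x100000 && page < remapbase)
			return page;
		remap = (page - tolm) + remapbase;
		if (remap < remaplimit)
			return remap;
		return tolm - 1;	/* out of range, as in the driver */
	}

	int main(void)
	{
		printf("0x%lx -> 0x%lx\n", 0x80000ul, page_to_phys(0x80000));	/* 1:1 */
		printf("0x%lx -> 0x%lx\n", 0xd0000ul, page_to_phys(0xd0000));	/* 0x120000 */
		return 0;
	}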
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
new file mode 100644
index 000000000000..d5e320dfc66f
--- /dev/null
+++ b/drivers/edac/e7xxx_edac.c
@@ -0,0 +1,558 @@
+/*
+ * Intel e7xxx Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * See "enum e7xxx_chips" below for supported chipsets
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * Contributors:
+ * Eric Biederman (Linux Networx)
+ * Tom Zimmerman (Linux Networx)
+ * Jim Garlick (Lawrence Livermore National Labs)
+ * Dave Peterson (Lawrence Livermore National Labs)
+ * That One Guy (Some other place)
+ * Wang Zhenyu (intel.com)
+ *
+ * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_7205_0
+#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
+#endif /* PCI_DEVICE_ID_INTEL_7205_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
+#define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551
+#endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7500_0
+#define PCI_DEVICE_ID_INTEL_7500_0 0x2540
+#endif /* PCI_DEVICE_ID_INTEL_7500_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
+#define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541
+#endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7501_0
+#define PCI_DEVICE_ID_INTEL_7501_0 0x254c
+#endif /* PCI_DEVICE_ID_INTEL_7501_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
+#define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541
+#endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7505_0
+#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
+#endif /* PCI_DEVICE_ID_INTEL_7505_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
+#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
+#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
+
+
+#define E7XXX_NR_CSROWS 8 /* number of csrows */
+#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
+
+
+/* E7XXX register addresses - device 0 function 0 */
+#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
+#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
+ /*
+ * 31 Device width row 7 0=x8 1=x4
+ * 27 Device width row 6
+ * 23 Device width row 5
+ * 19 Device width row 4
+ * 15 Device width row 3
+ * 11 Device width row 2
+ * 7 Device width row 1
+ * 3 Device width row 0
+ */
+#define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */
+ /*
+ * 22 Number channels 0=1,1=2
+ * 19:18 DRB Granularity 32/64MB
+ */
+#define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
+#define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
+#define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
+
+/* E7XXX register addresses - device 0 function 1 */
+#define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */
+#define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */
+#define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */
+ /* error address register (32b) */
+ /*
+ * 31:28 Reserved
+ * 27:6 CE address (4k block 33:12)
+ * 5:0 Reserved
+ */
+#define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */
+ /* error address register (32b) */
+ /*
+ * 31:28 Reserved
+					 * 27:6 UE address (4k block 33:12)
+ * 5:0 Reserved
+ */
+#define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */
+ /* error syndrome register (16b) */
+
+enum e7xxx_chips {
+ E7500 = 0,
+ E7501,
+ E7505,
+ E7205,
+};
+
+
+struct e7xxx_pvt {
+ struct pci_dev *bridge_ck;
+ u32 tolm;
+ u32 remapbase;
+ u32 remaplimit;
+ const struct e7xxx_dev_info *dev_info;
+};
+
+
+struct e7xxx_dev_info {
+ u16 err_dev;
+ const char *ctl_name;
+};
+
+
+struct e7xxx_error_info {
+ u8 dram_ferr;
+ u8 dram_nerr;
+ u32 dram_celog_add;
+ u16 dram_celog_syndrome;
+ u32 dram_uelog_add;
+};
+
+static const struct e7xxx_dev_info e7xxx_devs[] = {
+ [E7500] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
+ .ctl_name = "E7500"},
+ [E7501] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
+ .ctl_name = "E7501"},
+ [E7505] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
+ .ctl_name = "E7505"},
+ [E7205] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
+ .ctl_name = "E7205"},
+};
+
+
+/* FIXME - is this valid for both SECDED and S4ECD4ED? */
+static inline int e7xxx_find_channel(u16 syndrome)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
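+	/* Heuristic (see FIXME above): syndrome bits confined to the low
+	 * byte implicate channel 0 and bits confined to the high byte
+	 * channel 1; when both bytes carry bits, a clear 0xf000 or 0x0f00
+	 * nibble still points at channel 0, otherwise assume channel 1.
+	 */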
+ if ((syndrome & 0xff00) == 0)
+ return 0;
+ if ((syndrome & 0x00ff) == 0)
+ return 1;
+ if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
+ return 0;
+ return 1;
+}
+
+
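+/* Translate a 4KiB page reported by the MC into a physical (CPU) page:
+ * pages below the top of low memory (TOLM), and pages from 4GB up to
+ * the remap base, map 1:1; the chipset remaps the [TOLM, 4GB) hole to
+ * [remapbase, remaplimit) above 4GB.
+ */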
+static unsigned long
+ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page)
+{
+ u32 remap;
+ struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ if ((page < pvt->tolm) ||
+ ((page >= 0x100000) && (page < pvt->remapbase)))
+ return page;
+ remap = (page - pvt->tolm) + pvt->remapbase;
+ if (remap < pvt->remaplimit)
+ return remap;
+ printk(KERN_ERR "Invalid page %lx - out of range\n", page);
+ return pvt->tolm - 1;
+}
+
+
+static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+{
+ u32 error_1b, page;
+ u16 syndrome;
+ int row;
+ int channel;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* read the error address */
+ error_1b = info->dram_celog_add;
+ /* FIXME - should use PAGE_SHIFT */
+ page = error_1b >> 6; /* convert the address to 4k page */
+ /* read the syndrome */
+ syndrome = info->dram_celog_syndrome;
+ /* FIXME - check for -1 */
+ row = edac_mc_find_csrow_by_page(mci, page);
+ /* convert syndrome to channel */
+ channel = e7xxx_find_channel(syndrome);
+ edac_mc_handle_ce(mci, page, 0, syndrome, row, channel,
+ "e7xxx CE");
+}
+
+
+static void process_ce_no_info(struct mem_ctl_info *mci)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+}
+
+
+static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+{
+ u32 error_2b, block_page;
+ int row;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* read the error address */
+ error_2b = info->dram_uelog_add;
+ /* FIXME - should use PAGE_SHIFT */
+ block_page = error_2b >> 6; /* convert to 4k address */
+ row = edac_mc_find_csrow_by_page(mci, block_page);
+ edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+}
+
+
+static void process_ue_no_info(struct mem_ctl_info *mci)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+}
+
+
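+/* Snapshot the FERR/NERR error log registers and the address/syndrome
+ * logs they validate, then write the logged bits back to clear them
+ * for the next event; bit 0 flags a correctable error, bit 1 an
+ * uncorrectable one.
+ */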
+static void e7xxx_get_error_info(struct mem_ctl_info *mci,
+ struct e7xxx_error_info *info)
+{
+ struct e7xxx_pvt *pvt;
+
+ pvt = (struct e7xxx_pvt *) mci->pvt_info;
+ pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
+ &info->dram_ferr);
+ pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
+ &info->dram_nerr);
+
+ if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
+ pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
+ &info->dram_celog_add);
+ pci_read_config_word(pvt->bridge_ck,
+ E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome);
+ }
+
+ if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
+ pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
+ &info->dram_uelog_add);
+
+ if (info->dram_ferr & 3)
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03,
+ 0x03);
+
+ if (info->dram_nerr & 3)
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03,
+ 0x03);
+}
+
+
+static int e7xxx_process_error_info(struct mem_ctl_info *mci,
+ struct e7xxx_error_info *info, int handle_errors)
+{
+ int error_found;
+
+ error_found = 0;
+
+ /* decode and report errors */
+ if (info->dram_ferr & 1) { /* check first error correctable */
+ error_found = 1;
+
+ if (handle_errors)
+ process_ce(mci, info);
+ }
+
+ if (info->dram_ferr & 2) { /* check first error uncorrectable */
+ error_found = 1;
+
+ if (handle_errors)
+ process_ue(mci, info);
+ }
+
+ if (info->dram_nerr & 1) { /* check next error correctable */
+ error_found = 1;
+
+ if (handle_errors) {
+ if (info->dram_ferr & 1)
+ process_ce_no_info(mci);
+ else
+ process_ce(mci, info);
+ }
+ }
+
+ if (info->dram_nerr & 2) { /* check next error uncorrectable */
+ error_found = 1;
+
+ if (handle_errors) {
+ if (info->dram_ferr & 2)
+ process_ue_no_info(mci);
+ else
+ process_ue(mci, info);
+ }
+ }
+
+ return error_found;
+}
+
+
+static void e7xxx_check(struct mem_ctl_info *mci)
+{
+ struct e7xxx_error_info info;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ e7xxx_get_error_info(mci, &info);
+ e7xxx_process_error_info(mci, &info, 1);
+}
+
+
+static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ u16 pci_data;
+ struct mem_ctl_info *mci = NULL;
+ struct e7xxx_pvt *pvt = NULL;
+ u32 drc;
+ int drc_chan = 1; /* Number of channels 0=1chan,1=2chan */
+ int drc_drbg = 1; /* DRB granularity 0=32mb,1=64mb */
+ int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+ u32 dra;
+ unsigned long last_cumul_size;
+
+
+ debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
+
+ /* need to find out the number of channels */
+ pci_read_config_dword(pdev, E7XXX_DRC, &drc);
+ /* only e7501 can be single channel */
+ if (dev_idx == E7501) {
+ drc_chan = ((drc >> 22) & 0x1);
+ drc_drbg = (drc >> 18) & 0x3;
+ }
+ drc_ddim = (drc >> 20) & 0x3;
+
+ mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
+
+ if (mci == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+ mci->mtype_cap = MEM_FLAG_RDDR;
+ mci->edac_ctl_cap =
+ EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED;
+ /* FIXME - what if different memory types are in different csrows? */
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.5.2.9 $";
+ mci->pdev = pdev;
+
+ debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+ pvt = (struct e7xxx_pvt *) mci->pvt_info;
+ pvt->dev_info = &e7xxx_devs[dev_idx];
+ pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_info->err_dev,
+ pvt->bridge_ck);
+ if (!pvt->bridge_ck) {
+ printk(KERN_ERR
+		       "MC: error reporting device not found: "
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
+ goto fail;
+ }
+
+ debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
+ mci->ctl_name = pvt->dev_info->ctl_name;
+
+ mci->edac_check = e7xxx_check;
+ mci->ctl_page_to_phys = ctl_page_to_phys;
+
+ /* find out the device types */
+ pci_read_config_dword(pdev, E7XXX_DRA, &dra);
+
+ /*
+ * The dram row boundary (DRB) reg values are boundary address
+ * for each DRAM row with a granularity of 32 or 64MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
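+	 *
+	 * e.g. with 64MiB granularity (drc_drbg=1), a DRB value of 0x08
+	 * marks a cumulative boundary of 8 * 64MiB = 512MiB, which the
+	 * loop below converts to pages as 8 << (25 + 1 - PAGE_SHIFT).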
+ */
+ for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+ u8 value;
+ u32 cumul_size;
+ /* mem_dev 0=x8, 1=x4 */
+ int mem_dev = (dra >> (index * 4 + 3)) & 0x1;
+ struct csrow_info *csrow = &mci->csrows[index];
+
+ pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value);
+ /* convert a 64 or 32 MiB DRB to a page size. */
+ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
+ debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+ __func__, index, cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+ csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ csrow->mtype = MEM_RDDR; /* only one type supported */
+ csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ csrow->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ csrow->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ csrow->edac_mode = EDAC_NONE;
+ }
+
+ mci->edac_cap |= EDAC_FLAG_NONE;
+
+ debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
+ __func__);
+ /* load the top of low memory, remap base, and remap limit vars */
+ pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data);
+ pvt->tolm = ((u32) pci_data) << 4;
+ pci_read_config_word(mci->pdev, E7XXX_REMAPBASE, &pci_data);
+ pvt->remapbase = ((u32) pci_data) << 14;
+ pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data);
+ pvt->remaplimit = ((u32) pci_data) << 14;
+	printk(KERN_INFO "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+ pvt->remapbase, pvt->remaplimit);
+
+ /* clear any pending errors, or initial state bits */
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
+
+ if (edac_mc_add_mc(mci) != 0) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n",
+ __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+fail:
+ if (mci != NULL) {
+		if (pvt != NULL && pvt->bridge_ck)
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+ }
+
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit
+e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* wake up and enable device */
+ return pci_enable_device(pdev) ?
+ -EIO : e7xxx_probe1(pdev, ent->driver_data);
+}
+
+
+static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct e7xxx_pvt *pvt;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+	if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
+ edac_mc_del_mc(mci)) {
+ pvt = (struct e7xxx_pvt *) mci->pvt_info;
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+ }
+}
+
+
+static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
+ {PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7205},
+ {PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7500},
+ {PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7501},
+ {PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7505},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
+
+
+static struct pci_driver e7xxx_driver = {
+ .name = BS_MOD_STR,
+ .probe = e7xxx_init_one,
+ .remove = __devexit_p(e7xxx_remove_one),
+ .id_table = e7xxx_pci_tbl,
+};
+
+
+static int __init e7xxx_init(void)
+{
+ return pci_register_driver(&e7xxx_driver);
+}
+
+
+static void __exit e7xxx_exit(void)
+{
+ pci_unregister_driver(&e7xxx_driver);
+}
+
+module_init(e7xxx_init);
+module_exit(e7xxx_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
+	      "Based on work by Dan Hollis et al");
+MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
new file mode 100644
index 000000000000..4be9bd0a1267
--- /dev/null
+++ b/drivers/edac/edac_mc.c
@@ -0,0 +1,2209 @@
+/*
+ * edac_mc kernel module
+ * (C) 2005 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * Modified by Dave Peterson and Doug Thompson
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/edac.h>
+
+#include "edac_mc.h"
+
+#define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__
+
+#ifdef CONFIG_EDAC_DEBUG
+/* Values of 0 to 4 will generate output */
+int edac_debug_level = 1;
+EXPORT_SYMBOL(edac_debug_level);
+#endif
+
+/* EDAC controls, settable by module parameter and sysfs */
+static int log_ue = 1;
+static int log_ce = 1;
+static int panic_on_ue = 1;
+static int poll_msec = 1000;
+
+static int check_pci_parity = 0; /* default NO check PCI parity */
+static int panic_on_pci_parity; /* default no panic on PCI Parity */
+static atomic_t pci_parity_count = ATOMIC_INIT(0);
+
+/* lock to memory controller's control array */
+static DECLARE_MUTEX(mem_ctls_mutex);
+static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
+
+/* Structure of the whitelist and blacklist arrays */
+struct edac_pci_device_list {
+ unsigned int vendor; /* Vendor ID */
+	unsigned int device; /* Device ID */
+};
+
+
+#define MAX_LISTED_PCI_DEVICES 32
+
+/* List of PCI devices (vendor-id:device-id) that should be skipped */
+static struct edac_pci_device_list pci_blacklist[MAX_LISTED_PCI_DEVICES];
+static int pci_blacklist_count;
+
+/* List of PCI devices (vendor-id:device-id) that should be scanned */
+static struct edac_pci_device_list pci_whitelist[MAX_LISTED_PCI_DEVICES];
+static int pci_whitelist_count;
+
+/* START sysfs data and methods */
+
+static const char *mem_types[] = {
+ [MEM_EMPTY] = "Empty",
+ [MEM_RESERVED] = "Reserved",
+ [MEM_UNKNOWN] = "Unknown",
+ [MEM_FPM] = "FPM",
+ [MEM_EDO] = "EDO",
+ [MEM_BEDO] = "BEDO",
+ [MEM_SDR] = "Unbuffered-SDR",
+ [MEM_RDR] = "Registered-SDR",
+ [MEM_DDR] = "Unbuffered-DDR",
+ [MEM_RDDR] = "Registered-DDR",
+ [MEM_RMBS] = "RMBS"
+};
+
+static const char *dev_types[] = {
+ [DEV_UNKNOWN] = "Unknown",
+ [DEV_X1] = "x1",
+ [DEV_X2] = "x2",
+ [DEV_X4] = "x4",
+ [DEV_X8] = "x8",
+ [DEV_X16] = "x16",
+ [DEV_X32] = "x32",
+ [DEV_X64] = "x64"
+};
+
+static const char *edac_caps[] = {
+ [EDAC_UNKNOWN] = "Unknown",
+ [EDAC_NONE] = "None",
+ [EDAC_RESERVED] = "Reserved",
+ [EDAC_PARITY] = "PARITY",
+ [EDAC_EC] = "EC",
+ [EDAC_SECDED] = "SECDED",
+ [EDAC_S2ECD2ED] = "S2ECD2ED",
+ [EDAC_S4ECD4ED] = "S4ECD4ED",
+ [EDAC_S8ECD8ED] = "S8ECD8ED",
+ [EDAC_S16ECD16ED] = "S16ECD16ED"
+};
+
+
+/* sysfs object: /sys/devices/system/edac */
+static struct sysdev_class edac_class = {
+ set_kset_name("edac"),
+};
+
+/* sysfs objects:
+ * /sys/devices/system/edac/mc
+ * /sys/devices/system/edac/pci
+ */
+static struct kobject edac_memctrl_kobj;
+static struct kobject edac_pci_kobj;
+
+/*
+ * /sys/devices/system/edac/mc;
+ * data structures and methods
+ */
+static ssize_t memctrl_string_show(void *ptr, char *buffer)
+{
+ char *value = (char*) ptr;
+ return sprintf(buffer, "%s\n", value);
+}
+
+static ssize_t memctrl_int_show(void *ptr, char *buffer)
+{
+ int *value = (int*) ptr;
+ return sprintf(buffer, "%d\n", *value);
+}
+
+static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
+{
+ int *value = (int*) ptr;
+
+ if (isdigit(*buffer))
+ *value = simple_strtoul(buffer, NULL, 0);
+
+ return count;
+}
+
+struct memctrl_dev_attribute {
+ struct attribute attr;
+ void *value;
+ ssize_t (*show)(void *,char *);
+ ssize_t (*store)(void *, const char *, size_t);
+};
+
+/* Set of show/store abstract level functions for memory control object */
+static ssize_t
+memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer)
+{
+ struct memctrl_dev_attribute *memctrl_dev;
+ memctrl_dev = (struct memctrl_dev_attribute*)attr;
+
+ if (memctrl_dev->show)
+ return memctrl_dev->show(memctrl_dev->value, buffer);
+ return -EIO;
+}
+
+static ssize_t
+memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct memctrl_dev_attribute *memctrl_dev;
+ memctrl_dev = (struct memctrl_dev_attribute*)attr;
+
+ if (memctrl_dev->store)
+ return memctrl_dev->store(memctrl_dev->value, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops memctrlfs_ops = {
+ .show = memctrl_dev_show,
+ .store = memctrl_dev_store
+};
+
+#define MEMCTRL_ATTR(_name,_mode,_show,_store) \
+struct memctrl_dev_attribute attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = &_name, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store) \
+struct memctrl_dev_attribute attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = _data, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* cwrow<id> attribute f*/
+MEMCTRL_STRING_ATTR(mc_version,EDAC_MC_VERSION,S_IRUGO,memctrl_string_show,NULL);
+
+/* csrow<id> control files */
+MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
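+
+/* Once the 'mc' kobject is registered, the controls above surface as
+ * files, e.g. "echo 0 > /sys/devices/system/edac/mc/panic_on_ue"
+ * disables panic-on-UE via memctrl_int_store() above.
+ */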
+
+
+/* Base Attributes of the memory ECC object */
+static struct memctrl_dev_attribute *memctrl_attr[] = {
+ &attr_panic_on_ue,
+ &attr_log_ue,
+ &attr_log_ce,
+ &attr_poll_msec,
+ &attr_mc_version,
+ NULL,
+};
+
+/* Main MC kobject release() function */
+static void edac_memctrl_master_release(struct kobject *kobj)
+{
+ debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
+}
+
+static struct kobj_type ktype_memctrl = {
+ .release = edac_memctrl_master_release,
+ .sysfs_ops = &memctrlfs_ops,
+ .default_attrs = (struct attribute **) memctrl_attr,
+};
+
+
+/* Initialize the main sysfs entries for edac:
+ * /sys/devices/system/edac
+ *
+ * and children
+ *
+ * Return: 0 SUCCESS
+ * !0 FAILURE
+ */
+static int edac_sysfs_memctrl_setup(void)
+{
+ int err=0;
+
+ debugf1("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* create the /sys/devices/system/edac directory */
+ err = sysdev_class_register(&edac_class);
+ if (!err) {
+ /* Init the MC's kobject */
+ memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
+ kobject_init(&edac_memctrl_kobj);
+
+ edac_memctrl_kobj.parent = &edac_class.kset.kobj;
+ edac_memctrl_kobj.ktype = &ktype_memctrl;
+
+ /* generate sysfs "..../edac/mc" */
+ err = kobject_set_name(&edac_memctrl_kobj,"mc");
+ if (!err) {
+ /* FIXME: maybe new sysdev_create_subdir() */
+ err = kobject_register(&edac_memctrl_kobj);
+ if (err) {
+ debugf1("Failed to register '.../edac/mc'\n");
+ } else {
+ debugf1("Registered '.../edac/mc' kobject\n");
+ }
+ }
+ } else {
+		debugf1(KERN_WARNING __FILE__ ": %s() error=%d\n", __func__, err);
+ }
+
+ return err;
+}
+
+/*
+ * MC teardown:
+ * the '..../edac/mc' kobject followed by '..../edac' itself
+ */
+static void edac_sysfs_memctrl_teardown(void)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* Unregister the MC's kobject */
+ kobject_unregister(&edac_memctrl_kobj);
+
+ /* release the master edac mc kobject */
+ kobject_put(&edac_memctrl_kobj);
+
+ /* Unregister the 'edac' object */
+ sysdev_class_unregister(&edac_class);
+}
+
+/*
+ * /sys/devices/system/edac/pci;
+ * data structures and methods
+ */
+
+struct list_control {
+ struct edac_pci_device_list *list;
+ int *count;
+};
+
+/* Output the list as: vendor_id:device_id<,vendor_id:device_id> */
+static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
+{
+ struct list_control *listctl;
+ struct edac_pci_device_list *list;
+ char *p = buffer;
+ int len=0;
+ int i;
+
+ listctl = ptr;
+ list = listctl->list;
+
+ for (i = 0; i < *(listctl->count); i++, list++ ) {
+ if (len > 0)
+ len += snprintf(p + len, (PAGE_SIZE-len), ",");
+
+ len += snprintf(p + len,
+ (PAGE_SIZE-len),
+ "%x:%x",
+ list->vendor,list->device);
+ }
+
+ len += snprintf(p + len,(PAGE_SIZE-len), "\n");
+
+ return (ssize_t) len;
+}
+
+/**
+ * parse_one_device()
+ *
+ * Scan the string from *s to *e looking for one 'vendor:device' tuple
+ * where each field is a hex value
+ *
+ * return 0 if an entry is NOT found
+ * return 1 if an entry is found
+ * fill in *vendor_id and *device_id with values found
+ *
+ * In both cases, make sure *s has been moved forward toward *e
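+ *
+ * e.g. a buffer of "8086:2541 8086:254c\n" yields (0x8086,0x2541) and
+ * (0x8086,0x254c) over two successive calls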
+ */
+static int parse_one_device(const char **s,const char **e,
+ unsigned int *vendor_id, unsigned int *device_id)
+{
+ const char *runner, *p;
+
+ /* if null byte, we are done */
+ if (!**s) {
+ (*s)++; /* keep *s moving */
+ return 0;
+ }
+
+ /* skip over newlines & whitespace */
+ if ((**s == '\n') || isspace(**s)) {
+ (*s)++;
+ return 0;
+ }
+
+ if (!isxdigit(**s)) {
+ (*s)++;
+ return 0;
+ }
+
+ /* parse vendor_id */
+ runner = *s;
+ while (runner < *e) {
+ /* scan for vendor:device delimiter */
+ if (*runner == ':') {
+ *vendor_id = simple_strtol((char*) *s, (char**) &p, 16);
+ runner = p + 1;
+ break;
+ }
+ runner++;
+ }
+
+ if (!isxdigit(*runner)) {
+ *s = ++runner;
+ return 0;
+ }
+
+ /* parse device_id */
+ if (runner < *e) {
+ *device_id = simple_strtol((char*)runner, (char**)&p, 16);
+ runner = p;
+ }
+
+ *s = runner;
+
+ return 1;
+}
+
+static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
+ size_t count)
+{
+ struct list_control *listctl;
+ struct edac_pci_device_list *list;
+ unsigned int vendor_id, device_id;
+ const char *s, *e;
+ int *index;
+
+ s = (char*)buffer;
+ e = s + count;
+
+ listctl = ptr;
+ list = listctl->list;
+ index = listctl->count;
+
+ *index = 0;
+ while (*index < MAX_LISTED_PCI_DEVICES) {
+
+ if (parse_one_device(&s,&e,&vendor_id,&device_id)) {
+ list[ *index ].vendor = vendor_id;
+ list[ *index ].device = device_id;
+ (*index)++;
+ }
+
+		/* check whether all of the data has been consumed */
+ if (s >= e)
+ break;
+ }
+
+ return count;
+}
+
+static ssize_t edac_pci_int_show(void *ptr, char *buffer)
+{
+ int *value = ptr;
+ return sprintf(buffer,"%d\n",*value);
+}
+
+static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
+{
+ int *value = ptr;
+
+ if (isdigit(*buffer))
+ *value = simple_strtoul(buffer,NULL,0);
+
+ return count;
+}
+
+struct edac_pci_dev_attribute {
+ struct attribute attr;
+ void *value;
+ ssize_t (*show)(void *,char *);
+ ssize_t (*store)(void *, const char *,size_t);
+};
+
+/* Set of show/store abstract level functions for PCI Parity object */
+static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct edac_pci_dev_attribute *edac_pci_dev;
+	edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+ if (edac_pci_dev->show)
+ return edac_pci_dev->show(edac_pci_dev->value, buffer);
+ return -EIO;
+}
+
+static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct edac_pci_dev_attribute *edac_pci_dev;
+	edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+	if (edac_pci_dev->store)
+ return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops edac_pci_sysfs_ops = {
+ .show = edac_pci_dev_show,
+ .store = edac_pci_dev_store
+};
+
+
+#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
+struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = &_name, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \
+struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = _data, \
+ .show = _show, \
+ .store = _store, \
+};
+
+static struct list_control pci_whitelist_control = {
+ .list = pci_whitelist,
+ .count = &pci_whitelist_count
+};
+
+static struct list_control pci_blacklist_control = {
+ .list = pci_blacklist,
+ .count = &pci_blacklist_count
+};
+
+/* whitelist attribute */
+EDAC_PCI_STRING_ATTR(pci_parity_whitelist,
+ &pci_whitelist_control,
+ S_IRUGO|S_IWUSR,
+ edac_pci_list_string_show,
+ edac_pci_list_string_store);
+
+EDAC_PCI_STRING_ATTR(pci_parity_blacklist,
+ &pci_blacklist_control,
+ S_IRUGO|S_IWUSR,
+ edac_pci_list_string_show,
+ edac_pci_list_string_store);
+
+/* PCI Parity control files */
+EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
+EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
+EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL);
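+
+/* These surface under /sys/devices/system/edac/pci, e.g.
+ * "echo 8086:2541 > /sys/devices/system/edac/pci/pci_parity_whitelist"
+ * limits parity scanning to that vendor:device tuple.
+ */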
+
+/* Base Attributes of the memory ECC object */
+static struct edac_pci_dev_attribute *edac_pci_attr[] = {
+ &edac_pci_attr_check_pci_parity,
+ &edac_pci_attr_panic_on_pci_parity,
+ &edac_pci_attr_pci_parity_count,
+ &edac_pci_attr_pci_parity_whitelist,
+ &edac_pci_attr_pci_parity_blacklist,
+ NULL,
+};
+
+/* No memory to release */
+static void edac_pci_release(struct kobject *kobj)
+{
+ debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__);
+}
+
+static struct kobj_type ktype_edac_pci = {
+ .release = edac_pci_release,
+ .sysfs_ops = &edac_pci_sysfs_ops,
+ .default_attrs = (struct attribute **) edac_pci_attr,
+};
+
+/**
+ * edac_sysfs_pci_setup()
+ *
+ * Create the '..../edac/pci' kobject and its attribute files.
+ */
+static int edac_sysfs_pci_setup(void)
+{
+ int err;
+
+ debugf1("MC: " __FILE__ ": %s()\n", __func__);
+
+ memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
+
+ kobject_init(&edac_pci_kobj);
+ edac_pci_kobj.parent = &edac_class.kset.kobj;
+ edac_pci_kobj.ktype = &ktype_edac_pci;
+
+ err = kobject_set_name(&edac_pci_kobj, "pci");
+ if (!err) {
+		/* Instantiate the pci object */
+ /* FIXME: maybe new sysdev_create_subdir() */
+ err = kobject_register(&edac_pci_kobj);
+ if (err)
+ debugf1("Failed to register '.../edac/pci'\n");
+ else
+ debugf1("Registered '.../edac/pci' kobject\n");
+ }
+ return err;
+}
+
+
+static void edac_sysfs_pci_teardown(void)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ kobject_unregister(&edac_pci_kobj);
+ kobject_put(&edac_pci_kobj);
+}
+
+/* EDAC sysfs CSROW data structures and methods */
+
+/* Set of more detailed csrow<id> attribute show/store functions */
+static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data)
+{
+ ssize_t size = 0;
+
+ if (csrow->nr_channels > 0) {
+ size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n",
+ csrow->channels[0].label);
+ }
+ return size;
+}
+
+static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data)
+{
+ ssize_t size = 0;
+
+	if (csrow->nr_channels > 1) {
+ size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
+ csrow->channels[1].label);
+ }
+ return size;
+}
+
+static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
+ const char *data, size_t size)
+{
+ ssize_t max_size = 0;
+
+ if (csrow->nr_channels > 0) {
+ max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
+ strncpy(csrow->channels[0].label, data, max_size);
+ csrow->channels[0].label[max_size] = '\0';
+ }
+ return size;
+}
+
+static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
+ const char *data, size_t size)
+{
+ ssize_t max_size = 0;
+
+ if (csrow->nr_channels > 1) {
+ max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
+ strncpy(csrow->channels[1].label, data, max_size);
+ csrow->channels[1].label[max_size] = '\0';
+ }
+	return size;
+}
+
+static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%u\n", csrow->ue_count);
+}
+
+static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%u\n", csrow->ce_count);
+}
+
+static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data)
+{
+ ssize_t size = 0;
+
+ if (csrow->nr_channels > 0) {
+ size = sprintf(data,"%u\n", csrow->channels[0].ce_count);
+ }
+ return size;
+}
+
+static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data)
+{
+ ssize_t size = 0;
+
+ if (csrow->nr_channels > 1) {
+ size = sprintf(data,"%u\n", csrow->channels[1].ce_count);
+ }
+ return size;
+}
+
+static ssize_t csrow_size_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages));
+}
+
+static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%s\n", mem_types[csrow->mtype]);
+}
+
+static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%s\n", dev_types[csrow->dtype]);
+}
+
+static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]);
+}
+
+struct csrowdev_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct csrow_info *,char *);
+ ssize_t (*store)(struct csrow_info *, const char *,size_t);
+};
+
+#define to_csrow(k) container_of(k, struct csrow_info, kobj)
+#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
+
+/* Set of show/store higher level functions for csrow objects */
+static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct csrow_info *csrow = to_csrow(kobj);
+ struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+ if (csrowdev_attr->show)
+ return csrowdev_attr->show(csrow, buffer);
+ return -EIO;
+}
+
+static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct csrow_info *csrow = to_csrow(kobj);
+ struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
+
+ if (csrowdev_attr->store)
+ return csrowdev_attr->store(csrow, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops csrowfs_ops = {
+ .show = csrowdev_show,
+ .store = csrowdev_store
+};
+
+#define CSROWDEV_ATTR(_name,_mode,_show,_store) \
+struct csrowdev_attribute attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* cwrow<id>/attribute files */
+CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL);
+CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL);
+CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL);
+CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL);
+CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL);
+CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL);
+CSROWDEV_ATTR(ch0_ce_count,S_IRUGO,csrow_ch0_ce_count_show,NULL);
+CSROWDEV_ATTR(ch1_ce_count,S_IRUGO,csrow_ch1_ce_count_show,NULL);
+
+/* control/attribute files */
+CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR,
+ csrow_ch0_dimm_label_show,
+ csrow_ch0_dimm_label_store);
+CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
+ csrow_ch1_dimm_label_show,
+ csrow_ch1_dimm_label_store);
+
+
+/* Attributes of the CSROW<id> object */
+static struct csrowdev_attribute *csrow_attr[] = {
+ &attr_dev_type,
+ &attr_mem_type,
+ &attr_edac_mode,
+ &attr_size_mb,
+ &attr_ue_count,
+ &attr_ce_count,
+ &attr_ch0_ce_count,
+ &attr_ch1_ce_count,
+ &attr_ch0_dimm_label,
+ &attr_ch1_dimm_label,
+ NULL,
+};
+
+
+/* No memory to release */
+static void edac_csrow_instance_release(struct kobject *kobj)
+{
+ debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
+}
+
+static struct kobj_type ktype_csrow = {
+ .release = edac_csrow_instance_release,
+ .sysfs_ops = &csrowfs_ops,
+ .default_attrs = (struct attribute **) csrow_attr,
+};
+
+/* Create a CSROW object under specified edac_mc_device */
+static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
+					struct csrow_info *csrow, int index)
+{
+ int err = 0;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ memset(&csrow->kobj, 0, sizeof(csrow->kobj));
+
+ /* generate ..../edac/mc/mc<id>/csrow<index> */
+
+ kobject_init(&csrow->kobj);
+ csrow->kobj.parent = edac_mci_kobj;
+ csrow->kobj.ktype = &ktype_csrow;
+
+ /* name this instance of csrow<id> */
+ err = kobject_set_name(&csrow->kobj,"csrow%d",index);
+ if (!err) {
+		/* Instantiate the csrow object */
+ err = kobject_register(&csrow->kobj);
+ if (err)
+ debugf0("Failed to register CSROW%d\n",index);
+ else
+ debugf0("Registered CSROW%d\n",index);
+ }
+
+ return err;
+}
+
+/* sysfs data structures and methods for the MCI kobjects */
+
+static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
+ const char *data, size_t count )
+{
+ int row, chan;
+
+ mci->ue_noinfo_count = 0;
+ mci->ce_noinfo_count = 0;
+ mci->ue_count = 0;
+ mci->ce_count = 0;
+ for (row = 0; row < mci->nr_csrows; row++) {
+ struct csrow_info *ri = &mci->csrows[row];
+
+ ri->ue_count = 0;
+ ri->ce_count = 0;
+ for (chan = 0; chan < ri->nr_channels; chan++)
+ ri->channels[chan].ce_count = 0;
+ }
+ mci->start_time = jiffies;
+
+ return count;
+}
+
+static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%d\n", mci->ue_count);
+}
+
+static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%d\n", mci->ce_count);
+}
+
+static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%d\n", mci->ce_noinfo_count);
+}
+
+static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%d\n", mci->ue_noinfo_count);
+}
+
+static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ);
+}
+
+static ssize_t mci_mod_name_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%s %s\n", mci->mod_name, mci->mod_ver);
+}
+
+static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%s\n", mci->ctl_name);
+}
+
+static int mci_output_edac_cap(char *buf, unsigned long edac_cap)
+{
+ char *p = buf;
+ int bit_idx;
+
+ for (bit_idx = 0; bit_idx < 8 * sizeof(edac_cap); bit_idx++) {
+ if ((edac_cap >> bit_idx) & 0x1)
+ p += sprintf(p, "%s ", edac_caps[bit_idx]);
+ }
+
+ return p - buf;
+}
+
+static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data)
+{
+ char *p = data;
+
+ p += mci_output_edac_cap(p,mci->edac_ctl_cap);
+ p += sprintf(p, "\n");
+
+ return p - data;
+}
+
+static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ char *p = data;
+
+ p += mci_output_edac_cap(p,mci->edac_cap);
+ p += sprintf(p, "\n");
+
+ return p - data;
+}
+
+static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap)
+{
+ char *p = buf;
+ int bit_idx;
+
+ for (bit_idx = 0; bit_idx < 8 * sizeof(mtype_cap); bit_idx++) {
+ if ((mtype_cap >> bit_idx) & 0x1)
+ p += sprintf(p, "%s ", mem_types[bit_idx]);
+ }
+
+ return p - buf;
+}
+
+static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data)
+{
+ char *p = data;
+
+ p += mci_output_mtype_cap(p,mci->mtype_cap);
+ p += sprintf(p, "\n");
+
+ return p - data;
+}
+
+static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
+{
+ int total_pages, csrow_idx;
+
+ for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
+ csrow_idx++) {
+ struct csrow_info *csrow = &mci->csrows[csrow_idx];
+
+ if (!csrow->nr_pages)
+ continue;
+ total_pages += csrow->nr_pages;
+ }
+
+ return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages));
+}
+
+struct mcidev_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct mem_ctl_info *,char *);
+ ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
+};
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
+#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
+
+static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+ struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
+
+ if (mcidev_attr->show)
+ return mcidev_attr->show(mem_ctl_info, buffer);
+ return -EIO;
+}
+
+static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+ struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
+
+ if (mcidev_attr->store)
+ return mcidev_attr->store(mem_ctl_info, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops mci_ops = {
+ .show = mcidev_show,
+ .store = mcidev_store
+};
+
+#define MCIDEV_ATTR(_name,_mode,_show,_store) \
+struct mcidev_attribute mci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* Control file */
+MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store);
+
+/* Attribute files */
+MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL);
+MCIDEV_ATTR(module_name,S_IRUGO,mci_mod_name_show,NULL);
+MCIDEV_ATTR(edac_capability,S_IRUGO,mci_edac_capability_show,NULL);
+MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL);
+MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL);
+MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL);
+MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL);
+MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL);
+MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL);
+MCIDEV_ATTR(edac_current_capability,S_IRUGO,
+ mci_edac_current_capability_show,NULL);
+MCIDEV_ATTR(supported_mem_type,S_IRUGO,
+ mci_supported_mem_type_show,NULL);
+
+
+static struct mcidev_attribute *mci_attr[] = {
+ &mci_attr_reset_counters,
+ &mci_attr_module_name,
+ &mci_attr_mc_name,
+ &mci_attr_edac_capability,
+ &mci_attr_edac_current_capability,
+ &mci_attr_supported_mem_type,
+ &mci_attr_size_mb,
+ &mci_attr_seconds_since_reset,
+ &mci_attr_ue_noinfo_count,
+ &mci_attr_ce_noinfo_count,
+ &mci_attr_ue_count,
+ &mci_attr_ce_count,
+ NULL
+};
+
+
+/*
+ * Release of a MC controlling instance
+ */
+static void edac_mci_instance_release(struct kobject *kobj)
+{
+ struct mem_ctl_info *mci;
+ mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj);
+
+ debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n",
+ __func__, mci->mc_idx);
+
+ kfree(mci);
+}
+
+static struct kobj_type ktype_mci = {
+ .release = edac_mci_instance_release,
+ .sysfs_ops = &mci_ops,
+ .default_attrs = (struct attribute **) mci_attr,
+};
+
+#define EDAC_DEVICE_SYMLINK "device"
+
+/*
+ * Create a new Memory Controller kobject instance,
+ * mc<id> under the 'mc' directory
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+ int i;
+ int err;
+ struct csrow_info *csrow;
+ struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
+
+ debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx);
+
+ memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
+ kobject_init(edac_mci_kobj);
+
+ /* set the name of the mc<id> object */
+ err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
+ if (err)
+ return err;
+
+ /* link to our parent the '..../edac/mc' object */
+ edac_mci_kobj->parent = &edac_memctrl_kobj;
+ edac_mci_kobj->ktype = &ktype_mci;
+
+ /* register the mc<id> kobject */
+ err = kobject_register(edac_mci_kobj);
+ if (err)
+ return err;
+
+ /* create a symlink for the device */
+ err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj,
+ EDAC_DEVICE_SYMLINK);
+ if (err) {
+ kobject_unregister(edac_mci_kobj);
+ return err;
+ }
+
+ /* Make directories for each CSROW object
+ * under the mc<id> kobject
+ */
+ for (i = 0; i < mci->nr_csrows; i++) {
+
+ csrow = &mci->csrows[i];
+
+ /* Only expose populated CSROWs */
+ if (csrow->nr_pages > 0) {
+ err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
+ if (err)
+ goto fail;
+ }
+ }
+
+ /* Mark this MCI instance as having sysfs entries */
+ mci->sysfs_active = MCI_SYSFS_ACTIVE;
+
+ return 0;
+
+
+	/* CSROW error: back out what has already been registered */
+fail:
+	for (i--; i >= 0; i--) {
+		if (mci->csrows[i].nr_pages > 0) {
+ kobject_unregister(&mci->csrows[i].kobj);
+ kobject_put(&mci->csrows[i].kobj);
+ }
+ }
+
+ kobject_unregister(edac_mci_kobj);
+ kobject_put(edac_mci_kobj);
+
+ return err;
+}
+
+/*
+ * remove a Memory Controller instance
+ */
+static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+ int i;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* remove all csrow kobjects */
+ for (i = 0; i < mci->nr_csrows; i++) {
+ if (mci->csrows[i].nr_pages > 0) {
+ kobject_unregister(&mci->csrows[i].kobj);
+ kobject_put(&mci->csrows[i].kobj);
+ }
+ }
+
+ sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
+
+ kobject_unregister(&mci->edac_mci_kobj);
+ kobject_put(&mci->edac_mci_kobj);
+}
+
+/* END OF sysfs data and methods */
+
+#ifdef CONFIG_EDAC_DEBUG
+
+EXPORT_SYMBOL(edac_mc_dump_channel);
+
+void edac_mc_dump_channel(struct channel_info *chan)
+{
+ debugf4("\tchannel = %p\n", chan);
+ debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
+ debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
+ debugf4("\tchannel->label = '%s'\n", chan->label);
+ debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+}
+
+
+EXPORT_SYMBOL(edac_mc_dump_csrow);
+
+void edac_mc_dump_csrow(struct csrow_info *csrow)
+{
+ debugf4("\tcsrow = %p\n", csrow);
+ debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
+ debugf4("\tcsrow->first_page = 0x%lx\n",
+ csrow->first_page);
+ debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
+ debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
+ debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
+ debugf4("\tcsrow->nr_channels = %d\n",
+ csrow->nr_channels);
+ debugf4("\tcsrow->channels = %p\n", csrow->channels);
+ debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
+}
+
+
+EXPORT_SYMBOL(edac_mc_dump_mci);
+
+void edac_mc_dump_mci(struct mem_ctl_info *mci)
+{
+ debugf3("\tmci = %p\n", mci);
+ debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
+ debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
+ debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
+ debugf4("\tmci->edac_check = %p\n", mci->edac_check);
+ debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
+ mci->nr_csrows, mci->csrows);
+ debugf3("\tpdev = %p\n", mci->pdev);
+ debugf3("\tmod_name:ctl_name = %s:%s\n",
+ mci->mod_name, mci->ctl_name);
+ debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
+}
+
+
+#endif /* CONFIG_EDAC_DEBUG */
+
+/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
+ * Adjust 'ptr' so that its alignment is at least as stringent as what the
+ * compiler would provide for X and return the aligned result.
+ *
+ * If 'size' is a constant, the compiler will optimize this whole function
+ * down to either a no-op or the addition of a constant to the value of 'ptr'.
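+ *
+ * e.g. on a machine with 32-bit longs, align_ptr(p, 8) rounds 'p' up to
+ * the next multiple of sizeof(long long) == 8, while align_ptr(p, 2)
+ * only enforces sizeof(short) == 2 alignment.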
+ */
+static inline char *align_ptr(void *ptr, unsigned size)
+{
+ unsigned align, r;
+
+ /* Here we assume that the alignment of a "long long" is the most
+ * stringent alignment that the compiler will ever provide by default.
+ * As far as I know, this is a reasonable assumption.
+ */
+ if (size > sizeof(long))
+ align = sizeof(long long);
+ else if (size > sizeof(int))
+ align = sizeof(long);
+ else if (size > sizeof(short))
+ align = sizeof(int);
+ else if (size > sizeof(char))
+ align = sizeof(short);
+ else
+ return (char *) ptr;
+
+	r = (unsigned long) ptr % align;
+
+ if (r == 0)
+ return (char *) ptr;
+
+ return (char *) (((unsigned long) ptr) + align - r);
+}
+
+
+EXPORT_SYMBOL(edac_mc_alloc);
+
+/**
+ * edac_mc_alloc: Allocate a struct mem_ctl_info structure
+ * @sz_pvt: size of private storage needed
+ * @nr_csrows: Number of CSROWS needed for this MC
+ * @nr_chans: Number of channels for the MC
+ *
+ * Everything is kmalloc'ed as one big chunk - more efficient.
+ * Only can be used if all structures have the same lifetime - otherwise
+ * you have to allocate and initialize your own structures.
+ *
+ * Use edac_mc_free() to free mc structures allocated by this function.
+ *
+ * Returns:
+ * NULL allocation failed
+ * struct mem_ctl_info pointer
+ */
+struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
+ unsigned nr_chans)
+{
+ struct mem_ctl_info *mci;
+ struct csrow_info *csi, *csrow;
+ struct channel_info *chi, *chp, *chan;
+ void *pvt;
+ unsigned size;
+ int row, chn;
+
+ /* Figure out the offsets of the various items from the start of an mc
+ * structure. We want the alignment of each item to be at least as
+ * stringent as what the compiler would provide if we could simply
+ * hardcode everything into a single struct.
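+	 *
+	 * The single allocation is therefore laid out as:
+	 *   [mem_ctl_info][nr_csrows csrow_infos]
+	 *   [nr_csrows * nr_chans channel_infos][sz_pvt private bytes]
+	 * with each section aligned by align_ptr().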
+ */
+ mci = (struct mem_ctl_info *) 0;
+ csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi));
+ chi = (struct channel_info *)
+ align_ptr(&csi[nr_csrows], sizeof(*chi));
+ pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+ size = ((unsigned long) pvt) + sz_pvt;
+
+ if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* Adjust pointers so they point within the memory we just allocated
+ * rather than an imaginary chunk of memory located at address 0.
+ */
+ csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
+ chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
+ pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
+
+ memset(mci, 0, size); /* clear all fields */
+
+ mci->csrows = csi;
+ mci->pvt_info = pvt;
+ mci->nr_csrows = nr_csrows;
+
+ for (row = 0; row < nr_csrows; row++) {
+ csrow = &csi[row];
+ csrow->csrow_idx = row;
+ csrow->mci = mci;
+ csrow->nr_channels = nr_chans;
+ chp = &chi[row * nr_chans];
+ csrow->channels = chp;
+
+ for (chn = 0; chn < nr_chans; chn++) {
+ chan = &chp[chn];
+ chan->chan_idx = chn;
+ chan->csrow = csrow;
+ }
+ }
+
+ return mci;
+}
+
+
+EXPORT_SYMBOL(edac_mc_free);
+
+/**
+ * edac_mc_free: Free a previously allocated 'mci' structure
+ * @mci: pointer to a struct mem_ctl_info structure
+ *
+ * Free up a previously allocated mci structure.
+ * An MCI structure can be in two states after being allocated
+ * by edac_mc_alloc():
+ * 1) Allocated in an MC driver's probe, but not yet committed
+ * 2) Allocated and committed, by a call to edac_mc_add_mc()
+ * Since edac_mc_add_mc() is what adds the sysfs entries,
+ * this free function must determine which state the 'mci'
+ * structure is in, then either free it directly or
+ * perform kobject cleanup by calling edac_remove_sysfs_mci_device().
+ *
+ * VOID Return
+ */
+void edac_mc_free(struct mem_ctl_info *mci)
+{
+ /* only if sysfs entries for this mci instance exist
+ * do we remove them and defer the actual kfree via
+ * the kobject 'release()' callback.
+ *
+ * Otherwise, do a straight kfree now.
+ */
+ if (mci->sysfs_active == MCI_SYSFS_ACTIVE)
+ edac_remove_sysfs_mci_device(mci);
+ else
+ kfree(mci);
+}
+
+
+
+EXPORT_SYMBOL(edac_mc_find_mci_by_pdev);
+
+struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct list_head *item;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ if (mci->pdev == pdev)
+ return mci;
+ }
+
+ return NULL;
+}
+
+static int add_mc_to_global_list(struct mem_ctl_info *mci)
+{
+ struct list_head *item, *insert_before;
+ struct mem_ctl_info *p;
+ int i;
+
+ if (list_empty(&mc_devices)) {
+ mci->mc_idx = 0;
+ insert_before = &mc_devices;
+ } else {
+ if (edac_mc_find_mci_by_pdev(mci->pdev)) {
+ printk(KERN_WARNING
+ "EDAC MC: %s (%s) %s %s already assigned %d\n",
+ mci->pdev->dev.bus_id, pci_name(mci->pdev),
+ mci->mod_name, mci->ctl_name, mci->mc_idx);
+ return 1;
+ }
+
+ insert_before = NULL;
+ i = 0;
+
+ list_for_each(item, &mc_devices) {
+ p = list_entry(item, struct mem_ctl_info, link);
+
+ if (p->mc_idx != i) {
+ insert_before = item;
+ break;
+ }
+
+ i++;
+ }
+
+ mci->mc_idx = i;
+
+ if (insert_before == NULL)
+ insert_before = &mc_devices;
+ }
+
+ list_add_tail_rcu(&mci->link, insert_before);
+ return 0;
+}
+
+
+
+EXPORT_SYMBOL(edac_mc_add_mc);
+
+/**
+ * edac_mc_add_mc: Insert the 'mci' structure into the mci global list
+ * @mci: pointer to the mci structure to be added to the list
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+
+/* FIXME - should a warning be printed if no error detection? correction? */
+int edac_mc_add_mc(struct mem_ctl_info *mci)
+{
+ int rc = 1;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+#ifdef CONFIG_EDAC_DEBUG
+ if (edac_debug_level >= 3)
+ edac_mc_dump_mci(mci);
+ if (edac_debug_level >= 4) {
+ int i;
+
+ for (i = 0; i < mci->nr_csrows; i++) {
+ int j;
+ edac_mc_dump_csrow(&mci->csrows[i]);
+ for (j = 0; j < mci->csrows[i].nr_channels; j++)
+ edac_mc_dump_channel(&mci->csrows[i].
+ channels[j]);
+ }
+ }
+#endif
+ down(&mem_ctls_mutex);
+
+ if (add_mc_to_global_list(mci))
+ goto finish;
+
+ /* set load time so that error rate can be tracked */
+ mci->start_time = jiffies;
+
+ if (edac_create_sysfs_mci_device(mci)) {
+ printk(KERN_WARNING
+ "EDAC MC%d: failed to create sysfs device\n",
+ mci->mc_idx);
+ /* FIXME - should there be an error code and unwind? */
+ goto finish;
+ }
+
+ /* Report action taken */
+ printk(KERN_INFO
+ "EDAC MC%d: Giving out device to %s %s: PCI %s\n",
+ mci->mc_idx, mci->mod_name, mci->ctl_name,
+ pci_name(mci->pdev));
+
+
+ rc = 0;
+
+finish:
+ up(&mem_ctls_mutex);
+ return rc;
+}
+
+
+
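+/* Removal must wait for any RCU readers walking mc_devices to drain:
+ * list_del_rcu() unlinks the entry, call_rcu() schedules
+ * complete_mc_list_del() after a grace period, and
+ * wait_for_completion() blocks until it has run, after which the mci
+ * can safely be freed.
+ */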
+static void complete_mc_list_del(struct rcu_head *head)
+{
+ struct mem_ctl_info *mci;
+
+ mci = container_of(head, struct mem_ctl_info, rcu);
+ INIT_LIST_HEAD(&mci->link);
+ complete(&mci->complete);
+}
+
+static void del_mc_from_global_list(struct mem_ctl_info *mci)
+{
+ list_del_rcu(&mci->link);
+ init_completion(&mci->complete);
+ call_rcu(&mci->rcu, complete_mc_list_del);
+ wait_for_completion(&mci->complete);
+}
+
+EXPORT_SYMBOL(edac_mc_del_mc);
+
+/**
+ * edac_mc_del_mc: Remove the specified mci structure from global list
+ * @mci: Pointer to struct mem_ctl_info structure
+ *
+ * Returns:
+ * 0 Success
+ * 1 Failure
+ */
+int edac_mc_del_mc(struct mem_ctl_info *mci)
+{
+ int rc = 1;
+
+ debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ down(&mem_ctls_mutex);
+ del_mc_from_global_list(mci);
+ printk(KERN_INFO
+ "EDAC MC%d: Removed device %d for %s %s: PCI %s\n",
+ mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name,
+ pci_name(mci->pdev));
+ rc = 0;
+ up(&mem_ctls_mutex);
+
+ return rc;
+}
+
+
+EXPORT_SYMBOL(edac_mc_scrub_block);
+
+void edac_mc_scrub_block(unsigned long page, unsigned long offset,
+ u32 size)
+{
+ struct page *pg;
+ void *virt_addr;
+ unsigned long flags = 0;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* ECC error page was not in our memory. Ignore it. */
+	if (!pfn_valid(page))
+ return;
+
+ /* Find the actual page structure then map it and fix */
+ pg = pfn_to_page(page);
+
+ if (PageHighMem(pg))
+ local_irq_save(flags);
+
+ virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
+
+ /* Perform architecture specific atomic scrub operation */
+ atomic_scrub(virt_addr + offset, size);
+
+ /* Unmap and complete */
+ kunmap_atomic(virt_addr, KM_BOUNCE_READ);
+
+ if (PageHighMem(pg))
+ local_irq_restore(flags);
+}
+
+
+/* FIXME - should return -1 */
+EXPORT_SYMBOL(edac_mc_find_csrow_by_page);
+
+int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+ unsigned long page)
+{
+ struct csrow_info *csrows = mci->csrows;
+ int row, i;
+
+ debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__,
+ page);
+ row = -1;
+
+ for (i = 0; i < mci->nr_csrows; i++) {
+ struct csrow_info *csrow = &csrows[i];
+
+ if (csrow->nr_pages == 0)
+ continue;
+
+ debugf3("MC%d: " __FILE__
+ ": %s(): first(0x%lx) page(0x%lx)"
+ " last(0x%lx) mask(0x%lx)\n", mci->mc_idx,
+ __func__, csrow->first_page, page,
+ csrow->last_page, csrow->page_mask);
+
+ if ((page >= csrow->first_page) &&
+ (page <= csrow->last_page) &&
+ ((page & csrow->page_mask) ==
+ (csrow->first_page & csrow->page_mask))) {
+ row = i;
+ break;
+ }
+ }
+
+ if (row == -1)
+ printk(KERN_ERR
+ "EDAC MC%d: could not look up page error address %lx\n",
+ mci->mc_idx, (unsigned long) page);
+
+ return row;
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ce);
+
+/* FIXME - settable log (warning/emerg) levels */
+/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
+void edac_mc_handle_ce(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page,
+ unsigned long syndrome, int row, int channel,
+ const char *msg)
+{
+ unsigned long remapped_page;
+
+ debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+
+ /* FIXME - maybe make panic on INTERNAL ERROR an option */
+ if (row >= mci->nr_csrows || row < 0) {
+ /* something is wrong */
+ printk(KERN_ERR
+ "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
+ mci->mc_idx, row, mci->nr_csrows);
+ edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+ if (channel >= mci->csrows[row].nr_channels || channel < 0) {
+ /* something is wrong */
+ printk(KERN_ERR
+ "EDAC MC%d: INTERNAL ERROR: channel out of range "
+ "(%d >= %d)\n",
+ mci->mc_idx, channel, mci->csrows[row].nr_channels);
+ edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ if (log_ce)
+ /* FIXME - put in DIMM location */
+ printk(KERN_WARNING
+ "EDAC MC%d: CE page 0x%lx, offset 0x%lx,"
+ " grain %d, syndrome 0x%lx, row %d, channel %d,"
+ " label \"%s\": %s\n", mci->mc_idx,
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, syndrome, row, channel,
+ mci->csrows[row].channels[channel].label, msg);
+
+ mci->ce_count++;
+ mci->csrows[row].ce_count++;
+ mci->csrows[row].channels[channel].ce_count++;
+
+ if (mci->scrub_mode & SCRUB_SW_SRC) {
+ /*
+ * Some MC's can remap memory so that it is still available
+ * at a different address when PCI devices map into memory.
+ * MC's that can't do this lose the memory where PCI devices
+	 * are mapped. This mapping is MC dependent and so we call
+ * back into the MC driver for it to map the MC page to
+ * a physical (CPU) page which can then be mapped to a virtual
+ * page - which can then be scrubbed.
+ */
+ remapped_page = mci->ctl_page_to_phys ?
+ mci->ctl_page_to_phys(mci, page_frame_number) :
+ page_frame_number;
+
+ edac_mc_scrub_block(remapped_page, offset_in_page,
+ mci->csrows[row].grain);
+ }
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ce_no_info);
+
+void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
+ const char *msg)
+{
+ if (log_ce)
+ printk(KERN_WARNING
+ "EDAC MC%d: CE - no information available: %s\n",
+ mci->mc_idx, msg);
+ mci->ce_noinfo_count++;
+ mci->ce_count++;
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ue);
+
+void edac_mc_handle_ue(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page, int row,
+ const char *msg)
+{
+ int len = EDAC_MC_LABEL_LEN * 4;
+ char labels[len + 1];
+ char *pos = labels;
+ int chan;
+ int chars;
+
+ debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+
+ /* FIXME - maybe make panic on INTERNAL ERROR an option */
+ if (row >= mci->nr_csrows || row < 0) {
+ /* something is wrong */
+ printk(KERN_ERR
+ "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
+ mci->mc_idx, row, mci->nr_csrows);
+ edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ chars = snprintf(pos, len + 1, "%s",
+ mci->csrows[row].channels[0].label);
+ len -= chars;
+ pos += chars;
+ for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
+ chan++) {
+ chars = snprintf(pos, len + 1, ":%s",
+ mci->csrows[row].channels[chan].label);
+ len -= chars;
+ pos += chars;
+ }
+
+ if (log_ue)
+ printk(KERN_EMERG
+ "EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
+ " labels \"%s\": %s\n", mci->mc_idx,
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, row, labels, msg);
+
+ if (panic_on_ue)
+		panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
+ " labels \"%s\": %s\n", mci->mc_idx,
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, row, labels, msg);
+
+ mci->ue_count++;
+ mci->csrows[row].ue_count++;
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ue_no_info);
+
+void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
+ const char *msg)
+{
+ if (panic_on_ue)
+ panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
+
+ if (log_ue)
+ printk(KERN_WARNING
+ "EDAC MC%d: UE - no information available: %s\n",
+ mci->mc_idx, msg);
+ mci->ue_noinfo_count++;
+ mci->ue_count++;
+}
+
+
+#ifdef CONFIG_PCI
+
+static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
+{
+ int where;
+ u16 status;
+
+ where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
+ pci_read_config_word(dev, where, &status);
+
+	/* If we get back 0xFFFF then we must suspect that the card has been
+	 * pulled but the Linux PCI layer has not yet finished cleaning up.
+	 * We don't want to report on such devices.
+	 */
+
+ if (status == 0xFFFF) {
+ u32 sanity;
+ pci_read_config_dword(dev, 0, &sanity);
+ if (sanity == 0xFFFFFFFF)
+ return 0;
+ }
+ status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_PARITY;
+
+ if (status)
+ /* reset only the bits we are interested in */
+ pci_write_config_word(dev, where, status);
+
+ return status;
+}
+
+typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
+
+/* Clear any PCI parity errors logged by this device. */
+static void edac_pci_dev_parity_clear( struct pci_dev *dev )
+{
+ u8 header_type;
+
+ get_pci_parity_status(dev, 0);
+
+ /* read the device TYPE, looking for bridges */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
+ get_pci_parity_status(dev, 1);
+}
+
+/*
+ * PCI Parity polling
+ *
+ */
+static void edac_pci_dev_parity_test(struct pci_dev *dev)
+{
+ u16 status;
+ u8 header_type;
+
+ /* read the STATUS register on this device
+ */
+ status = get_pci_parity_status(dev, 0);
+
+ debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
+
+ /* check the status reg for errors */
+ if (status) {
+ if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
+ printk(KERN_CRIT
+ "EDAC PCI- "
+ "Signaled System Error on %s\n",
+ pci_name (dev));
+
+ if (status & (PCI_STATUS_PARITY)) {
+ printk(KERN_CRIT
+ "EDAC PCI- "
+ "Master Data Parity Error on %s\n",
+ pci_name (dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+ printk(KERN_CRIT
+ "EDAC PCI- "
+ "Detected Parity Error on %s\n",
+ pci_name (dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+ }
+
+ /* read the device TYPE, looking for bridges */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* On bridges, need to examine secondary status register */
+ status = get_pci_parity_status(dev, 1);
+
+ debugf2("PCI SEC_STATUS= 0x%04x %s\n",
+ status, dev->dev.bus_id );
+
+ /* check the secondary status reg for errors */
+ if (status) {
+ if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
+ printk(KERN_CRIT
+ "EDAC PCI-Bridge- "
+ "Signaled System Error on %s\n",
+ pci_name (dev));
+
+ if (status & (PCI_STATUS_PARITY)) {
+ printk(KERN_CRIT
+ "EDAC PCI-Bridge- "
+ "Master Data Parity Error on %s\n",
+ pci_name (dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+ printk(KERN_CRIT
+ "EDAC PCI-Bridge- "
+ "Detected Parity Error on %s\n",
+ pci_name (dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+ }
+ }
+}
+
+/*
+ * check_dev_on_list: Scan for a PCI device on a white/black list
+ * @list: an EDAC &edac_pci_device_list white/black list pointer
+ * @free_index: index of next free entry on the list
+ * @pci_dev: PCI Device pointer
+ *
+ * see if list contains the device.
+ *
+ * Returns: 0 not found
+ * 1 found on list
+ */
+static int check_dev_on_list(struct edac_pci_device_list *list, int free_index,
+ struct pci_dev *dev)
+{
+ int i;
+ int rc = 0; /* Assume not found */
+	unsigned short vendor = dev->vendor;
+	unsigned short device = dev->device;
+
+ /* Scan the list, looking for a vendor/device match
+ */
+	for (i = 0; i < free_index; i++, list++) {
+		if ((list->vendor == vendor) &&
+		    (list->device == device)) {
+ rc = 1;
+ break;
+ }
+ }
+
+ return rc;
+}
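+
+/*
+ * Example (illustrative values): with pci_whitelist holding the single
+ * entry { .vendor = 0x8086, .device = 0x2578 } and pci_whitelist_count
+ * set to 1, the iterator below would poll only the Intel 82875P host
+ * bridge and skip every other PCI device.
+ */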
+
+/*
+ * pci_dev parity list iterator
+ * Scan the PCI device list for one iteration, looking for SERRORs
+ * Master Parity ERRORS or Parity ERRORs on primary or secondary devices
+ */
+static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
+{
+	struct pci_dev *dev = NULL;
+
+ /* request for kernel access to the next PCI device, if any,
+ * and while we are looking at it have its reference count
+ * bumped until we are done with it
+ */
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+
+ /* if whitelist exists then it has priority, so only scan those
+ * devices on the whitelist
+ */
+		if (pci_whitelist_count > 0) {
+			if (check_dev_on_list(pci_whitelist,
+					pci_whitelist_count, dev))
+				fn(dev);
+		} else {
+			/*
+			 * if no whitelist, then check if this device is
+ * blacklisted
+ */
+ if (!check_dev_on_list(pci_blacklist,
+ pci_blacklist_count, dev))
+ fn(dev);
+ }
+ }
+}
+
+static void do_pci_parity_check(void)
+{
+ unsigned long flags;
+ int before_count;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ if (!check_pci_parity)
+ return;
+
+ before_count = atomic_read(&pci_parity_count);
+
+ /* scan all PCI devices looking for a Parity Error on devices and
+ * bridges
+ */
+ local_irq_save(flags);
+ edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
+ local_irq_restore(flags);
+
+ /* Only if operator has selected panic on PCI Error */
+ if (panic_on_pci_parity) {
+ /* If the count is different 'after' from 'before' */
+ if (before_count != atomic_read(&pci_parity_count))
+ panic("EDAC: PCI Parity Error");
+ }
+}
+
+
+static inline void clear_pci_parity_errors(void)
+{
+ /* Clear any PCI bus parity errors that devices initially have logged
+ * in their registers.
+ */
+ edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
+}
+
+
+#else /* CONFIG_PCI */
+
+
+static inline void do_pci_parity_check(void)
+{
+ /* no-op */
+}
+
+
+static inline void clear_pci_parity_errors(void)
+{
+ /* no-op */
+}
+
+
+#endif /* CONFIG_PCI */
+
+/*
+ * Iterate over all MC instances and check for ECC, et al, errors
+ */
+static inline void check_mc_devices (void)
+{
+ unsigned long flags;
+ struct list_head *item;
+ struct mem_ctl_info *mci;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* during poll, have interrupts off */
+ local_irq_save(flags);
+
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ if (mci->edac_check != NULL)
+ mci->edac_check(mci);
+ }
+
+ local_irq_restore(flags);
+}
+
+
+/*
+ * Check MC status every poll_msec.
+ * Check PCI status every poll_msec as well.
+ *
+ * This is where the work gets done for edac.
+ *
+ * SMP safe, doesn't use NMI, and auto-rate-limits.
+ */
+static void do_edac_check(void)
+{
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ check_mc_devices();
+
+ do_pci_parity_check();
+}
+
+
+/*
+ * EDAC thread state information
+ */
+struct bs_thread_info
+{
+ struct task_struct *task;
+ struct completion *event;
+ char *name;
+ void (*run)(void);
+};
+
+static struct bs_thread_info bs_thread;
+
+/*
+ * edac_kernel_thread
+ * This is the kernel thread that processes edac operations
+ * in a normal thread environment
+ */
+static int edac_kernel_thread(void *arg)
+{
+ struct bs_thread_info *thread = (struct bs_thread_info *) arg;
+
+ /* detach thread */
+ daemonize(thread->name);
+
+ current->exit_signal = SIGCHLD;
+ allow_signal(SIGKILL);
+ thread->task = current;
+
+ /* indicate to starting task we have started */
+ complete(thread->event);
+
+ /* loop forever, until we are told to stop */
+ while(thread->run != NULL) {
+ void (*run)(void);
+
+ /* call the function to check the memory controllers */
+ run = thread->run;
+ if (run)
+ run();
+
+ if (signal_pending(current))
+ flush_signals(current);
+
+		/* ensure we are interruptible */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+		/* go to sleep for the interval;
+		 * (HZ * poll_msec) / 1000 converts milliseconds to jiffies
+		 */
+ schedule_timeout((HZ * poll_msec) / 1000);
+ try_to_freeze();
+ }
+
+ /* notify waiter that we are exiting */
+ complete(thread->event);
+
+ return 0;
+}
+
+/*
+ * edac_mc_init
+ * module initialization entry point
+ */
+static int __init edac_mc_init(void)
+{
+ int ret;
+ struct completion event;
+
+ printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n");
+
+ /*
+ * Harvest and clear any boot/initialization PCI parity errors
+ *
+ * FIXME: This only clears errors logged by devices present at time of
+ * module initialization. We should also do an initial clear
+ * of each newly hotplugged device.
+ */
+ clear_pci_parity_errors();
+
+ /* perform check for first time to harvest boot leftovers */
+ do_edac_check();
+
+	/* Create the MC sysfs entries */
+ if (edac_sysfs_memctrl_setup()) {
+ printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n");
+ return -ENODEV;
+ }
+
+ /* Create the PCI parity sysfs entries */
+ if (edac_sysfs_pci_setup()) {
+ edac_sysfs_memctrl_teardown();
+ printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n");
+ return -ENODEV;
+ }
+
+ /* Create our kernel thread */
+ init_completion(&event);
+ bs_thread.event = &event;
+ bs_thread.name = "kedac";
+ bs_thread.run = do_edac_check;
+
+ /* create our kernel thread */
+ ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL);
+ if (ret < 0) {
+ /* remove the sysfs entries */
+ edac_sysfs_memctrl_teardown();
+ edac_sysfs_pci_teardown();
+ return -ENOMEM;
+ }
+
+	/* wait for our kernel thread's ack that it is up and running */
+ wait_for_completion(&event);
+
+ return 0;
+}
+
+
+/*
+ * edac_mc_exit()
+ * module exit/termination function
+ */
+static void __exit edac_mc_exit(void)
+{
+ struct completion event;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ init_completion(&event);
+ bs_thread.event = &event;
+
+ /* As soon as ->run is set to NULL, the task could disappear,
+ * so we need to hold tasklist_lock until we have sent the signal
+ */
+ read_lock(&tasklist_lock);
+ bs_thread.run = NULL;
+ send_sig(SIGKILL, bs_thread.task, 1);
+ read_unlock(&tasklist_lock);
+ wait_for_completion(&event);
+
+ /* tear down the sysfs device */
+ edac_sysfs_memctrl_teardown();
+ edac_sysfs_pci_teardown();
+}
+
+
+
+
+module_init(edac_mc_init);
+module_exit(edac_mc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
+ "Based on.work by Dan Hollis et al");
+MODULE_DESCRIPTION("Core library routines for MC reporting");
+
+module_param(panic_on_ue, int, 0644);
+MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
+module_param(check_pci_parity, int, 0644);
+MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
+module_param(panic_on_pci_parity, int, 0644);
+MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
+module_param(log_ue, int, 0644);
+MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
+module_param(log_ce, int, 0644);
+MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on");
+module_param(poll_msec, int, 0644);
+MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds");
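+/*
+ * Illustrative usage (the module name is assumed from this file's
+ * basename and is not taken from the patch itself):
+ *
+ *	modprobe edac_mc check_pci_parity=1 poll_msec=100
+ *
+ * Because the parameters are registered with mode 0644 they can also
+ * be adjusted at runtime through /sys/module/<module>/parameters/.
+ */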
+#ifdef CONFIG_EDAC_DEBUG
+module_param(edac_debug_level, int, 0644);
+MODULE_PARM_DESC(edac_debug_level, "Debug level");
+#endif
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
new file mode 100644
index 000000000000..75ecf484a43a
--- /dev/null
+++ b/drivers/edac/edac_mc.h
@@ -0,0 +1,448 @@
+/*
+ * MC kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * $Id: edac_mc.h,v 1.4.2.10 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#ifndef _EDAC_MC_H_
+#define _EDAC_MC_H_
+
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/nmi.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+
+
+#define EDAC_MC_LABEL_LEN 31
+#define MC_PROC_NAME_MAX_LEN 7
+
+#if PAGE_SHIFT < 20
+#define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) )
+#else				/* PAGE_SHIFT >= 20 */
+#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) )
+#endif
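+
+/*
+ * Worked example: with 4KiB pages (PAGE_SHIFT == 12) the first branch
+ * applies, so PAGES_TO_MiB(256) == 256 >> (20 - 12) == 1, i.e. 256
+ * pages make up 1MiB.
+ */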
+
+#ifdef CONFIG_EDAC_DEBUG
+extern int edac_debug_level;
+#define edac_debug_printk(level, fmt, args...) \
+do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0)
+#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
+#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
+#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
+#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
+#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
+#else /* !CONFIG_EDAC_DEBUG */
+#define debugf0( ... )
+#define debugf1( ... )
+#define debugf2( ... )
+#define debugf3( ... )
+#define debugf4( ... )
+#endif /* !CONFIG_EDAC_DEBUG */
+
+
+#define bs_xstr(s) bs_str(s)
+#define bs_str(s) #s
+#define BS_MOD_STR bs_xstr(KBUILD_BASENAME)
+
+#define BIT(x) (1 << (x))
+
+#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev
+
+/* memory devices */
+enum dev_type {
+ DEV_UNKNOWN = 0,
+ DEV_X1,
+ DEV_X2,
+ DEV_X4,
+ DEV_X8,
+ DEV_X16,
+ DEV_X32, /* Do these parts exist? */
+ DEV_X64 /* Do these parts exist? */
+};
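+
+/*
+ * Example: a 64-bit wide rank built from x8 parts takes eight memory
+ * devices and is described as DEV_X8; building the same width from x4
+ * parts takes sixteen devices and would be DEV_X4.
+ */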
+
+#define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN)
+#define DEV_FLAG_X1 BIT(DEV_X1)
+#define DEV_FLAG_X2 BIT(DEV_X2)
+#define DEV_FLAG_X4 BIT(DEV_X4)
+#define DEV_FLAG_X8 BIT(DEV_X8)
+#define DEV_FLAG_X16 BIT(DEV_X16)
+#define DEV_FLAG_X32 BIT(DEV_X32)
+#define DEV_FLAG_X64 BIT(DEV_X64)
+
+/* memory types */
+enum mem_type {
+ MEM_EMPTY = 0, /* Empty csrow */
+ MEM_RESERVED, /* Reserved csrow type */
+ MEM_UNKNOWN, /* Unknown csrow type */
+ MEM_FPM, /* Fast page mode */
+ MEM_EDO, /* Extended data out */
+ MEM_BEDO, /* Burst Extended data out */
+ MEM_SDR, /* Single data rate SDRAM */
+ MEM_RDR, /* Registered single data rate SDRAM */
+ MEM_DDR, /* Double data rate SDRAM */
+ MEM_RDDR, /* Registered Double data rate SDRAM */
+ MEM_RMBS /* Rambus DRAM */
+};
+
+#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
+#define MEM_FLAG_RESERVED BIT(MEM_RESERVED)
+#define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN)
+#define MEM_FLAG_FPM BIT(MEM_FPM)
+#define MEM_FLAG_EDO BIT(MEM_EDO)
+#define MEM_FLAG_BEDO BIT(MEM_BEDO)
+#define MEM_FLAG_SDR BIT(MEM_SDR)
+#define MEM_FLAG_RDR BIT(MEM_RDR)
+#define MEM_FLAG_DDR BIT(MEM_DDR)
+#define MEM_FLAG_RDDR BIT(MEM_RDDR)
+#define MEM_FLAG_RMBS BIT(MEM_RMBS)
+
+
+/* chipset Error Detection and Correction capabilities and mode */
+enum edac_type {
+ EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
+	EDAC_NONE,		/* Doesn't support ECC */
+ EDAC_RESERVED, /* Reserved ECC type */
+ EDAC_PARITY, /* Detects parity errors */
+ EDAC_EC, /* Error Checking - no correction */
+ EDAC_SECDED, /* Single bit error correction, Double detection */
+ EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */
+ EDAC_S4ECD4ED, /* Chipkill x4 devices */
+ EDAC_S8ECD8ED, /* Chipkill x8 devices */
+ EDAC_S16ECD16ED, /* Chipkill x16 devices */
+};
+
+#define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN)
+#define EDAC_FLAG_NONE BIT(EDAC_NONE)
+#define EDAC_FLAG_PARITY BIT(EDAC_PARITY)
+#define EDAC_FLAG_EC BIT(EDAC_EC)
+#define EDAC_FLAG_SECDED BIT(EDAC_SECDED)
+#define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED)
+#define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED)
+#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
+#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
+
+
+/* scrubbing capabilities */
+enum scrub_type {
+ SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
+ SCRUB_NONE, /* No scrubber */
+ SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */
+ SCRUB_SW_SRC, /* Software scrub only errors */
+ SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */
+ SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */
+ SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */
+ SCRUB_HW_SRC, /* Hardware scrub only errors */
+ SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */
+ SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */
+};
+
+#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
+#define SCRUB_FLAG_SW_SRC	BIT(SCRUB_SW_SRC)
+#define SCRUB_FLAG_SW_PROG_SRC	BIT(SCRUB_SW_PROG_SRC)
+#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_TUNABLE)
+#define SCRUB_FLAG_HW_PROG	BIT(SCRUB_HW_PROG)
+#define SCRUB_FLAG_HW_SRC	BIT(SCRUB_HW_SRC)
+#define SCRUB_FLAG_HW_PROG_SRC	BIT(SCRUB_HW_PROG_SRC)
+#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
+
+enum mci_sysfs_status {
+ MCI_SYSFS_INACTIVE = 0, /* sysfs entries NOT registered */
+ MCI_SYSFS_ACTIVE /* sysfs entries ARE registered */
+};
+
+/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
+
+/*
+ * There are several things to be aware of that aren't at all obvious:
+ *
+ *
+ * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
+ *
+ * These are some of the many terms that are thrown about that don't always
+ * mean what people think they mean (Inconceivable!). In the interest of
+ * creating a common ground for discussion, terms and their definitions
+ * will be established.
+ *
+ * Memory devices: The individual chip on a memory stick. These devices
+ * commonly output 4 and 8 bits each. Grouping several
+ * of these in parallel provides 64 bits which is common
+ * for a memory stick.
+ *
+ * Memory Stick:	A printed circuit board that aggregates multiple
+ *			memory devices in parallel.  This is the atomic
+ *			memory component that is purchasable by Joe consumer
+ *			and loaded into a memory socket.
+ *
+ * Socket: A physical connector on the motherboard that accepts
+ * a single memory stick.
+ *
+ * Channel: Set of memory devices on a memory stick that must be
+ * grouped in parallel with one or more additional
+ * channels from other memory sticks. This parallel
+ *			grouping of the output from multiple channels is
+ * necessary for the smallest granularity of memory access.
+ * Some memory controllers are capable of single channel -
+ * which means that memory sticks can be loaded
+ * individually. Other memory controllers are only
+ * capable of dual channel - which means that memory
+ * sticks must be loaded as pairs (see "socket set").
+ *
+ * Chip-select row:	All of the memory devices that are selected together
+ *			for a single, minimum grain of memory access.
+ * This selects all of the parallel memory devices across
+ * all of the parallel channels. Common chip-select rows
+ * for single channel are 64 bits, for dual channel 128
+ * bits.
+ *
+ * Single-Ranked stick:	A single-ranked stick has 1 chip-select row of memory.
+ *			Motherboards commonly drive two chip-select pins to
+ *			a memory stick.  A single-ranked stick will occupy
+ *			only one of those rows.  The other will be unused.
+ *
+ * Double-Ranked stick: A double-ranked stick has two chip-select rows which
+ * access different sets of memory devices. The two
+ * rows cannot be accessed concurrently.
+ *
+ * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
+ * A double-sided stick has two chip-select rows which
+ * access different sets of memory devices. The two
+ * rows cannot be accessed concurrently. "Double-sided"
+ * is irrespective of the memory devices being mounted
+ * on both sides of the memory stick.
+ *
+ * Socket set:		All of the memory sticks that are required for
+ * a single memory access or all of the memory sticks
+ * spanned by a chip-select row. A single socket set
+ * has two chip-select rows and if double-sided sticks
+ * are used these will occupy those chip-select rows.
+ *
+ * Bank:		This term is avoided because it is unclear whether
+ *			it refers to a chip-select row or to a socket set,
+ *			so those terms are used instead.
+ *
+ * Controller pages:
+ *
+ * Physical pages:
+ *
+ * Virtual pages:
+ *
+ *
+ * STRUCTURE ORGANIZATION AND CHOICES
+ *
+ *
+ *
+ * PS - I enjoyed writing all that about as much as you enjoyed reading it.
+ */
+
+
+struct channel_info {
+ int chan_idx; /* channel index */
+ u32 ce_count; /* Correctable Errors for this CHANNEL */
+ char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
+ struct csrow_info *csrow; /* the parent */
+};
+
+
+struct csrow_info {
+ unsigned long first_page; /* first page number in dimm */
+ unsigned long last_page; /* last page number in dimm */
+ unsigned long page_mask; /* used for interleaving -
+ 0UL for non intlv */
+ u32 nr_pages; /* number of pages in csrow */
+ u32 grain; /* granularity of reported error in bytes */
+ int csrow_idx; /* the chip-select row */
+ enum dev_type dtype; /* memory device type */
+ u32 ue_count; /* Uncorrectable Errors for this csrow */
+ u32 ce_count; /* Correctable Errors for this csrow */
+ enum mem_type mtype; /* memory csrow type */
+ enum edac_type edac_mode; /* EDAC mode for this csrow */
+ struct mem_ctl_info *mci; /* the parent */
+
+ struct kobject kobj; /* sysfs kobject for this csrow */
+
+ /* FIXME the number of CHANNELs might need to become dynamic */
+ u32 nr_channels;
+ struct channel_info *channels;
+};
+
+
+struct mem_ctl_info {
+ struct list_head link; /* for global list of mem_ctl_info structs */
+ unsigned long mtype_cap; /* memory types supported by mc */
+ unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
+ unsigned long edac_cap; /* configuration capabilities - this is
+ closely related to edac_ctl_cap. The
+ difference is that the controller
+ may be capable of s4ecd4ed which would
+ be listed in edac_ctl_cap, but if
+ channels aren't capable of s4ecd4ed then the
+ edac_cap would not have that capability. */
+ unsigned long scrub_cap; /* chipset scrub capabilities */
+ enum scrub_type scrub_mode; /* current scrub mode */
+
+ enum mci_sysfs_status sysfs_active; /* status of sysfs */
+
+ /* pointer to edac checking routine */
+ void (*edac_check) (struct mem_ctl_info * mci);
+ /*
+ * Remaps memory pages: controller pages to physical pages.
+ * For most MC's, this will be NULL.
+ */
+ /* FIXME - why not send the phys page to begin with? */
+ unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
+ unsigned long page);
+ int mc_idx;
+ int nr_csrows;
+ struct csrow_info *csrows;
+ /*
+ * FIXME - what about controllers on other busses? - IDs must be
+ * unique. pdev pointer should be sufficiently unique, but
+ * BUS:SLOT.FUNC numbers may not be unique.
+ */
+ struct pci_dev *pdev;
+ const char *mod_name;
+ const char *mod_ver;
+ const char *ctl_name;
+ char proc_name[MC_PROC_NAME_MAX_LEN + 1];
+ void *pvt_info;
+ u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
+ u32 ce_noinfo_count; /* Correctable Errors w/o info */
+ u32 ue_count; /* Total Uncorrectable Errors for this MC */
+ u32 ce_count; /* Total Correctable Errors for this MC */
+ unsigned long start_time; /* mci load start time (in jiffies) */
+
+ /* this stuff is for safe removal of mc devices from global list while
+ * NMI handlers may be traversing list
+ */
+ struct rcu_head rcu;
+ struct completion complete;
+
+ /* edac sysfs device control */
+ struct kobject edac_mci_kobj;
+};
+
+
+
+/* write all or some bits in a byte-register*/
+static inline void pci_write_bits8(struct pci_dev *pdev, int offset,
+ u8 value, u8 mask)
+{
+ if (mask != 0xff) {
+ u8 buf;
+ pci_read_config_byte(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+ pci_write_config_byte(pdev, offset, value);
+}
+
+
+/* write all or some bits in a word-register*/
+static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
+ u16 value, u16 mask)
+{
+ if (mask != 0xffff) {
+ u16 buf;
+ pci_read_config_word(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+ pci_write_config_word(pdev, offset, value);
+}
+
+
+/* write all or some bits in a dword-register*/
+static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
+ u32 value, u32 mask)
+{
+	if (mask != 0xffffffff) {
+ u32 buf;
+ pci_read_config_dword(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+ pci_write_config_dword(pdev, offset, value);
+}
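+
+/*
+ * Usage sketch for the helpers above, as the chipset drivers in this
+ * patch use them: only the bits named in the mask are rewritten and the
+ * rest of the register is put back as read, e.g. clearing the two
+ * error-status bits of the i82860 with
+ *
+ *	pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
+ */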
+
+
+#ifdef CONFIG_EDAC_DEBUG
+void edac_mc_dump_channel(struct channel_info *chan);
+void edac_mc_dump_mci(struct mem_ctl_info *mci);
+void edac_mc_dump_csrow(struct csrow_info *csrow);
+#endif /* CONFIG_EDAC_DEBUG */
+
+extern int edac_mc_add_mc(struct mem_ctl_info *mci);
+extern int edac_mc_del_mc(struct mem_ctl_info *mci);
+
+extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+ unsigned long page);
+
+extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev
+ *pdev);
+
+extern void edac_mc_scrub_block(unsigned long page,
+ unsigned long offset, u32 size);
+
+/*
+ * The no info errors are used when error overflows are reported.
+ * There are a limited number of error logging registers that can
+ * be exhausted.  When all registers are exhausted and an additional
+ * error occurs then an error overflow register records that an
+ * error occurred and the type of error, but doesn't have any
+ * further information. The ce/ue versions make for cleaner
+ * reporting logic and function interface - reduces conditional
+ * statement clutter and extra function arguments.
+ */
+extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page,
+ unsigned long syndrome,
+ int row, int channel, const char *msg);
+
+extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
+ const char *msg);
+
+extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page,
+ int row, const char *msg);
+
+extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
+ const char *msg);
+
+/*
+ * This kmalloc's and initializes all the structures.
+ * Can't be used if all structures don't have the same lifetime.
+ */
+extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt,
+ unsigned nr_csrows, unsigned nr_chans);
+
+/* Free an mc previously allocated by edac_mc_alloc() */
+extern void edac_mc_free(struct mem_ctl_info *mci);
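+
+/*
+ * Typical driver flow, condensed from the i82860/i82875p probe and
+ * remove paths elsewhere in this patch (illustrative; error handling
+ * omitted):
+ *
+ *	mci = edac_mc_alloc(sizeof(*pvt), nr_csrows, nr_chans);
+ *	mci->pdev = pdev;
+ *	... fill in mtype_cap, edac_check and the csrows[] geometry ...
+ *	edac_mc_add_mc(mci);	(publishes the mci to the polling thread)
+ *
+ * and on removal:
+ *
+ *	mci = edac_mc_find_mci_by_pdev(pdev);
+ *	if (mci && !edac_mc_del_mc(mci))
+ *		edac_mc_free(mci);
+ */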
+
+
+#endif /* _EDAC_MC_H_ */
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
new file mode 100644
index 000000000000..52596e75f9c2
--- /dev/null
+++ b/drivers/edac/i82860_edac.c
@@ -0,0 +1,299 @@
+/*
+ * Intel 82860 Memory Controller kernel module
+ * (C) 2005 Red Hat (http://www.redhat.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Ben Woodard <woodard@redhat.com>
+ * shamelessly copied from and based upon the edac_i82875 driver
+ * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_82860_0
+#define PCI_DEVICE_ID_INTEL_82860_0 0x2531
+#endif /* PCI_DEVICE_ID_INTEL_82860_0 */
+
+#define I82860_MCHCFG 0x50
+#define I82860_GBA 0x60
+#define I82860_GBA_MASK 0x7FF
+#define I82860_GBA_SHIFT 24
+#define I82860_ERRSTS 0xC8
+#define I82860_EAP 0xE4
+#define I82860_DERRCTL_STS 0xE2
+
+enum i82860_chips {
+ I82860 = 0,
+};
+
+struct i82860_dev_info {
+ const char *ctl_name;
+};
+
+struct i82860_error_info {
+ u16 errsts;
+ u32 eap;
+ u16 derrsyn;
+ u16 errsts2;
+};
+
+static const struct i82860_dev_info i82860_devs[] = {
+ [I82860] = {
+ .ctl_name = "i82860"},
+};
+
+static struct pci_dev *mci_pdev = NULL;	/* init dev: in case the AGP code
+					   has already registered a driver */
+
+static int i82860_registered = 1;
+
+static void i82860_get_error_info (struct mem_ctl_info *mci,
+ struct i82860_error_info *info)
+{
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once and the registers can transition from CE being
+ * overwritten by UE.
+ */
+ pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts);
+ pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
+ pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, &info->derrsyn);
+ pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts2);
+
+ pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
+
+ /*
+ * If the error is the same for both reads then the first set of reads
+ * is valid. If there is a change then there is a CE no info and the
+ * second set of reads is valid and should be UE info.
+ */
+ if (!(info->errsts2 & 0x0003))
+ return;
+ if ((info->errsts ^ info->errsts2) & 0x0003) {
+ pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
+ pci_read_config_word(mci->pdev, I82860_DERRCTL_STS,
+ &info->derrsyn);
+ }
+}
+
+static int i82860_process_error_info (struct mem_ctl_info *mci,
+ struct i82860_error_info *info, int handle_errors)
+{
+ int row;
+
+ if (!(info->errsts2 & 0x0003))
+ return 0;
+
+ if (!handle_errors)
+ return 1;
+
+ if ((info->errsts ^ info->errsts2) & 0x0003) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ info->eap >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+ if (info->errsts & 0x0002)
+ edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+ else
+ edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+ 0, "i82860 UE");
+
+ return 1;
+}
+
+static void i82860_check(struct mem_ctl_info *mci)
+{
+ struct i82860_error_info info;
+
+ debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ i82860_get_error_info(mci, &info);
+ i82860_process_error_info(mci, &info, 1);
+}
+
+static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ struct mem_ctl_info *mci = NULL;
+ unsigned long last_cumul_size;
+
+ u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+
+ /* RDRAM has channels but these don't map onto the abstractions that
+ edac uses.
+ The device groups from the GRA registers seem to map reasonably
+ well onto the notion of a chip select row.
+	   There are 16 GRA registers; since the name is associated with
+	   the channel and the GRA registers map to physical devices, we
+	   create one channel per group.
+ */
+ mci = edac_mc_alloc(0, 16, 1);
+ if (!mci)
+ return -ENOMEM;
+
+ debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+ mci->pdev = pdev;
+ mci->mtype_cap = MEM_FLAG_DDR;
+
+
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ /* I"m not sure about this but I think that all RDRAM is SECDED */
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ /* adjust FLAGS */
+
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.1.2.6 $";
+ mci->ctl_name = i82860_devs[dev_idx].ctl_name;
+ mci->edac_check = i82860_check;
+ mci->ctl_page_to_phys = NULL;
+
+ pci_read_config_word(mci->pdev, I82860_MCHCFG, &mchcfg_ddim);
+ mchcfg_ddim = mchcfg_ddim & 0x180;
+
+ /*
+ * The group row boundary (GRA) reg values are boundary address
+ * for each DRAM row with a granularity of 16MB. GRA regs are
+ * cumulative; therefore GRA15 will contain the total memory contained
+	 * in all sixteen rows.
+ */
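+	/*
+	 * Worked example (illustrative): with the 16MiB granularity
+	 * implied by I82860_GBA_SHIFT and 4KiB pages, a register value
+	 * of 0x004 gives cumul_size = 4 << (24 - 12) = 16384 pages,
+	 * i.e. 64MiB of cumulative memory through this row.
+	 */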
+ for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+ u16 value;
+ u32 cumul_size;
+ struct csrow_info *csrow = &mci->csrows[index];
+
+ pci_read_config_word(mci->pdev, I82860_GBA + index * 2,
+ &value);
+
+ cumul_size = (value & I82860_GBA_MASK) <<
+ (I82860_GBA_SHIFT - PAGE_SHIFT);
+ debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+ __func__, index, cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB resolution */
+ csrow->mtype = MEM_RMBS;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+ }
+
+ /* clear counters */
+ pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n",
+ __func__);
+ edac_mc_free(mci);
+ } else {
+ /* get this far and it's successful */
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ rc = 0;
+ }
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82860_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ printk(KERN_INFO "i82860 init one\n");
+	if (pci_enable_device(pdev) < 0)
+		return -EIO;
+	rc = i82860_probe1(pdev, ent->driver_data);
+	if (rc == 0)
+ mci_pdev = pci_dev_get(pdev);
+ return rc;
+}
+
+static void __devexit i82860_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ mci = edac_mc_find_mci_by_pdev(pdev);
+ if ((mci != NULL) && (edac_mc_del_mc(mci) == 0))
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
+ {PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82860},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
+
+static struct pci_driver i82860_driver = {
+ .name = BS_MOD_STR,
+ .probe = i82860_init_one,
+ .remove = __devexit_p(i82860_remove_one),
+ .id_table = i82860_pci_tbl,
+};
+
+static int __init i82860_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
+ return pci_rc;
+
+ if (!mci_pdev) {
+ i82860_registered = 0;
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82860_0, NULL);
+ if (mci_pdev == NULL) {
+ debugf0("860 pci_get_device fail\n");
+ return -ENODEV;
+ }
+ pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("860 init fail\n");
+ pci_dev_put(mci_pdev);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static void __exit i82860_exit(void)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ pci_unregister_driver(&i82860_driver);
+ if (!i82860_registered) {
+ i82860_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(i82860_init);
+module_exit(i82860_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR
+ ("Red Hat Inc. (http://www.redhat.com.com) Ben Woodard <woodard@redhat.com>");
+MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
new file mode 100644
index 000000000000..009c08fe5d69
--- /dev/null
+++ b/drivers/edac/i82875p_edac.c
@@ -0,0 +1,532 @@
+/*
+ * Intel D82875P Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Contributors:
+ * Wang Zhenyu at intel.com
+ *
+ * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_82875_0
+#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
+#endif /* PCI_DEVICE_ID_INTEL_82875_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_82875_6
+#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
+#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
+
+
+/* four csrows in dual channel, eight in single channel */
+#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
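+/* e.g. I82875P_NR_CSROWS(2) == 4 in dual channel mode, I82875P_NR_CSROWS(1) == 8 */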
+
+
+/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
+#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
+ *
+ * 31:12 block address
+ * 11:0 reserved
+ */
+
+#define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
+ *
+ * 7:0 DRAM ECC Syndrome
+ */
+
+#define I82875P_DES 0x5d /* DRAM Error Status (8b)
+ *
+ * 7:1 reserved
+ * 0 Error channel 0/1
+ */
+
+#define I82875P_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15:10 reserved
+ * 9 non-DRAM lock error (ndlock)
+ * 8 Sftwr Generated SMI
+ * 7 ECC UE
+ * 6 reserved
+ * 5 MCH detects unimplemented cycle
+ * 4 AGP access outside GA
+ * 3 Invalid AGP access
+ * 2 Invalid GA translation table
+ * 1 Unsupported AGP command
+ * 0 ECC CE
+ */
+
+#define I82875P_ERRCMD 0xca /* Error Command (16b)
+ *
+ * 15:10 reserved
+ * 9 SERR on non-DRAM lock
+ * 8 SERR on ECC UE
+ * 7 SERR on ECC CE
+ * 6 target abort on high exception
+ * 5 detect unimplemented cyc
+ * 4 AGP access outside of GA
+ * 3 SERR on invalid AGP access
+ * 2 invalid translation table
+ * 1 SERR on unsupported AGP command
+ * 0 reserved
+ */
+
+
+/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
+#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
+ *
+ * 15:10 reserved
+ * 9 fast back-to-back - ro 0
+ * 8 SERR enable - ro 0
+ * 7 addr/data stepping - ro 0
+ * 6 parity err enable - ro 0
+ * 5 VGA palette snoop - ro 0
+ * 4 mem wr & invalidate - ro 0
+ * 3 special cycle - ro 0
+ * 2 bus master - ro 0
+ * 1 mem access dev6 - 0(dis),1(en)
+ * 0 IO access dev3 - 0(dis),1(en)
+ */
+
+#define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b)
+ *
+ * 31:12 mem base addr [31:12]
+ * 11:4 address mask - ro 0
+ * 3 prefetchable - ro 0(non),1(pre)
+ * 2:1 mem type - ro 0
+ * 0 mem space - ro 0
+ */
+
+/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
+
+#define I82875P_DRB_SHIFT 26 /* 64MiB grain */
+#define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8)
+ *
+ * 7 reserved
+ * 6:0 64MiB row boundary addr
+ */
+
+#define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8)
+ *
+ * 7 reserved
+ * 6:4 row attr row 1
+ * 3 reserved
+ * 2:0 row attr row 0
+ *
+ * 000 = 4KiB
+ * 001 = 8KiB
+ * 010 = 16KiB
+ * 011 = 32KiB
+ */
+
+#define I82875P_DRC 0x68 /* DRAM Controller Mode (32b)
+ *
+ * 31:30 reserved
+ * 29 init complete
+ * 28:23 reserved
+ * 22:21 nr chan 00=1,01=2
+ * 20 reserved
+ * 19:18 Data Integ Mode 00=none,01=ecc
+ * 17:11 reserved
+ * 10:8 refresh mode
+ * 7 reserved
+ * 6:4 mode select
+ * 3:2 reserved
+ * 1:0 DRAM type 01=DDR
+ */
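+
+/*
+ * Decoding sketch, mirroring i82875p_probe1() below:
+ *
+ *	drc = readl(ovrfl_window + I82875P_DRC);
+ *	nr_chans = ((drc >> 21) & 0x1) + 1;	(one or two channels)
+ *	drc_ddim = (drc >> 18) & 0x1;		(ECC mode enabled?)
+ */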
+
+
+enum i82875p_chips {
+ I82875P = 0,
+};
+
+
+struct i82875p_pvt {
+ struct pci_dev *ovrfl_pdev;
+ void *ovrfl_window;
+};
+
+
+struct i82875p_dev_info {
+ const char *ctl_name;
+};
+
+
+struct i82875p_error_info {
+ u16 errsts;
+ u32 eap;
+ u8 des;
+ u8 derrsyn;
+ u16 errsts2;
+};
+
+
+static const struct i82875p_dev_info i82875p_devs[] = {
+ [I82875P] = {
+ .ctl_name = "i82875p"},
+};
+
+static struct pci_dev *mci_pdev = NULL;	/* init dev: in case the AGP code
+					   has already registered a driver */
+static int i82875p_registered = 1;
+
+static void i82875p_get_error_info (struct mem_ctl_info *mci,
+ struct i82875p_error_info *info)
+{
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once and the registers can transition from CE being
+ * overwritten by UE.
+ */
+ pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts);
+ pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
+ pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
+ pci_read_config_byte(mci->pdev, I82875P_DERRSYN, &info->derrsyn);
+ pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts2);
+
+ pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
+
+ /*
+	 * If the error is the same for both reads then the first set of
+	 * reads is valid.  If there is a change then there is a CE with
+	 * no info and the second set of reads is valid and should be
+	 * the UE info.
+ */
+ if (!(info->errsts2 & 0x0081))
+ return;
+ if ((info->errsts ^ info->errsts2) & 0x0081) {
+ pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
+ pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
+ pci_read_config_byte(mci->pdev, I82875P_DERRSYN,
+ &info->derrsyn);
+ }
+}
+
+static int i82875p_process_error_info (struct mem_ctl_info *mci,
+ struct i82875p_error_info *info, int handle_errors)
+{
+ int row, multi_chan;
+
+ multi_chan = mci->csrows[0].nr_channels - 1;
+
+ if (!(info->errsts2 & 0x0081))
+ return 0;
+
+ if (!handle_errors)
+ return 1;
+
+ if ((info->errsts ^ info->errsts2) & 0x0081) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ info->eap >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+ if (info->errsts & 0x0080)
+ edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+ else
+ edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+ multi_chan ? (info->des & 0x1) : 0,
+ "i82875p CE");
+
+ return 1;
+}
+
+
+static void i82875p_check(struct mem_ctl_info *mci)
+{
+ struct i82875p_error_info info;
+
+ debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ i82875p_get_error_info(mci, &info);
+ i82875p_process_error_info(mci, &info, 1);
+}
+
+
+#ifdef CONFIG_PROC_FS
+extern int pci_proc_attach_device(struct pci_dev *);
+#endif
+
+static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ struct mem_ctl_info *mci = NULL;
+ struct i82875p_pvt *pvt = NULL;
+ unsigned long last_cumul_size;
+ struct pci_dev *ovrfl_pdev;
+ void __iomem *ovrfl_window = NULL;
+
+ u32 drc;
+ u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
+ u32 nr_chans;
+ u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
+
+ if (!ovrfl_pdev) {
+ /*
+ * Intel tells BIOS developers to hide device 6 which
+ * configures the overflow device access containing
+ * the DRBs - this is where we expose device 6.
+ * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+ */
+ pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
+ ovrfl_pdev =
+ pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
+ if (!ovrfl_pdev)
+ goto fail;
+ }
+#ifdef CONFIG_PROC_FS
+ if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
+ printk(KERN_ERR "MC: " __FILE__
+ ": %s(): Failed to attach overflow device\n",
+ __func__);
+ goto fail;
+ }
+#endif /* CONFIG_PROC_FS */
+ if (pci_enable_device(ovrfl_pdev)) {
+ printk(KERN_ERR "MC: " __FILE__
+ ": %s(): Failed to enable overflow device\n",
+ __func__);
+ goto fail;
+ }
+
+ if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
+#ifdef CORRECT_BIOS
+ goto fail;
+#endif
+ }
+ /* cache is irrelevant for PCI bus reads/writes */
+ ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
+ pci_resource_len(ovrfl_pdev, 0));
+
+ if (!ovrfl_window) {
+ printk(KERN_ERR "MC: " __FILE__
+ ": %s(): Failed to ioremap bar6\n", __func__);
+ goto fail;
+ }
+
+ /* need to find out the number of channels */
+ drc = readl(ovrfl_window + I82875P_DRC);
+ drc_chan = ((drc >> 21) & 0x1);
+ nr_chans = drc_chan + 1;
+ drc_ddim = (drc >> 18) & 0x1;
+
+ mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
+ nr_chans);
+
+ if (!mci) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+ mci->pdev = pdev;
+ mci->mtype_cap = MEM_FLAG_DDR;
+
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_UNKNOWN;
+ /* adjust FLAGS */
+
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.5.2.11 $";
+ mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
+ mci->edac_check = i82875p_check;
+ mci->ctl_page_to_phys = NULL;
+
+ debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+
+ pvt = (struct i82875p_pvt *) mci->pvt_info;
+ pvt->ovrfl_pdev = ovrfl_pdev;
+ pvt->ovrfl_window = ovrfl_window;
+
+ /*
+ * The dram row boundary (DRB) reg values are boundary address
+ * for each DRAM row with a granularity of 32 or 64MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
+ */
+ for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+ u8 value;
+ u32 cumul_size;
+ struct csrow_info *csrow = &mci->csrows[index];
+
+ value = readb(ovrfl_window + I82875P_DRB + index);
+ cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
+ debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+ __func__, index, cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
+ csrow->mtype = MEM_DDR;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+ }
+
+ /* clear counters */
+ pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+ fail:
+ if (mci)
+ edac_mc_free(mci);
+
+ if (ovrfl_window)
+ iounmap(ovrfl_window);
+
+ if (ovrfl_pdev) {
+ pci_release_regions(ovrfl_pdev);
+ pci_disable_device(ovrfl_pdev);
+ }
+
+ /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
+ return rc;
+}
+
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82875p_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ printk(KERN_INFO "i82875p init one\n");
+	if (pci_enable_device(pdev) < 0)
+ return -EIO;
+ rc = i82875p_probe1(pdev, ent->driver_data);
+ if (mci_pdev == NULL)
+ mci_pdev = pci_dev_get(pdev);
+ return rc;
+}
+
+
+static void __devexit i82875p_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i82875p_pvt *pvt = NULL;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+ return;
+
+ pvt = (struct i82875p_pvt *) mci->pvt_info;
+ if (pvt->ovrfl_window)
+ iounmap(pvt->ovrfl_window);
+
+ if (pvt->ovrfl_pdev) {
+#ifdef CORRECT_BIOS
+ pci_release_regions(pvt->ovrfl_pdev);
+#endif /*CORRECT_BIOS */
+ pci_disable_device(pvt->ovrfl_pdev);
+ pci_dev_put(pvt->ovrfl_pdev);
+ }
+
+ if (edac_mc_del_mc(mci))
+ return;
+
+ edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
+ {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82875P},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
+
+
+static struct pci_driver i82875p_driver = {
+ .name = BS_MOD_STR,
+ .probe = i82875p_init_one,
+ .remove = __devexit_p(i82875p_remove_one),
+ .id_table = i82875p_pci_tbl,
+};
+
+
+static int __init i82875p_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ pci_rc = pci_register_driver(&i82875p_driver);
+ if (pci_rc < 0)
+ return pci_rc;
+ if (mci_pdev == NULL) {
+ i82875p_registered = 0;
+ mci_pdev =
+ pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82875_0, NULL);
+ if (!mci_pdev) {
+ debugf0("875p pci_get_device fail\n");
+ return -ENODEV;
+ }
+ pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("875p init fail\n");
+ pci_dev_put(mci_pdev);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+
+static void __exit i82875p_exit(void)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ pci_unregister_driver(&i82875p_driver);
+ if (!i82875p_registered) {
+ i82875p_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+
+module_init(i82875p_init);
+module_exit(i82875p_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
new file mode 100644
index 000000000000..e90892831b90
--- /dev/null
+++ b/drivers/edac/r82600_edac.c
@@ -0,0 +1,407 @@
+/*
+ * Radisys 82600 Embedded chipset Memory Controller kernel module
+ * (C) 2005 EADS Astrium
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne
+ * Harbaugh, Dan Hollis <goemon at anime dot net> and others.
+ *
+ * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ * Written with reference to 82600 High Integration Dual PCI System
+ * Controller Data Book:
+ * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
+ * references to this document given in []
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+/* Radisys say "The 82600 integrates a main memory SDRAM controller that
+ * supports up to four banks of memory. The four banks can support a mix of
+ * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
+ * each of which can be any size from 16MB to 512MB. Both registered (control
+ * signals buffered) and unbuffered DIMM types are supported. Mixing of
+ * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
+ * is not allowed. The 82600 SDRAM interface operates at the same frequency as
+ * the CPU bus, 66MHz, 100MHz or 133MHz."
+ */
+
+#define R82600_NR_CSROWS 4
+#define R82600_NR_CHANS 1
+#define R82600_NR_DIMMS 4
+
+#define R82600_BRIDGE_ID 0x8200
+
+/* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
+#define R82600_DRAMC 0x57 /* Various SDRAM related control bits
+ * all bits are R/W
+ *
+ * 7 SDRAM ISA Hole Enable
+ * 6 Flash Page Mode Enable
+ * 5 ECC Enable: 1=ECC 0=noECC
+ * 4 DRAM DIMM Type: 1=
+ * 3 BIOS Alias Disable
+ * 2 SDRAM BIOS Flash Write Enable
+ * 1:0 SDRAM Refresh Rate: 00=Disabled
+ * 01=7.8usec (256Mbit SDRAMs)
+ * 10=15.6us 11=125usec
+ */
+
+#define R82600_SDRAMC 0x76 /* "SDRAM Control Register"
+ * More SDRAM related control bits
+ * all bits are R/W
+ *
+ * 15:8 Reserved.
+ *
+ * 7:5 Special SDRAM Mode Select
+ *
+ * 4 Force ECC
+ *
+ * 1=Drive ECC bits to 0 during
+ * write cycles (i.e. ECC test mode)
+ *
+ * 0=Normal ECC functioning
+ *
+ * 3 Enhanced Paging Enable
+ *
+ * 2 CAS# Latency 0=3clks 1=2clks
+ *
+ * 1 RAS# to CAS# Delay 0=3 1=2
+ *
+ * 0 RAS# Precharge 0=3 1=2
+ */
+
+#define R82600_EAP 0x80 /* ECC Error Address Pointer Register
+ *
+ * 31 Disable Hardware Scrubbing (RW)
+ * 0=Scrub on corrected read
+ * 1=Don't scrub on corrected read
+ *
+ * 30:12 Error Address Pointer (RO)
+ * Upper 19 bits of error address
+ *
+ * 11:4 Syndrome Bits (RO)
+ *
+ * 3 BSERR# on multibit error (RW)
+ * 1=enable 0=disable
+ *
+					 * 2     NMI on Single Bit Error (RW)
+					 *        1=NMI triggered by SBE, n.b. other
+					 *          prerequisites apply
+ * 0=NMI not triggered
+ *
+ * 1 MBE (R/WC)
+ * read 1=MBE at EAP (see above)
+ * read 0=no MBE, or SBE occurred first
+ * write 1=Clear MBE status (must also
+ * clear SBE)
+ * write 0=NOP
+ *
+					 * 0     SBE (R/WC)
+ * read 1=SBE at EAP (see above)
+ * read 0=no SBE, or MBE occurred first
+ * write 1=Clear SBE status (must also
+ * clear MBE)
+ * write 0=NOP
+ */
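+
+/*
+ * Decoding sketch (see r82600_process_error_info() below): the 19
+ * address bits in 30:12 are shifted back up to form error address bits
+ * 31:13, the syndrome is taken from bits 11:4, and bits 1 and 0 flag an
+ * MBE or SBE which is then mapped to a csrow with
+ * edac_mc_find_csrow_by_page().
+ */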
+
+#define R82600_DRBA	0x60	/* + 0x60..0x63 SDRAM Row Boundary Address
+ * Registers
+ *
+ * 7:0 Address lines 30:24 - upper limit of
+ * each row [p57]
+ */
+
+struct r82600_error_info {
+ u32 eapr;
+};
+
+
+static unsigned int disable_hardware_scrub = 0;
+
+
+static void r82600_get_error_info (struct mem_ctl_info *mci,
+ struct r82600_error_info *info)
+{
+ pci_read_config_dword(mci->pdev, R82600_EAP, &info->eapr);
+
+ if (info->eapr & BIT(0))
+ /* Clear error to allow next error to be reported [p.62] */
+ pci_write_bits32(mci->pdev, R82600_EAP,
+				 ((u32) BIT(0) | (u32) BIT(1)),
+				 ((u32) BIT(0) | (u32) BIT(1)));
+
+ if (info->eapr & BIT(1))
+ /* Clear error to allow next error to be reported [p.62] */
+ pci_write_bits32(mci->pdev, R82600_EAP,
+				 ((u32) BIT(0) | (u32) BIT(1)),
+				 ((u32) BIT(0) | (u32) BIT(1)));
+}
+
+
+static int r82600_process_error_info (struct mem_ctl_info *mci,
+ struct r82600_error_info *info, int handle_errors)
+{
+ int error_found;
+ u32 eapaddr, page;
+ u32 syndrome;
+
+ error_found = 0;
+
+ /* bits 30:12 store the upper 19 bits of the 32 bit error address */
+	eapaddr = ((info->eapr >> 12) & 0x7FFFF) << 13;
+ /* Syndrome in bits 11:4 [p.62] */
+ syndrome = (info->eapr >> 4) & 0xFF;
+
+ /* the R82600 reports at less than page *
+ * granularity (upper 19 bits only) */
+ page = eapaddr >> PAGE_SHIFT;
+
+ if (info->eapr & BIT(0)) { /* CE? */
+ error_found = 1;
+
+ if (handle_errors)
+ edac_mc_handle_ce(
+ mci, page, 0, /* not avail */
+ syndrome,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, /* channel */
+ mci->ctl_name);
+ }
+
+ if (info->eapr & BIT(1)) { /* UE? */
+ error_found = 1;
+
+ if (handle_errors)
+ /* 82600 doesn't give enough info */
+ edac_mc_handle_ue(mci, page, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ mci->ctl_name);
+ }
+
+ return error_found;
+}
+
+static void r82600_check(struct mem_ctl_info *mci)
+{
+ struct r82600_error_info info;
+
+ debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ r82600_get_error_info(mci, &info);
+ r82600_process_error_info(mci, &info, 1);
+}
+
+static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ struct mem_ctl_info *mci = NULL;
+ u8 dramcr;
+ u32 ecc_on;
+ u32 reg_sdram;
+ u32 eapr;
+ u32 scrub_disabled;
+ u32 sdram_refresh_rate;
+ u32 row_high_limit_last = 0;
+ u32 eap_init_bits;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+
+ pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
+ pci_read_config_dword(pdev, R82600_EAP, &eapr);
+
+ ecc_on = dramcr & BIT(5);
+ reg_sdram = dramcr & BIT(4);
+ scrub_disabled = eapr & BIT(31);
+ sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
+
+ debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n",
+ __func__, sdram_refresh_rate);
+
+ debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__,
+ dramcr);
+
+ mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
+
+ if (mci == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+ mci->pdev = pdev;
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ /* FIXME try to work out if the chip leads have been *
+ * used for COM2 instead on this board? [MA6?] MAYBE: */
+
+ /* On the R82600, the pins for memory bits 72:65 - i.e. the *
+ * EC bits are shared with the pins for COM2 (!), so if COM2 *
+ * is enabled, we assume COM2 is wired up, and thus no EDAC *
+ * is possible. */
+ mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ if (ecc_on) {
+ if (scrub_disabled)
+ debugf3("MC: " __FILE__ ": %s(): mci = %p - "
+ "Scrubbing disabled! EAP: %#0x\n", __func__,
+ mci, eapr);
+ } else
+ mci->edac_cap = EDAC_FLAG_NONE;
+
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.1.2.6 $";
+ mci->ctl_name = "R82600";
+ mci->edac_check = r82600_check;
+ mci->ctl_page_to_phys = NULL;
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ struct csrow_info *csrow = &mci->csrows[index];
+		u8 drbar;	/* SDRAM Row Boundary Address Register */
+ u32 row_high_limit;
+ u32 row_base;
+
+ /* find the DRAM Chip Select Base address and mask */
+ pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar);
+
+ debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n",
+ mci->mc_idx, __func__, index, drbar);
+
+ row_high_limit = ((u32) drbar << 24);
+/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
+
+ debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
+ "Boundry Address=%#0x, Last = %#0x \n",
+ mci->mc_idx, __func__, index, row_high_limit,
+ row_high_limit_last);
+
+ /* Empty row [p.57] */
+ if (row_high_limit == row_high_limit_last)
+ continue;
+
+ row_base = row_high_limit_last;
+
+ csrow->first_page = row_base >> PAGE_SHIFT;
+ csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
+ csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+ /* Error address is top 19 bits - so granularity is *
+ * 14 bits */
+ csrow->grain = 1 << 14;
+ csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+ /* FIXME - check that this is unknowable with this chipset */
+ csrow->dtype = DEV_UNKNOWN;
+
+ /* Mode is global on 82600 */
+ csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+ row_high_limit_last = row_high_limit;
+ }
+
+ /* clear counters */
+ /* FIXME should we? */
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+
+ /* Clear error flags to allow next error to be reported [p.62] */
+ /* Test systems seem to always have the UE flag raised on boot */
+
+	eap_init_bits = BIT(0) | BIT(1);
+ if (disable_hardware_scrub) {
+ eap_init_bits |= BIT(31);
+ debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub "
+ "(scrub on error)\n", __func__);
+ }
+
+ pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits,
+ eap_init_bits);
+
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+fail:
+ if (mci)
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit r82600_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* don't need to call pci_device_enable() */
+ return r82600_probe1(pdev, ent->driver_data);
+}
+
+
+static void __devexit r82600_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
+ !edac_mc_del_mc(mci))
+ edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
+
+
+static struct pci_driver r82600_driver = {
+ .name = BS_MOD_STR,
+ .probe = r82600_init_one,
+ .remove = __devexit_p(r82600_remove_one),
+ .id_table = r82600_pci_tbl,
+};
+
+
+static int __init r82600_init(void)
+{
+ return pci_register_driver(&r82600_driver);
+}
+
+
+static void __exit r82600_exit(void)
+{
+ pci_unregister_driver(&r82600_driver);
+}
+
+
+module_init(r82600_init);
+module_exit(r82600_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
+ "on behalf of EADS Astrium");
+MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
+
+module_param(disable_hardware_scrub, bool, 0644);
+MODULE_PARM_DESC(disable_hardware_scrub,
+ "If set, disable the chipset's automatic scrub for CEs");
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index ca99979c868a..8b3515f394a6 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -8,6 +8,7 @@
* completion notification.
*/
+#include <asm/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1421941487c4..626508afe1b1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -7,6 +7,7 @@ menu "Network device support"
config NETDEVICES
depends on NET
+ default y if UML
bool "Network device support"
---help---
You can say N here if you don't intend to connect your Linux box to
@@ -1914,6 +1915,15 @@ config E1000_NAPI
If in doubt, say N.
+config E1000_DISABLE_PACKET_SPLIT
+ bool "Disable Packet Split for PCI express adapters"
+ depends on E1000
+ help
+ Say Y here if you want to use the legacy receive path for PCI express
+	  hardware.
+
+ If in doubt, say N.
+
source "drivers/net/ixp2000/Kconfig"
config MYRI_SBUS
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 7aa49b974dc5..df9d6e80c4f2 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2136,7 +2136,7 @@ static int __init b44_init(void)
	/* Setup parameters for syncing RX/TX DMA descriptors */
dma_desc_align_mask = ~(dma_desc_align_size - 1);
- dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));
+ dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
return pci_module_init(&b44_driver);
}
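The b44 hunk above swaps max() for max_t() because dma_desc_align_size is an unsigned int while sizeof() yields a size_t, and the kernel's type-safe max() rejects mixed-type arguments at compile time. Simplified versions of the two macros (close to, but not verbatim, the kernel definitions) showing the difference:

	/* max() type-checks both arguments: comparing pointers to values
	 * of different types triggers a compiler warning. */
	#define max(x, y) ({				\
		typeof(x) _x = (x);			\
		typeof(y) _y = (y);			\
		(void)(&_x == &_y);	/* warns unless x, y share a type */ \
		_x > _y ? _x : _y; })

	/* max_t() sidesteps the check by casting both sides to the
	 * caller-named type first. */
	#define max_t(type, x, y) ({			\
		type _x = (x);				\
		type _y = (y);				\
		_x > _y ? _x : _y; })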
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 1f7ca453bb4a..6e295fce5c6f 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -335,6 +335,30 @@ static inline void cas_mask_intr(struct cas *cp)
cas_disable_irq(cp, i);
}
+static inline void cas_buffer_init(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ atomic_set((atomic_t *)&page->lru.next, 1);
+}
+
+static inline int cas_buffer_count(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ return atomic_read((atomic_t *)&page->lru.next);
+}
+
+static inline void cas_buffer_inc(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ atomic_inc((atomic_t *)&page->lru.next);
+}
+
+static inline void cas_buffer_dec(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ atomic_dec((atomic_t *)&page->lru.next);
+}
+
static void cas_enable_irq(struct cas *cp, const int ring)
{
if (ring == 0) { /* all but TX_DONE */
@@ -472,6 +496,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
{
pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
PCI_DMA_FROMDEVICE);
+ cas_buffer_dec(page);
__free_pages(page->buffer, cp->page_order);
kfree(page);
return 0;
@@ -501,6 +526,7 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
page->buffer = alloc_pages(flags, cp->page_order);
if (!page->buffer)
goto page_err;
+ cas_buffer_init(page);
page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
cp->page_size, PCI_DMA_FROMDEVICE);
return page;
@@ -579,7 +605,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
list_for_each_safe(elem, tmp, &list) {
cas_page_t *page = list_entry(elem, cas_page_t, list);
- if (page_count(page->buffer) > 1)
+ if (cas_buffer_count(page) > 1)
continue;
list_del(elem);
@@ -1347,7 +1373,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
cas_page_t *page = cp->rx_pages[1][index];
cas_page_t *new;
- if (page_count(page->buffer) == 1)
+ if (cas_buffer_count(page) == 1)
return page;
new = cas_page_dequeue(cp);
@@ -1367,7 +1393,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
cas_page_t **page1 = cp->rx_pages[1];
/* swap if buffer is in use */
- if (page_count(page0[index]->buffer) > 1) {
+ if (cas_buffer_count(page0[index]) > 1) {
cas_page_t *new = cas_page_spare(cp, index);
if (new) {
page1[index] = page0[index];
@@ -1925,8 +1951,8 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n",
- cp->dev->name, status, compwb);
+ printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
+ cp->dev->name, status, (unsigned long long)compwb);
/* process all the rings */
for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
@@ -2039,6 +2065,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
skb->len += hlen - swivel;
get_page(page->buffer);
+ cas_buffer_inc(page);
frag->page = page->buffer;
frag->page_offset = off;
frag->size = hlen - swivel;
@@ -2063,6 +2090,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
frag++;
get_page(page->buffer);
+ cas_buffer_inc(page);
frag->page = page->buffer;
frag->page_offset = 0;
frag->size = hlen;
@@ -2225,7 +2253,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
released = 0;
while (entry != last) {
/* make a new buffer if it's still in use */
- if (page_count(page[entry]->buffer) > 1) {
+ if (cas_buffer_count(page[entry]) > 1) {
cas_page_t *new = cas_page_dequeue(cp);
if (!new) {
/* let the timer know that we need to
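The cas_buffer_* helpers introduced above keep a private reference count in page->lru.next — a field that is otherwise idle while the driver owns the page — rather than leaning on the global page count, so the driver can tell whether an skb fragment still references an RX buffer. A minimal sketch of the pattern, with hypothetical names and kernel context assumed:

	/* The owner repurposes a field it knows is unused as an atomic_t.
	 * This assumes the page never sits on an LRU list while the
	 * driver holds it. */
	struct my_rx_buf {
		struct page *page;
	};

	static inline void my_rx_buf_init(struct my_rx_buf *b)
	{
		/* one reference: the driver itself */
		atomic_set((atomic_t *)&b->page->lru.next, 1);
	}

	static inline void my_rx_buf_get(struct my_rx_buf *b)
	{
		atomic_inc((atomic_t *)&b->page->lru.next);
	}

	static inline int my_rx_buf_busy(struct my_rx_buf *b)
	{
		/* >1 means the stack still holds a fragment pointing here */
		return atomic_read((atomic_t *)&b->page->lru.next) > 1;
	}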
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 4726722a0635..bf1fd2b98bf8 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,25 +1,25 @@
/*******************************************************************************
-
+
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 2 of the License, or (at your option)
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
any later version.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
-
+
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc., 59
+ this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
+
The full GNU General Public License is included in this distribution in the
file called LICENSE.
-
+
Contact Information:
Linux NICS <linux.nics@intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -160,7 +160,7 @@
#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
-#define DRV_VERSION "3.4.14-k4"DRV_EXT
+#define DRV_VERSION "3.5.10-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
#define PFX DRV_NAME ": "
@@ -320,7 +320,7 @@ enum cuc_dump {
cuc_dump_complete = 0x0000A005,
cuc_dump_reset_complete = 0x0000A007,
};
-
+
enum port {
software_reset = 0x0000,
selftest = 0x0001,
@@ -715,10 +715,10 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
e100_write_flush(nic); udelay(4);
-
+
writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
e100_write_flush(nic); udelay(4);
-
+
/* Eeprom drives a dummy zero to EEDO after receiving
* complete address. Use this to adjust addr_len. */
ctrl = readb(&nic->csr->eeprom_ctrl_lo);
@@ -726,7 +726,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
*addr_len -= (i - 16);
i = 17;
}
-
+
data = (data << 1) | (ctrl & eedo ? 1 : 0);
}
@@ -1170,7 +1170,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
}
-static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
static struct {
@@ -1213,13 +1213,13 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
* driver can change the algorithm.
*
* INTDELAY - This loads the dead-man timer with its initial value.
-* When this timer expires the interrupt is asserted, and the
+* When this timer expires the interrupt is asserted, and the
* timer is reset each time a new packet is received. (see
* BUNDLEMAX below to set the limit on number of chained packets)
* The current default is 0x600 or 1536. Experiments show that
* the value should probably stay within the 0x200 - 0x1000 range.
*
-* BUNDLEMAX -
+* BUNDLEMAX -
* This sets the maximum number of frames that will be bundled. In
* some situations, such as the TCP windowing algorithm, it may be
* better to limit the growth of the bundle size than let it go as
@@ -1229,7 +1229,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
* an interrupt for every frame received. If you do not want to put
* a limit on the bundle size, set this value to xFFFF.
*
-* BUNDLESMALL -
+* BUNDLESMALL -
* This contains a bit-mask describing the minimum size frame that
* will be bundled. The default masks the lower 7 bits, which means
* that any frame less than 128 bytes in length will not be bundled,
@@ -1244,7 +1244,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
*
* The current default is 0xFF80, which masks out the lower 7 bits.
* This means that any frame which is x7F (127) bytes or smaller
-* will cause an immediate interrupt. Because this value must be a
+* will cause an immediate interrupt. Because this value must be a
* bit mask, there are only a few valid values that can be used. To
* turn this feature off, the driver can write the value xFFFF to the
* lower word of this instruction (in the same way that the other
@@ -1253,7 +1253,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
* standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/
-/* if you wish to disable the ucode functionality, while maintaining the
+/* if you wish to disable the ucode functionality, while maintaining the
* workarounds it provides, set the following defines to:
* BUNDLESMALL 0
* BUNDLEMAX 1
@@ -1284,12 +1284,46 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
for (i = 0; i < UCODE_SIZE; i++)
cb->u.ucode[i] = cpu_to_le32(ucode[i]);
- cb->command = cpu_to_le16(cb_ucode);
+ cb->command = cpu_to_le16(cb_ucode | cb_el);
return;
}
noloaducode:
- cb->command = cpu_to_le16(cb_nop);
+ cb->command = cpu_to_le16(cb_nop | cb_el);
+}
+
+static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
+ void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+{
+ int err = 0, counter = 50;
+ struct cb *cb = nic->cb_to_clean;
+
+	if ((err = e100_exec_cb(nic, skb, cb_prepare)))
+		DPRINTK(PROBE, ERR, "ucode cmd failed with error %d\n", err);
+
+ /* must restart cuc */
+ nic->cuc_cmd = cuc_start;
+
+ /* wait for completion */
+ e100_write_flush(nic);
+ udelay(10);
+
+ /* wait for possibly (ouch) 500ms */
+ while (!(cb->status & cpu_to_le16(cb_complete))) {
+ msleep(10);
+ if (!--counter) break;
+ }
+
+	/* ack any interrupts, something could have been set */
+ writeb(~0, &nic->csr->scb.stat_ack);
+
+ /* if the command failed, or is not OK, notify and return */
+ if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
+ DPRINTK(PROBE,ERR, "ucode load failed\n");
+ err = -EPERM;
+ }
+
+ return err;
}
static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
@@ -1357,13 +1391,13 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
}
- if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
+ if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
/* enable/disable MDI/MDI-X auto-switching.
MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
- (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
- !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
+ (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
+ !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
else
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
@@ -1388,7 +1422,7 @@ static int e100_hw_init(struct nic *nic)
return err;
if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
return err;
- if((err = e100_exec_cb(nic, NULL, e100_load_ucode)))
+ if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
return err;
if((err = e100_exec_cb(nic, NULL, e100_configure)))
return err;
@@ -1493,7 +1527,7 @@ static void e100_update_stats(struct nic *nic)
}
}
-
+
if(e100_exec_cmd(nic, cuc_dump_reset, 0))
DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
@@ -1542,10 +1576,10 @@ static void e100_watchdog(unsigned long data)
mii_check_link(&nic->mii);
/* Software generated interrupt to recover from (rare) Rx
- * allocation failure.
- * Unfortunately have to use a spinlock to not re-enable interrupts
- * accidentally, due to hardware that shares a register between the
- * interrupt mask bit and the SW Interrupt generation bit */
+ * allocation failure.
+ * Unfortunately have to use a spinlock to not re-enable interrupts
+ * accidentally, due to hardware that shares a register between the
+ * interrupt mask bit and the SW Interrupt generation bit */
spin_lock_irq(&nic->cmd_lock);
writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
spin_unlock_irq(&nic->cmd_lock);
@@ -1830,7 +1864,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
struct rx *rx_to_start = NULL;
/* are we already rnr? then pay attention!!! this ensures that
- * the state machine progression never allows a start with a
+ * the state machine progression never allows a start with a
* partially cleaned list, avoiding a race between hardware
* and rx_to_clean when in NAPI mode */
if(RU_SUSPENDED == nic->ru_running)
@@ -2066,7 +2100,7 @@ static void e100_tx_timeout(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
- /* Reset outside of interrupt context, to avoid request_irq
+ /* Reset outside of interrupt context, to avoid request_irq
* in interrupt context */
schedule_work(&nic->tx_timeout_task);
}
@@ -2313,7 +2347,7 @@ static int e100_set_ringparam(struct net_device *netdev,
struct param_range *rfds = &nic->params.rfds;
struct param_range *cbs = &nic->params.cbs;
- if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
if(netif_running(netdev))
@@ -2631,7 +2665,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->flags |= wol_magic;
/* ack any pending wake events, disable PME */
- pci_enable_wake(pdev, 0, 0);
+ err = pci_enable_wake(pdev, 0, 0);
+ if (err)
+ DPRINTK(PROBE, ERR, "Error clearing wake event\n");
strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev))) {
@@ -2682,6 +2718,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
+ int retval;
if(netif_running(netdev))
e100_down(nic);
@@ -2689,9 +2726,14 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(netdev);
pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic)));
+ retval = pci_enable_wake(pdev, pci_choose_state(pdev, state),
+ nic->flags & (wol_magic | e100_asf(nic)));
+ if (retval)
+ DPRINTK(PROBE,ERR, "Error enabling wake\n");
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ if (retval)
+ DPRINTK(PROBE,ERR, "Error %d setting power state\n", retval);
return 0;
}
@@ -2700,11 +2742,16 @@ static int e100_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
+ int retval;
- pci_set_power_state(pdev, PCI_D0);
+ retval = pci_set_power_state(pdev, PCI_D0);
+ if (retval)
+ DPRINTK(PROBE,ERR, "Error waking adapter\n");
pci_restore_state(pdev);
/* ack any pending wake events, disable PME */
- pci_enable_wake(pdev, 0, 0);
+ retval = pci_enable_wake(pdev, 0, 0);
+ if (retval)
+ DPRINTK(PROBE,ERR, "Error clearing wake events\n");
if(e100_hw_init(nic))
DPRINTK(HW, ERR, "e100_hw_init failed\n");
@@ -2721,12 +2768,15 @@ static void e100_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
+ int retval;
#ifdef CONFIG_PM
- pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+ retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
#else
- pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
+ retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
#endif
+ if (retval)
+ DPRINTK(PROBE,ERR, "Error enabling wake\n");
}
@@ -2739,7 +2789,7 @@ static struct pci_driver e100_driver = {
.suspend = e100_suspend,
.resume = e100_resume,
#endif
- .shutdown = e100_shutdown,
+ .shutdown = e100_shutdown,
};
static int __init e100_init_module(void)
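e100_exec_cb_wait() above uses a bounded poll: issue the command, flush, then re-read the status word with a short sleep between reads, giving up after a fixed number of iterations (50 x 10ms, so at most ~500ms for a wedged device). A generic sketch of the idiom — a hypothetical helper, kernel context assumed; the driver's cpu_to_le16 conversion and interrupt ack are elided, and this version returns -ETIMEDOUT where e100 chose -EPERM:

	/* Poll *status for done_bit, sleeping 10ms between reads.
	 * Bails out with -ETIMEDOUT after 50 tries. */
	static int poll_for_completion(volatile u16 *status, u16 done_bit)
	{
		int counter = 50;	/* 50 * 10ms = 500ms worst case */

		while (!(*status & done_bit)) {
			msleep(10);
			if (!--counter)
				return -ETIMEDOUT; /* device never completed */
		}
		return 0;
	}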
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index e02e9ba2e18b..27c77306193b 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -72,10 +72,6 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#ifdef CONFIG_E1000_MQ
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#endif
#define BAR_0 0
#define BAR_1 1
@@ -87,6 +83,10 @@
struct e1000_adapter;
#include "e1000_hw.h"
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif
#ifdef DBG
#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,6 +169,13 @@ struct e1000_buffer {
uint16_t next_to_watch;
};
+#ifdef CONFIG_E1000_MQ
+struct e1000_queue_stats {
+ uint64_t packets;
+ uint64_t bytes;
+};
+#endif
+
struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
@@ -191,10 +198,12 @@ struct e1000_tx_ring {
spinlock_t tx_lock;
uint16_t tdh;
uint16_t tdt;
- uint64_t pkt;
boolean_t last_tx_tso;
+#ifdef CONFIG_E1000_MQ
+ struct e1000_queue_stats tx_stats;
+#endif
};
struct e1000_rx_ring {
@@ -216,9 +225,17 @@ struct e1000_rx_ring {
struct e1000_ps_page *ps_page;
struct e1000_ps_page_dma *ps_page_dma;
+ struct sk_buff *rx_skb_top;
+ struct sk_buff *rx_skb_prev;
+
+ /* cpu for rx queue */
+ int cpu;
+
uint16_t rdh;
uint16_t rdt;
- uint64_t pkt;
+#ifdef CONFIG_E1000_MQ
+ struct e1000_queue_stats rx_stats;
+#endif
};
#define E1000_DESC_UNUSED(R) \
@@ -251,6 +268,9 @@ struct e1000_adapter {
uint16_t link_speed;
uint16_t link_duplex;
spinlock_t stats_lock;
+#ifdef CONFIG_E1000_NAPI
+ spinlock_t tx_queue_lock;
+#endif
atomic_t irq_sem;
struct work_struct tx_timeout_task;
struct work_struct watchdog_task;
@@ -264,6 +284,7 @@ struct e1000_adapter {
#ifdef CONFIG_E1000_MQ
struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
#endif
+ unsigned long tx_queue_len;
uint32_t txd_cmd;
uint32_t tx_int_delay;
uint32_t tx_abs_int_delay;
@@ -271,9 +292,11 @@ struct e1000_adapter {
uint64_t gotcl_old;
uint64_t tpt_old;
uint64_t colc_old;
+ uint32_t tx_timeout_count;
uint32_t tx_fifo_head;
uint32_t tx_head_addr;
uint32_t tx_fifo_size;
+ uint8_t tx_timeout_factor;
atomic_t tx_fifo_stall;
boolean_t pcix_82544;
boolean_t detect_tx_hung;
@@ -281,14 +304,15 @@ struct e1000_adapter {
/* RX */
#ifdef CONFIG_E1000_NAPI
boolean_t (*clean_rx) (struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring,
- int *work_done, int work_to_do);
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
#else
boolean_t (*clean_rx) (struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring);
+ struct e1000_rx_ring *rx_ring);
#endif
void (*alloc_rx_buf) (struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring);
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
struct e1000_rx_ring *rx_ring; /* One per active queue */
#ifdef CONFIG_E1000_NAPI
struct net_device *polling_netdev; /* One per active queue */
@@ -296,13 +320,15 @@ struct e1000_adapter {
#ifdef CONFIG_E1000_MQ
struct net_device **cpu_netdev; /* per-cpu */
struct call_async_data_struct rx_sched_call_data;
- int cpu_for_queue[4];
+ cpumask_t cpumask;
#endif
- int num_queues;
+ int num_tx_queues;
+ int num_rx_queues;
uint64_t hw_csum_err;
uint64_t hw_csum_good;
uint64_t rx_hdr_split;
+ uint32_t alloc_rx_buff_failed;
uint32_t rx_int_delay;
uint32_t rx_abs_int_delay;
boolean_t rx_csum;
@@ -330,6 +356,7 @@ struct e1000_adapter {
struct e1000_rx_ring test_rx_ring;
+ u32 *config_space;
int msg_enable;
#ifdef CONFIG_PCI_MSI
boolean_t have_msi;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c88f1a3c1b1d..5cedc81786e3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -80,6 +80,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "tx_deferred_ok", E1000_STAT(stats.dc) },
{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+ { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
{ "rx_long_length_errors", E1000_STAT(stats.roc) },
{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -93,9 +94,20 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
{ "rx_header_split", E1000_STAT(rx_hdr_split) },
+ { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
};
-#define E1000_STATS_LEN \
+
+#ifdef CONFIG_E1000_MQ
+#define E1000_QUEUE_STATS_LEN \
+ (((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
+ ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
+ * (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
+#else
+#define E1000_QUEUE_STATS_LEN 0
+#endif
+#define E1000_GLOBAL_STATS_LEN \
sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Eeprom test (offline)",
"Interrupt test (offline)", "Loopback test (offline)",
@@ -109,7 +121,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if(hw->media_type == e1000_media_type_copper) {
+ if (hw->media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
@@ -121,7 +133,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->advertising = ADVERTISED_TP;
- if(hw->autoneg == 1) {
+ if (hw->autoneg == 1) {
ecmd->advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
@@ -132,7 +144,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_TP;
ecmd->phy_address = hw->phy_addr;
- if(hw->mac_type == e1000_82543)
+ if (hw->mac_type == e1000_82543)
ecmd->transceiver = XCVR_EXTERNAL;
else
ecmd->transceiver = XCVR_INTERNAL;
@@ -148,13 +160,13 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_FIBRE;
- if(hw->mac_type >= e1000_82545)
+ if (hw->mac_type >= e1000_82545)
ecmd->transceiver = XCVR_INTERNAL;
else
ecmd->transceiver = XCVR_EXTERNAL;
}
- if(netif_carrier_ok(adapter->netdev)) {
+ if (netif_carrier_ok(adapter->netdev)) {
e1000_get_speed_and_duplex(hw, &adapter->link_speed,
&adapter->link_duplex);
@@ -163,7 +175,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
	/* unfortunately FULL_DUPLEX != DUPLEX_FULL
* and HALF_DUPLEX != DUPLEX_HALF */
- if(adapter->link_duplex == FULL_DUPLEX)
+ if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
@@ -183,13 +195,21 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if(ecmd->autoneg == AUTONEG_ENABLE) {
+ /* When SoL/IDER sessions are active, autoneg/speed/duplex
+ * cannot be changed */
+ if (e1000_check_phy_reset_block(hw)) {
+ DPRINTK(DRV, ERR, "Cannot change link characteristics "
+ "when SoL/IDER is active.\n");
+ return -EINVAL;
+ }
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->autoneg = 1;
- if(hw->media_type == e1000_media_type_fiber)
+ if (hw->media_type == e1000_media_type_fiber)
hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE |
ADVERTISED_Autoneg;
- else
+ else
hw->autoneg_advertised = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
@@ -199,12 +219,12 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ADVERTISED_TP;
ecmd->advertising = hw->autoneg_advertised;
} else
- if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
+ if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
return -EINVAL;
/* reset the link */
- if(netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev)) {
e1000_down(adapter);
e1000_reset(adapter);
e1000_up(adapter);
@@ -221,14 +241,14 @@ e1000_get_pauseparam(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- pause->autoneg =
+ pause->autoneg =
(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
-
- if(hw->fc == e1000_fc_rx_pause)
+
+ if (hw->fc == e1000_fc_rx_pause)
pause->rx_pause = 1;
- else if(hw->fc == e1000_fc_tx_pause)
+ else if (hw->fc == e1000_fc_tx_pause)
pause->tx_pause = 1;
- else if(hw->fc == e1000_fc_full) {
+ else if (hw->fc == e1000_fc_full) {
pause->rx_pause = 1;
pause->tx_pause = 1;
}
@@ -240,31 +260,30 @@ e1000_set_pauseparam(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
-
+
adapter->fc_autoneg = pause->autoneg;
- if(pause->rx_pause && pause->tx_pause)
+ if (pause->rx_pause && pause->tx_pause)
hw->fc = e1000_fc_full;
- else if(pause->rx_pause && !pause->tx_pause)
+ else if (pause->rx_pause && !pause->tx_pause)
hw->fc = e1000_fc_rx_pause;
- else if(!pause->rx_pause && pause->tx_pause)
+ else if (!pause->rx_pause && pause->tx_pause)
hw->fc = e1000_fc_tx_pause;
- else if(!pause->rx_pause && !pause->tx_pause)
+ else if (!pause->rx_pause && !pause->tx_pause)
hw->fc = e1000_fc_none;
hw->original_fc = hw->fc;
- if(adapter->fc_autoneg == AUTONEG_ENABLE) {
- if(netif_running(adapter->netdev)) {
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ if (netif_running(adapter->netdev)) {
e1000_down(adapter);
e1000_up(adapter);
} else
e1000_reset(adapter);
- }
- else
+ } else
return ((hw->media_type == e1000_media_type_fiber) ?
e1000_setup_link(hw) : e1000_force_mac_fc(hw));
-
+
return 0;
}
@@ -281,14 +300,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
struct e1000_adapter *adapter = netdev_priv(netdev);
adapter->rx_csum = data;
- if(netif_running(netdev)) {
+ if (netif_running(netdev)) {
e1000_down(adapter);
e1000_up(adapter);
} else
e1000_reset(adapter);
return 0;
}
-
+
static uint32_t
e1000_get_tx_csum(struct net_device *netdev)
{
@@ -300,7 +319,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if(adapter->hw.mac_type < e1000_82543) {
+ if (adapter->hw.mac_type < e1000_82543) {
if (!data)
return -EINVAL;
return 0;
@@ -319,8 +338,8 @@ static int
e1000_set_tso(struct net_device *netdev, uint32_t data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if((adapter->hw.mac_type < e1000_82544) ||
- (adapter->hw.mac_type == e1000_82547))
+ if ((adapter->hw.mac_type < e1000_82544) ||
+ (adapter->hw.mac_type == e1000_82547))
return data ? -EINVAL : 0;
if (data)
@@ -328,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
else
netdev->features &= ~NETIF_F_TSO;
return 0;
-}
+}
#endif /* NETIF_F_TSO */
static uint32_t
@@ -345,7 +364,7 @@ e1000_set_msglevel(struct net_device *netdev, uint32_t data)
adapter->msg_enable = data;
}
-static int
+static int
e1000_get_regs_len(struct net_device *netdev)
{
#define E1000_REGS_LEN 32
@@ -381,7 +400,7 @@ e1000_get_regs(struct net_device *netdev,
regs_buff[11] = E1000_READ_REG(hw, TIDV);
regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */
- if(hw->phy_type == e1000_phy_igp) {
+ if (hw->phy_type == e1000_phy_igp) {
e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
IGP01E1000_PHY_AGC_A);
e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
@@ -435,7 +454,7 @@ e1000_get_regs(struct net_device *netdev,
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
- if(hw->mac_type >= e1000_82540 &&
+ if (hw->mac_type >= e1000_82540 &&
hw->media_type == e1000_media_type_copper) {
regs_buff[26] = E1000_READ_REG(hw, MANC);
}
@@ -459,7 +478,7 @@ e1000_get_eeprom(struct net_device *netdev,
int ret_val = 0;
uint16_t i;
- if(eeprom->len == 0)
+ if (eeprom->len == 0)
return -EINVAL;
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -469,16 +488,16 @@ e1000_get_eeprom(struct net_device *netdev,
eeprom_buff = kmalloc(sizeof(uint16_t) *
(last_word - first_word + 1), GFP_KERNEL);
- if(!eeprom_buff)
+ if (!eeprom_buff)
return -ENOMEM;
- if(hw->eeprom.type == e1000_eeprom_spi)
+ if (hw->eeprom.type == e1000_eeprom_spi)
ret_val = e1000_read_eeprom(hw, first_word,
last_word - first_word + 1,
eeprom_buff);
else {
for (i = 0; i < last_word - first_word + 1; i++)
- if((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+ if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
&eeprom_buff[i])))
break;
}
@@ -505,10 +524,10 @@ e1000_set_eeprom(struct net_device *netdev,
int max_len, first_word, last_word, ret_val = 0;
uint16_t i;
- if(eeprom->len == 0)
+ if (eeprom->len == 0)
return -EOPNOTSUPP;
- if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
max_len = hw->eeprom.word_size * 2;
@@ -516,19 +535,19 @@ e1000_set_eeprom(struct net_device *netdev,
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if(!eeprom_buff)
+ if (!eeprom_buff)
return -ENOMEM;
ptr = (void *)eeprom_buff;
- if(eeprom->offset & 1) {
+ if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
ret_val = e1000_read_eeprom(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
- if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
ret_val = e1000_read_eeprom(hw, last_word, 1,
@@ -547,9 +566,9 @@ e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000_write_eeprom(hw, first_word,
last_word - first_word + 1, eeprom_buff);
- /* Update the checksum over the first part of the EEPROM if needed
+ /* Update the checksum over the first part of the EEPROM if needed
	 * and flush shadow RAM for 82573 controllers */
- if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
+ if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
(hw->mac_type == e1000_82573)))
e1000_update_eeprom_checksum(hw);
@@ -567,21 +586,21 @@ e1000_get_drvinfo(struct net_device *netdev,
strncpy(drvinfo->driver, e1000_driver_name, 32);
strncpy(drvinfo->version, e1000_driver_version, 32);
-
- /* EEPROM image version # is reported as firware version # for
+
+ /* EEPROM image version # is reported as firmware version # for
* 8257{1|2|3} controllers */
e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
switch (adapter->hw.mac_type) {
case e1000_82571:
case e1000_82572:
case e1000_82573:
- sprintf(firmware_version, "%d.%d-%d",
+ sprintf(firmware_version, "%d.%d-%d",
(eeprom_data & 0xF000) >> 12,
(eeprom_data & 0x0FF0) >> 4,
eeprom_data & 0x000F);
break;
default:
- sprintf(firmware_version, "n/a");
+ sprintf(firmware_version, "N/A");
}
strncpy(drvinfo->fw_version, firmware_version, 32);
@@ -613,7 +632,7 @@ e1000_get_ringparam(struct net_device *netdev,
ring->rx_jumbo_pending = 0;
}
-static int
+static int
e1000_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -623,8 +642,8 @@ e1000_set_ringparam(struct net_device *netdev,
struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
int i, err, tx_ring_size, rx_ring_size;
- tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
- rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+ tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
+ rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
if (netif_running(adapter->netdev))
e1000_down(adapter);
@@ -650,25 +669,25 @@ e1000_set_ringparam(struct net_device *netdev,
txdr = adapter->tx_ring;
rxdr = adapter->rx_ring;
- if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
E1000_MAX_RXD : E1000_MAX_82544_RXD));
- E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+ E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
E1000_MAX_TXD : E1000_MAX_82544_TXD));
- E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+ E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
txdr[i].count = txdr->count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
rxdr[i].count = rxdr->count;
- }
- if(netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev)) {
/* Try to get new resources before deleting old */
if ((err = e1000_setup_all_rx_resources(adapter)))
goto err_setup_rx;
@@ -688,7 +707,7 @@ e1000_set_ringparam(struct net_device *netdev,
kfree(rx_old);
adapter->rx_ring = rx_new;
adapter->tx_ring = tx_new;
- if((err = e1000_up(adapter)))
+ if ((err = e1000_up(adapter)))
return err;
}
@@ -707,10 +726,10 @@ err_setup_rx:
uint32_t pat, value; \
uint32_t test[] = \
{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
- for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
+ for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \
value = E1000_READ_REG(&adapter->hw, R); \
- if(value != (test[pat] & W & M)) { \
+ if (value != (test[pat] & W & M)) { \
DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
"0x%08X expected 0x%08X\n", \
E1000_##R, value, (test[pat] & W & M)); \
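The REG_PATTERN_TEST macro being reindented here implements a classic register self-test: write a set of bit patterns masked to the writable bits (W), read the register back, and compare against the pattern masked by the readable bits (M). A standalone sketch with the MMIO access mocked out (names hypothetical):

	#include <stdio.h>

	static unsigned fake_reg;			/* stands in for MMIO */
	static void wr(unsigned v) { fake_reg = v; }
	static unsigned rd(void)   { return fake_reg; }

	static int pattern_test(unsigned W, unsigned M)
	{
		static const unsigned test[] =
			{ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF };
		unsigned p;

		for (p = 0; p < sizeof(test) / sizeof(test[0]); p++) {
			unsigned value;

			wr(test[p] & W);	/* write writable bits only */
			value = rd();
			if (value != (test[p] & W & M)) {
				printf("pattern %#x failed: got %#x expected %#x\n",
				       test[p], value, test[p] & W & M);
				return 1;
			}
		}
		return 0;
	}

	int main(void) { return pattern_test(0xFFFFFFF0, 0xFFFFFFFF); }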
@@ -726,7 +745,7 @@ err_setup_rx:
uint32_t value; \
E1000_WRITE_REG(&adapter->hw, R, W & M); \
value = E1000_READ_REG(&adapter->hw, R); \
- if((W & M) != (value & M)) { \
+ if ((W & M) != (value & M)) { \
DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
"expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
*data = (adapter->hw.mac_type < e1000_82543) ? \
@@ -762,7 +781,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
- if(value != after) {
+ if (value != after) {
DPRINTK(DRV, ERR, "failed STATUS register test got: "
"0x%08X expected: 0x%08X\n", after, value);
*data = 1;
@@ -790,7 +809,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
- if(adapter->hw.mac_type >= e1000_82543) {
+ if (adapter->hw.mac_type >= e1000_82543) {
REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
@@ -798,7 +817,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
- for(i = 0; i < E1000_RAR_ENTRIES; i++) {
+ for (i = 0; i < E1000_RAR_ENTRIES; i++) {
REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
0xFFFFFFFF);
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
@@ -814,7 +833,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
}
- for(i = 0; i < E1000_MC_TBL_SIZE; i++)
+ for (i = 0; i < E1000_MC_TBL_SIZE; i++)
REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
*data = 0;
@@ -830,8 +849,8 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
*data = 0;
/* Read and add up the contents of the EEPROM */
- for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
- if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
*data = 1;
break;
}
@@ -839,7 +858,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
}
/* If Checksum is not Correct return error else test passed */
- if((checksum != (uint16_t) EEPROM_SUM) && !(*data))
+ if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
*data = 2;
return *data;
@@ -868,9 +887,9 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
*data = 0;
/* Hook up test interrupt handler just for this test */
- if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+ if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
shared_int = FALSE;
- } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
+ } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
netdev->name, netdev)){
*data = 1;
return -1;
@@ -881,12 +900,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
msec_delay(10);
/* Test each interrupt */
- for(; i < 10; i++) {
+ for (; i < 10; i++) {
/* Interrupt to test */
mask = 1 << i;
- if(!shared_int) {
+ if (!shared_int) {
/* Disable the interrupt to be reported in
* the cause register and then force the same
* interrupt and see if one gets posted. If
@@ -897,8 +916,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
E1000_WRITE_REG(&adapter->hw, IMC, mask);
E1000_WRITE_REG(&adapter->hw, ICS, mask);
msec_delay(10);
-
- if(adapter->test_icr & mask) {
+
+ if (adapter->test_icr & mask) {
*data = 3;
break;
}
@@ -915,12 +934,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
E1000_WRITE_REG(&adapter->hw, ICS, mask);
msec_delay(10);
- if(!(adapter->test_icr & mask)) {
+ if (!(adapter->test_icr & mask)) {
*data = 4;
break;
}
- if(!shared_int) {
+ if (!shared_int) {
/* Disable the other interrupts to be reported in
* the cause register and then force the other
* interrupts and see if any get posted. If
@@ -932,7 +951,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
msec_delay(10);
- if(adapter->test_icr) {
+ if (adapter->test_icr) {
*data = 5;
break;
}
@@ -957,40 +976,39 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int i;
- if(txdr->desc && txdr->buffer_info) {
- for(i = 0; i < txdr->count; i++) {
- if(txdr->buffer_info[i].dma)
+ if (txdr->desc && txdr->buffer_info) {
+ for (i = 0; i < txdr->count; i++) {
+ if (txdr->buffer_info[i].dma)
pci_unmap_single(pdev, txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
PCI_DMA_TODEVICE);
- if(txdr->buffer_info[i].skb)
+ if (txdr->buffer_info[i].skb)
dev_kfree_skb(txdr->buffer_info[i].skb);
}
}
- if(rxdr->desc && rxdr->buffer_info) {
- for(i = 0; i < rxdr->count; i++) {
- if(rxdr->buffer_info[i].dma)
+ if (rxdr->desc && rxdr->buffer_info) {
+ for (i = 0; i < rxdr->count; i++) {
+ if (rxdr->buffer_info[i].dma)
pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
rxdr->buffer_info[i].length,
PCI_DMA_FROMDEVICE);
- if(rxdr->buffer_info[i].skb)
+ if (rxdr->buffer_info[i].skb)
dev_kfree_skb(rxdr->buffer_info[i].skb);
}
}
- if(txdr->desc) {
+ if (txdr->desc) {
pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
txdr->desc = NULL;
}
- if(rxdr->desc) {
+ if (rxdr->desc) {
pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
rxdr->desc = NULL;
}
kfree(txdr->buffer_info);
txdr->buffer_info = NULL;
-
kfree(rxdr->buffer_info);
rxdr->buffer_info = NULL;
@@ -1008,11 +1026,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
- if(!txdr->count)
- txdr->count = E1000_DEFAULT_TXD;
+ if (!txdr->count)
+ txdr->count = E1000_DEFAULT_TXD;
size = txdr->count * sizeof(struct e1000_buffer);
- if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+ if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
ret_val = 1;
goto err_nomem;
}
@@ -1020,7 +1038,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
E1000_ROUNDUP(txdr->size, 4096);
- if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
+ if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
ret_val = 2;
goto err_nomem;
}
@@ -1039,12 +1057,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
- for(i = 0; i < txdr->count; i++) {
+ for (i = 0; i < txdr->count; i++) {
struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
struct sk_buff *skb;
unsigned int size = 1024;
- if(!(skb = alloc_skb(size, GFP_KERNEL))) {
+ if (!(skb = alloc_skb(size, GFP_KERNEL))) {
ret_val = 3;
goto err_nomem;
}
@@ -1064,18 +1082,18 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
/* Setup Rx descriptor ring and Rx buffers */
- if(!rxdr->count)
- rxdr->count = E1000_DEFAULT_RXD;
+ if (!rxdr->count)
+ rxdr->count = E1000_DEFAULT_RXD;
size = rxdr->count * sizeof(struct e1000_buffer);
- if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+ if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
ret_val = 4;
goto err_nomem;
}
memset(rxdr->buffer_info, 0, size);
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
+ if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
ret_val = 5;
goto err_nomem;
}
@@ -1095,11 +1113,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
- for(i = 0; i < rxdr->count; i++) {
+ for (i = 0; i < rxdr->count; i++) {
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
struct sk_buff *skb;
- if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
+ if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
GFP_KERNEL))) {
ret_val = 6;
goto err_nomem;
@@ -1208,15 +1226,15 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
/* Check Phy Configuration */
e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
- if(phy_reg != 0x4100)
+ if (phy_reg != 0x4100)
return 9;
e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
- if(phy_reg != 0x0070)
+ if (phy_reg != 0x0070)
return 10;
e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
- if(phy_reg != 0x001A)
+ if (phy_reg != 0x001A)
return 11;
return 0;
@@ -1230,7 +1248,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
adapter->hw.autoneg = FALSE;
- if(adapter->hw.phy_type == e1000_phy_m88) {
+ if (adapter->hw.phy_type == e1000_phy_m88) {
/* Auto-MDI/MDIX Off */
e1000_write_phy_reg(&adapter->hw,
M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1250,14 +1268,14 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
E1000_CTRL_FD); /* Force Duplex to FULL */
- if(adapter->hw.media_type == e1000_media_type_copper &&
+ if (adapter->hw.media_type == e1000_media_type_copper &&
adapter->hw.phy_type == e1000_phy_m88) {
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
} else {
		/* Set the ILOS bit on the fiber NIC if half
		 * duplex link is detected. */
stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
- if((stat_reg & E1000_STATUS_FD) == 0)
+ if ((stat_reg & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
}
@@ -1266,7 +1284,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
/* Disable the receiver on the PHY so when a cable is plugged in, the
* PHY does not begin to autoneg when a cable is reconnected to the NIC.
*/
- if(adapter->hw.phy_type == e1000_phy_m88)
+ if (adapter->hw.phy_type == e1000_phy_m88)
e1000_phy_disable_receiver(adapter);
udelay(500);
@@ -1282,14 +1300,14 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
switch (adapter->hw.mac_type) {
case e1000_82543:
- if(adapter->hw.media_type == e1000_media_type_copper) {
+ if (adapter->hw.media_type == e1000_media_type_copper) {
/* Attempt to setup Loopback mode on Non-integrated PHY.
* Some PHY registers get corrupted at random, so
* attempt this 10 times.
*/
- while(e1000_nonintegrated_phy_loopback(adapter) &&
+ while (e1000_nonintegrated_phy_loopback(adapter) &&
count++ < 10);
- if(count < 11)
+ if (count < 11)
return 0;
}
break;
@@ -1327,11 +1345,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
static int
e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
- uint32_t rctl;
struct e1000_hw *hw = &adapter->hw;
+ uint32_t rctl;
if (hw->media_type == e1000_media_type_fiber ||
- hw->media_type == e1000_media_type_internal_serdes) {
+ hw->media_type == e1000_media_type_internal_serdes) {
switch (hw->mac_type) {
case e1000_82545:
case e1000_82546:
@@ -1362,25 +1380,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
static void
e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
uint32_t rctl;
uint16_t phy_reg;
- struct e1000_hw *hw = &adapter->hw;
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl = E1000_READ_REG(hw, RCTL);
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ E1000_WRITE_REG(hw, RCTL, rctl);
switch (hw->mac_type) {
case e1000_82571:
case e1000_82572:
if (hw->media_type == e1000_media_type_fiber ||
- hw->media_type == e1000_media_type_internal_serdes){
+ hw->media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
msec_delay(10);
break;
}
- /* fall thru for Cu adapters */
+ /* Fall Through */
case e1000_82545:
case e1000_82546:
case e1000_82545_rev_3:
@@ -1401,7 +1419,7 @@ static void
e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
{
memset(skb->data, 0xFF, frame_size);
- frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
+ frame_size &= ~1;
memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
@@ -1410,9 +1428,9 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
static int
e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
{
- frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
- if(*(skb->data + 3) == 0xFF) {
- if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ frame_size &= ~1;
+ if (*(skb->data + 3) == 0xFF) {
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
(*(skb->data + frame_size / 2 + 12) == 0xAF)) {
return 0;
}
@@ -1431,53 +1449,53 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
- /* Calculate the loop count based on the largest descriptor ring
+ /* Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop
*/
- if(rxdr->count <= txdr->count)
+ if (rxdr->count <= txdr->count)
lc = ((txdr->count / 64) * 2) + 1;
else
lc = ((rxdr->count / 64) * 2) + 1;
k = l = 0;
- for(j = 0; j <= lc; j++) { /* loop count loop */
- for(i = 0; i < 64; i++) { /* send the packets */
- e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
+ for (j = 0; j <= lc; j++) { /* loop count loop */
+ for (i = 0; i < 64; i++) { /* send the packets */
+ e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ pci_dma_sync_single_for_device(pdev,
txdr->buffer_info[k].dma,
txdr->buffer_info[k].length,
PCI_DMA_TODEVICE);
- if(unlikely(++k == txdr->count)) k = 0;
+ if (unlikely(++k == txdr->count)) k = 0;
}
E1000_WRITE_REG(&adapter->hw, TDT, k);
msec_delay(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ pci_dma_sync_single_for_cpu(pdev,
rxdr->buffer_info[l].dma,
rxdr->buffer_info[l].length,
PCI_DMA_FROMDEVICE);
-
+
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
1024);
- if(!ret_val)
+ if (!ret_val)
good_cnt++;
- if(unlikely(++l == rxdr->count)) l = 0;
- /* time + 20 msecs (200 msecs on 2.4) is more than
- * enough time to complete the receives, if it's
+ if (unlikely(++l == rxdr->count)) l = 0;
+ /* time + 20 msecs (200 msecs on 2.4) is more than
+ * enough time to complete the receives, if it's
* exceeded, break and error off
*/
} while (good_cnt < 64 && jiffies < (time + 20));
- if(good_cnt != 64) {
+ if (good_cnt != 64) {
ret_val = 13; /* ret_val is the same as mis-compare */
- break;
+ break;
}
- if(jiffies >= (time + 2)) {
+ if (jiffies >= (time + 2)) {
ret_val = 14; /* error code for time out error */
break;
}
@@ -1488,14 +1506,25 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
static int
e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
{
- if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
- if((*data = e1000_setup_loopback_test(adapter)))
- goto err_loopback_setup;
+ /* PHY loopback cannot be performed if SoL/IDER
+ * sessions are active */
+ if (e1000_check_phy_reset_block(&adapter->hw)) {
+ DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+ "when SoL/IDER is active.\n");
+ *data = 0;
+ goto out;
+ }
+
+ if ((*data = e1000_setup_desc_rings(adapter)))
+ goto out;
+ if ((*data = e1000_setup_loopback_test(adapter)))
+ goto err_loopback;
*data = e1000_run_loopback_test(adapter);
e1000_loopback_cleanup(adapter);
-err_loopback_setup:
- e1000_free_desc_rings(adapter);
+
err_loopback:
+ e1000_free_desc_rings(adapter);
+out:
return *data;
}
@@ -1519,17 +1548,17 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
*data = 1;
} else {
e1000_check_for_link(&adapter->hw);
- if(adapter->hw.autoneg) /* if auto_neg is set wait for it */
+ if (adapter->hw.autoneg) /* if auto_neg is set wait for it */
msec_delay(4000);
- if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
+ if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
*data = 1;
}
}
return *data;
}
-static int
+static int
e1000_diag_test_count(struct net_device *netdev)
{
return E1000_TEST_LEN;
@@ -1542,7 +1571,7 @@ e1000_diag_test(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
boolean_t if_running = netif_running(netdev);
- if(eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
/* save speed, duplex, autoneg settings */
@@ -1552,27 +1581,27 @@ e1000_diag_test(struct net_device *netdev,
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
- if(e1000_link_test(adapter, &data[4]))
+ if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- if(if_running)
+ if (if_running)
e1000_down(adapter);
else
e1000_reset(adapter);
- if(e1000_reg_test(adapter, &data[0]))
+ if (e1000_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
e1000_reset(adapter);
- if(e1000_eeprom_test(adapter, &data[1]))
+ if (e1000_eeprom_test(adapter, &data[1]))
eth_test->flags |= ETH_TEST_FL_FAILED;
e1000_reset(adapter);
- if(e1000_intr_test(adapter, &data[2]))
+ if (e1000_intr_test(adapter, &data[2]))
eth_test->flags |= ETH_TEST_FL_FAILED;
e1000_reset(adapter);
- if(e1000_loopback_test(adapter, &data[3]))
+ if (e1000_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* restore speed, duplex, autoneg settings */
@@ -1581,11 +1610,11 @@ e1000_diag_test(struct net_device *netdev,
adapter->hw.autoneg = autoneg;
e1000_reset(adapter);
- if(if_running)
+ if (if_running)
e1000_up(adapter);
} else {
/* Online tests */
- if(e1000_link_test(adapter, &data[4]))
+ if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* Offline tests aren't run; pass by default */
@@ -1603,7 +1632,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- switch(adapter->hw.device_id) {
+ switch (adapter->hw.device_id) {
case E1000_DEV_ID_82542:
case E1000_DEV_ID_82543GC_FIBER:
case E1000_DEV_ID_82543GC_COPPER:
@@ -1617,8 +1646,9 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber */
- if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
+ if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
wol->supported = 0;
wol->wolopts = 0;
return;
@@ -1630,13 +1660,13 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = 0;
- if(adapter->wol & E1000_WUFC_EX)
+ if (adapter->wol & E1000_WUFC_EX)
wol->wolopts |= WAKE_UCAST;
- if(adapter->wol & E1000_WUFC_MC)
+ if (adapter->wol & E1000_WUFC_MC)
wol->wolopts |= WAKE_MCAST;
- if(adapter->wol & E1000_WUFC_BC)
+ if (adapter->wol & E1000_WUFC_BC)
wol->wolopts |= WAKE_BCAST;
- if(adapter->wol & E1000_WUFC_MAG)
+ if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
return;
}
@@ -1648,7 +1678,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- switch(adapter->hw.device_id) {
+ switch (adapter->hw.device_id) {
case E1000_DEV_ID_82542:
case E1000_DEV_ID_82543GC_FIBER:
case E1000_DEV_ID_82543GC_COPPER:
@@ -1660,24 +1690,25 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber */
- if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+ if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
return wol->wolopts ? -EOPNOTSUPP : 0;
/* Fall Through */
default:
- if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
adapter->wol = 0;
- if(wol->wolopts & WAKE_UCAST)
+ if (wol->wolopts & WAKE_UCAST)
adapter->wol |= E1000_WUFC_EX;
- if(wol->wolopts & WAKE_MCAST)
+ if (wol->wolopts & WAKE_MCAST)
adapter->wol |= E1000_WUFC_MC;
- if(wol->wolopts & WAKE_BCAST)
+ if (wol->wolopts & WAKE_BCAST)
adapter->wol |= E1000_WUFC_BC;
- if(wol->wolopts & WAKE_MAGIC)
+ if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
}
@@ -1695,7 +1726,7 @@ e1000_led_blink_callback(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
- if(test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+ if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
e1000_led_off(&adapter->hw);
else
e1000_led_on(&adapter->hw);
@@ -1708,11 +1739,11 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
+ if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
- if(adapter->hw.mac_type < e1000_82571) {
- if(!adapter->blink_timer.function) {
+ if (adapter->hw.mac_type < e1000_82571) {
+ if (!adapter->blink_timer.function) {
init_timer(&adapter->blink_timer);
adapter->blink_timer.function = e1000_led_blink_callback;
adapter->blink_timer.data = (unsigned long) adapter;
@@ -1721,21 +1752,21 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
mod_timer(&adapter->blink_timer, jiffies);
msleep_interruptible(data * 1000);
del_timer_sync(&adapter->blink_timer);
- }
- else if(adapter->hw.mac_type < e1000_82573) {
- E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
- E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
- (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
+ } else if (adapter->hw.mac_type < e1000_82573) {
+ E1000_WRITE_REG(&adapter->hw, LEDCTL,
+ (E1000_LEDCTL_LED2_BLINK_RATE |
+ E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
+ (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
+ (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
msleep_interruptible(data * 1000);
- }
- else {
- E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
- E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
- (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
- (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
+ } else {
+ E1000_WRITE_REG(&adapter->hw, LEDCTL,
+ (E1000_LEDCTL_LED2_BLINK_RATE |
+ E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
+ (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
+ (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
msleep_interruptible(data * 1000);
}
@@ -1750,50 +1781,89 @@ static int
e1000_nway_reset(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if(netif_running(netdev)) {
+ if (netif_running(netdev)) {
e1000_down(adapter);
e1000_up(adapter);
}
return 0;
}
-static int
+static int
e1000_get_stats_count(struct net_device *netdev)
{
return E1000_STATS_LEN;
}
-static void
-e1000_get_ethtool_stats(struct net_device *netdev,
+static void
+e1000_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_E1000_MQ
+ uint64_t *queue_stat;
+ int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
+ int j, k;
+#endif
int i;
e1000_update_stats(adapter);
- for(i = 0; i < E1000_STATS_LEN; i++) {
- char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
- data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+ data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
}
+#ifdef CONFIG_E1000_MQ
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
+ for (k = 0; k < stat_count; k++)
+ data[i + k] = queue_stat[k];
+ i += k;
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
+ for (k = 0; k < stat_count; k++)
+ data[i + k] = queue_stat[k];
+ i += k;
+ }
+#endif
+/* BUG_ON(i != E1000_STATS_LEN); */
}
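
With CONFIG_E1000_MQ the data[] buffer above is the global stats followed by two uint64_t counters per queue. A hypothetical length relation consistent with these loops (the macro name and the two-field shape of struct e1000_queue_stats are assumptions, not part of this patch):

#define E1000_QUEUE_STATS_LEN(a) \
	(((a)->num_tx_queues + (a)->num_rx_queues) * \
	 (sizeof(struct e1000_queue_stats) / sizeof(uint64_t)))
/* so E1000_STATS_LEN == E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN(a),
 * which is what the disabled BUG_ON above would assert */
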
-static void
+static void
e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
{
+#ifdef CONFIG_E1000_MQ
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+#endif
+ uint8_t *p = data;
int i;
- switch(stringset) {
+ switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *e1000_gstrings_test,
+ memcpy(data, *e1000_gstrings_test,
E1000_TEST_LEN*ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i=0; i < E1000_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- e1000_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, e1000_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
}
+#ifdef CONFIG_E1000_MQ
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+#endif
+/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
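
With CONFIG_E1000_MQ set, ethtool -S ethX would then report the per-queue counters after the global ones; illustrative (not captured) output for a 1-Tx/2-Rx configuration:

     tx_queue_0_packets: 104913
     tx_queue_0_bytes: 128943719
     rx_queue_0_packets: 221341
     rx_queue_0_bytes: 301273982
     rx_queue_1_packets: 219983
     rx_queue_1_bytes: 298171733
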
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 136fc031e4ad..beeec0fbbeac 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -318,6 +318,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82546GB_FIBER:
case E1000_DEV_ID_82546GB_SERDES:
case E1000_DEV_ID_82546GB_PCIE:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
hw->mac_type = e1000_82546_rev_3;
break;
case E1000_DEV_ID_82541EI:
@@ -639,6 +641,7 @@ e1000_init_hw(struct e1000_hw *hw)
uint16_t cmd_mmrbc;
uint16_t stat_mmrbc;
uint32_t mta_size;
+ uint32_t ctrl_ext;
DEBUGFUNC("e1000_init_hw");
@@ -735,7 +738,6 @@ e1000_init_hw(struct e1000_hw *hw)
break;
case e1000_82571:
case e1000_82572:
- ctrl |= (1 << 22);
case e1000_82573:
ctrl |= E1000_TXDCTL_COUNT_DESC;
break;
@@ -775,6 +777,15 @@ e1000_init_hw(struct e1000_hw *hw)
*/
e1000_clear_hw_cntrs(hw);
+ if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+ hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+ ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ /* Relaxed ordering must be disabled to avoid a parity
+ * error crash in a PCI slot. */
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ }
+
return ret_val;
}
@@ -838,6 +849,11 @@ e1000_setup_link(struct e1000_hw *hw)
DEBUGFUNC("e1000_setup_link");
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not have to set it up again. */
+ if (e1000_check_phy_reset_block(hw))
+ return E1000_SUCCESS;
+
/* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
* a bit that determines whether the HW defaults to enabling or
@@ -1584,10 +1600,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
if(ret_val)
return ret_val;
- /* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
- if(ret_val)
- return ret_val;
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+    if (ret_val)
+ return ret_val;
/* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
@@ -1929,14 +1945,19 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
void
e1000_config_collision_dist(struct e1000_hw *hw)
{
- uint32_t tctl;
+ uint32_t tctl, coll_dist;
DEBUGFUNC("e1000_config_collision_dist");
+ if (hw->mac_type < e1000_82543)
+ coll_dist = E1000_COLLISION_DISTANCE_82542;
+ else
+ coll_dist = E1000_COLLISION_DISTANCE;
+
tctl = E1000_READ_REG(hw, TCTL);
tctl &= ~E1000_TCTL_COLD;
- tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+ tctl |= coll_dist << E1000_COLD_SHIFT;
E1000_WRITE_REG(hw, TCTL, tctl);
E1000_WRITE_FLUSH(hw);
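
A worked example of the resulting field packing (the E1000_TCTL_COLD mask value is assumed from e1000_hw.h, not shown in this hunk):

/* 82543 and newer: coll_dist = E1000_COLLISION_DISTANCE = 63,
 * E1000_COLD_SHIFT = 12, assumed mask E1000_TCTL_COLD = 0x003ff000:
 *   tctl = (tctl & ~0x003ff000) | (63 << 12);    TCTL.COLD = 0x03f
 * 63 is the 0-based encoding of a 64-slot collision distance; the
 * pre-82543 parts keep the 1-based value 64. */
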
@@ -2982,6 +3003,8 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
if (hw->mac_type < e1000_82571)
msec_delay(10);
+ else
+ udelay(100);
E1000_WRITE_REG(hw, CTRL, ctrl);
E1000_WRITE_FLUSH(hw);
@@ -3881,17 +3904,19 @@ e1000_read_eeprom(struct e1000_hw *hw,
return -E1000_ERR_EEPROM;
}
- /* FLASH reads without acquiring the semaphore are safe in 82573-based
- * controllers.
- */
- if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
- (hw->mac_type != e1000_82573)) {
- /* Prepare the EEPROM for reading */
- if(e1000_acquire_eeprom(hw) != E1000_SUCCESS)
- return -E1000_ERR_EEPROM;
+ /* FLASH reads without acquiring the semaphore are safe */
+ if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
+ hw->eeprom.use_eerd == FALSE) {
+ switch (hw->mac_type) {
+ default:
+ /* Prepare the EEPROM for reading */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+ break;
+ }
}
- if(eeprom->use_eerd == TRUE) {
+ if (eeprom->use_eerd == TRUE) {
ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
(hw->mac_type != e1000_82573))
@@ -4398,7 +4423,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
return -E1000_ERR_EEPROM;
}
- /* If STM opcode located in bits 15:8 of flop, reset firmware */
+    /* If the STM opcode is located in bits 15:8 of flop, reset the firmware */
if ((flop & 0xFF00) == E1000_STM_OPCODE) {
E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
}
@@ -4406,7 +4431,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
/* Perform the flash update */
E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
- for (i=0; i < attempts; i++) {
+ for (i=0; i < attempts; i++) {
eecd = E1000_READ_REG(hw, EECD);
if ((eecd & E1000_EECD_FLUPD) == 0) {
break;
@@ -4479,6 +4504,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
}
+
switch (hw->mac_type) {
default:
break;
@@ -6720,6 +6746,12 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
break;
}
+    /* PHY configuration from NVM starts only after EECD_AUTO_RD goes high.
+     * Wait for PHY configuration to complete before accessing the NVM
+     * and the PHY. */
+ if (hw->mac_type == e1000_82573)
+ msec_delay(25);
+
return E1000_SUCCESS;
}
@@ -6809,7 +6841,8 @@ int32_t
e1000_check_phy_reset_block(struct e1000_hw *hw)
{
uint32_t manc = 0;
- if(hw->mac_type > e1000_82547_rev_2)
+
+ if (hw->mac_type > e1000_82547_rev_2)
manc = E1000_READ_REG(hw, MANC);
return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
E1000_BLK_PHY_RESET : E1000_SUCCESS;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 7caa35748cea..f1219dd9dbac 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -377,6 +377,7 @@ int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
/* Filters (multicast, vlan, receive) */
+void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -401,7 +402,9 @@ void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
/* Port I/O is only supported on 82544 and newer */
uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
+uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
+void e1000_enable_pciex_master(struct e1000_hw *hw);
int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
void e1000_release_software_semaphore(struct e1000_hw *hw);
@@ -439,6 +442,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82546GB_FIBER 0x107A
#define E1000_DEV_ID_82546GB_SERDES 0x107B
#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
#define E1000_DEV_ID_82547EI 0x1019
#define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F
@@ -449,6 +453,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82573E 0x108B
#define E1000_DEV_ID_82573E_IAMT 0x108C
#define E1000_DEV_ID_82573L 0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
#define NODE_ADDRESS_SIZE 6
@@ -897,14 +902,14 @@ struct e1000_ffvt_entry {
#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
-#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
-#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
-#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
-#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
-#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
-#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
-#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
-#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
+#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
+#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
+#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
+#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
@@ -1497,6 +1502,7 @@ struct e1000_hw {
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
@@ -1758,7 +1764,6 @@ struct e1000_hw {
#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
still to be processed. */
-
/* Transmit Configuration Word */
#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
@@ -1954,6 +1959,23 @@ struct e1000_host_command_info {
#define E1000_MDALIGN 4096
+/* PCI-Ex registers */
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+                            E1000_GCR_TXD_NO_SNOOP    | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
/* Function Active and Power State to MNG */
#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
@@ -2077,7 +2099,10 @@ struct e1000_host_command_info {
/* Collision related configuration parameters */
#define E1000_COLLISION_THRESHOLD 15
#define E1000_CT_SHIFT 4
-#define E1000_COLLISION_DISTANCE 64
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLLISION_DISTANCE_82542 64
#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
#define E1000_COLD_SHIFT 12
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 438a931fd55d..31e332935e5a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -29,11 +29,71 @@
#include "e1000.h"
/* Change Log
- * 6.0.58 4/20/05
- * o Accepted ethtool cleanup patch from Stephen Hemminger
- * 6.0.44+ 2/15/05
- * o applied Anton's patch to resolve tx hang in hardware
- * o Applied Andrew Mortons patch - e1000 stops working after resume
+ * 6.3.9 12/16/2005
+ * o incorporate fix for recycled skbs from IBM LTC
+ * 6.3.7 11/18/2005
+ * o Honor eeprom setting for enabling/disabling Wake On Lan
+ * 6.3.5 11/17/2005
+ * o Fix memory leak in rx ring handling for PCI Express adapters
+ * 6.3.4 11/8/05
+ * o Patch from Jesper Juhl to remove redundant NULL checks for kfree
+ * 6.3.2 9/20/05
+ * o Render logic that sets/resets DRV_LOAD as inline functions to
+ * avoid code replication. If f/w is AMT then set DRV_LOAD only when
+ * network interface is open.
+ * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
+ * o Adjust PBA partitioning for Jumbo frames using MTU size and not
+ * rx_buffer_len
+ * 6.3.1 9/19/05
+ * o Use adapter->tx_timeout_factor in Tx Hung Detect logic
+ *   (e1000_clean_tx_irq)
+ * o Support for 8086:10B5 device (Quad Port)
+ * 6.2.14 9/15/05
+ * o In AMT enabled configurations, set/reset DRV_LOAD bit on interface
+ * open/close
+ * 6.2.13 9/14/05
+ * o Invoke e1000_check_mng_mode only for 8257x controllers since it
+ * accesses the FWSM that is not supported in other controllers
+ * 6.2.12 9/9/05
+ * o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER
+ * o Set RCTL:SECRC only for controllers newer than 82543.
+ * o When the n/w interface comes down, reset the DRV_LOAD bit to notify
+ *   the f/w. This code was moved from e1000_remove to e1000_close
+ * 6.2.10 9/6/05
+ * o Fix off-by-one error in updating RDT in e1000_alloc_rx_buffers[_ps]
+ * o Enable fc by default on 82573 controllers (do not read eeprom)
+ * o Fix rx_errors statistic not to include missed_packet_count
+ * o Fix rx_dropped statistic not to include missed_packet_count
+ *   (Padraig Brady)
+ * 6.2.9 8/30/05
+ * o Remove call to update statistics from the controller in e1000_get_stats
+ * 6.2.8 8/30/05
+ * o Improved algorithm for rx buffer allocation/rdt update
+ * o Flow control watermarks relative to rx PBA size
+ * o Simplified 'Tx Hung' detect logic
+ * 6.2.7 8/17/05
+ * o Report rx buffer allocation failures and tx timeout counts in stats
+ * 6.2.6 8/16/05
+ * o Implement workaround for controller erratum -- linear non-tso packet
+ * following a TSO gets written back prematurely
+ * 6.2.5 8/15/05
+ * o Set netdev->tx_queue_len based on link speed/duplex settings.
+ * o Fix net_stats.rx_fifo_errors <p@draigBrady.com>
+ * o Do not power off PHY if SoL/IDER session is active
+ * 6.2.4 8/10/05
+ * o Fix loopback test setup/cleanup for 82571/3 controllers
+ * o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat
+ * all packets as raw
+ * o Prevent operations that will cause the PHY to be reset if SoL/IDER
+ * sessions are active and log a message
+ * 6.2.2 7/21/05
+ * o Used fixed-size descriptors for all MTU sizes, reducing memory load
+ * 6.1.2 4/13/05
+ * o Fixed ethtool diagnostics
+ * o Enabled flow control to take default eeprom settings
+ * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
+ *   calls, one from mii_ioctl and the other from within update_stats while
+ *   processing a MIIREG ioctl.
*/
char e1000_driver_name[] = "e1000";
@@ -43,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "6.1.16-k2"DRIVERNAPI
+#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
@@ -97,7 +157,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x108A),
INTEL_E1000_ETHERNET_DEVICE(0x108B),
INTEL_E1000_ETHERNET_DEVICE(0x108C),
+ INTEL_E1000_ETHERNET_DEVICE(0x1099),
INTEL_E1000_ETHERNET_DEVICE(0x109A),
+ INTEL_E1000_ETHERNET_DEVICE(0x10B5),
/* required last entry */
{0,}
};
@@ -171,9 +233,11 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring);
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring);
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
@@ -291,7 +355,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
static inline void
e1000_irq_enable(struct e1000_adapter *adapter)
{
- if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
+ if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
E1000_WRITE_FLUSH(&adapter->hw);
}
@@ -303,23 +367,91 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
uint16_t vid = adapter->hw.mng_cookie.vlan_id;
uint16_t old_vid = adapter->mng_vlan_id;
- if(adapter->vlgrp) {
- if(!adapter->vlgrp->vlan_devices[vid]) {
- if(adapter->hw.mng_cookie.status &
+ if (adapter->vlgrp) {
+ if (!adapter->vlgrp->vlan_devices[vid]) {
+ if (adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
e1000_vlan_rx_add_vid(netdev, vid);
adapter->mng_vlan_id = vid;
} else
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-
- if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
+
+ if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
+ (vid != old_vid) &&
!adapter->vlgrp->vlan_devices[old_vid])
e1000_vlan_rx_kill_vid(netdev, old_vid);
}
}
}
-
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets the {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For the AMT version of the f/w (only
+ * with 82573) this means that the network i/f is closed.
+ *
+ **/
+
+static inline void
+e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+ uint32_t ctrl_ext;
+ uint32_t swsm;
+
+    /* Let firmware take over control of the h/w */
+ switch (adapter->hw.mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ break;
+ case e1000_82573:
+ swsm = E1000_READ_REG(&adapter->hw, SWSM);
+ E1000_WRITE_REG(&adapter->hw, SWSM,
+                        swsm & ~E1000_SWSM_DRV_LOAD);
+        break;
+    default:
+ break;
+ }
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets the {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For the AMT version of the f/w (only
+ * with 82573) this means that the network i/f is open.
+ *
+ **/
+
+static inline void
+e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+ uint32_t ctrl_ext;
+ uint32_t swsm;
+ /* Let firmware know the driver has taken over */
+ switch (adapter->hw.mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ break;
+ case e1000_82573:
+ swsm = E1000_READ_REG(&adapter->hw, SWSM);
+ E1000_WRITE_REG(&adapter->hw, SWSM,
+ swsm | E1000_SWSM_DRV_LOAD);
+ break;
+ default:
+ break;
+ }
+}
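+
+The intended pairing, as wired up further down in this patch:
+
+/* probe / open   -> e1000_get_hw_control()      sets DRV_LOAD
+ * close / remove -> e1000_release_hw_control()  clears DRV_LOAD
+ * On 82573 parts with AMT f/w the set/clear is deferred to open/close,
+ * so the f/w keeps servicing the port while the interface is down. */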
+
int
e1000_up(struct e1000_adapter *adapter)
{
@@ -329,10 +461,10 @@ e1000_up(struct e1000_adapter *adapter)
/* hardware has been reset, we need to reload some things */
/* Reset the PHY if it was previously powered down */
- if(adapter->hw.media_type == e1000_media_type_copper) {
+ if (adapter->hw.media_type == e1000_media_type_copper) {
uint16_t mii_reg;
e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
- if(mii_reg & MII_CR_POWER_DOWN)
+ if (mii_reg & MII_CR_POWER_DOWN)
e1000_phy_reset(&adapter->hw);
}
@@ -343,20 +475,26 @@ e1000_up(struct e1000_adapter *adapter)
e1000_configure_tx(adapter);
e1000_setup_rctl(adapter);
e1000_configure_rx(adapter);
- for (i = 0; i < adapter->num_queues; i++)
- adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
+ /* call E1000_DESC_UNUSED which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+ adapter->alloc_rx_buf(adapter, ring,
+ E1000_DESC_UNUSED(ring));
+ }
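
The helper named in the comment is defined in e1000.h; roughly:

#define E1000_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)

The trailing "- 1" is what reserves the one descriptor, keeping next_to_use from ever catching up to next_to_clean, so an empty ring stays distinguishable from a full one.
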
#ifdef CONFIG_PCI_MSI
- if(adapter->hw.mac_type > e1000_82547_rev_2) {
+ if (adapter->hw.mac_type > e1000_82547_rev_2) {
adapter->have_msi = TRUE;
- if((err = pci_enable_msi(adapter->pdev))) {
+ if ((err = pci_enable_msi(adapter->pdev))) {
DPRINTK(PROBE, ERR,
"Unable to allocate MSI interrupt Error: %d\n", err);
adapter->have_msi = FALSE;
}
}
#endif
- if((err = request_irq(adapter->pdev->irq, &e1000_intr,
+ if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
SA_SHIRQ | SA_SAMPLE_RANDOM,
netdev->name, netdev))) {
DPRINTK(PROBE, ERR,
@@ -364,6 +502,12 @@ e1000_up(struct e1000_adapter *adapter)
return err;
}
+#ifdef CONFIG_E1000_MQ
+ e1000_setup_queue_mapping(adapter);
+#endif
+
+ adapter->tx_queue_len = netdev->tx_queue_len;
+
mod_timer(&adapter->watchdog_timer, jiffies);
#ifdef CONFIG_E1000_NAPI
@@ -378,6 +522,8 @@ void
e1000_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+ e1000_check_mng_mode(&adapter->hw);
e1000_irq_disable(adapter);
#ifdef CONFIG_E1000_MQ
@@ -385,7 +531,7 @@ e1000_down(struct e1000_adapter *adapter)
#endif
free_irq(adapter->pdev->irq, netdev);
#ifdef CONFIG_PCI_MSI
- if(adapter->hw.mac_type > e1000_82547_rev_2 &&
+ if (adapter->hw.mac_type > e1000_82547_rev_2 &&
adapter->have_msi == TRUE)
pci_disable_msi(adapter->pdev);
#endif
@@ -396,6 +542,7 @@ e1000_down(struct e1000_adapter *adapter)
#ifdef CONFIG_E1000_NAPI
netif_poll_disable(netdev);
#endif
+ netdev->tx_queue_len = adapter->tx_queue_len;
adapter->link_speed = 0;
adapter->link_duplex = 0;
netif_carrier_off(netdev);
@@ -405,12 +552,16 @@ e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_tx_rings(adapter);
e1000_clean_all_rx_rings(adapter);
- /* If WoL is not enabled and management mode is not IAMT
- * Power down the PHY so no link is implied when interface is down */
- if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+    /* Power down the PHY so no link is implied when interface is down.
+     * The PHY cannot be powered down if any of the following is TRUE:
+ * (a) WoL is enabled
+ * (b) AMT is active
+ * (c) SoL/IDER session is active */
+ if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
adapter->hw.media_type == e1000_media_type_copper &&
- !e1000_check_mng_mode(&adapter->hw) &&
- !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
+ !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+ !mng_mode_enabled &&
+ !e1000_check_phy_reset_block(&adapter->hw)) {
uint16_t mii_reg;
e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
@@ -422,10 +573,8 @@ e1000_down(struct e1000_adapter *adapter)
void
e1000_reset(struct e1000_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
uint32_t pba, manc;
uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
- uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
@@ -448,19 +597,12 @@ e1000_reset(struct e1000_adapter *adapter)
break;
}
- if((adapter->hw.mac_type != e1000_82573) &&
- (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
+ if ((adapter->hw.mac_type != e1000_82573) &&
+ (adapter->netdev->mtu > E1000_RXBUFFER_8192))
pba -= 8; /* allocate more FIFO for Tx */
- /* send an XOFF when there is enough space in the
- * Rx FIFO to hold one extra full size Rx packet
- */
- fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
- ETHERNET_FCS_SIZE + 1;
- fc_low_water_mark = fc_high_water_mark + 8;
- }
- if(adapter->hw.mac_type == e1000_82547) {
+ if (adapter->hw.mac_type == e1000_82547) {
adapter->tx_fifo_head = 0;
adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
adapter->tx_fifo_size =
@@ -471,19 +613,21 @@ e1000_reset(struct e1000_adapter *adapter)
E1000_WRITE_REG(&adapter->hw, PBA, pba);
/* flow control settings */
- adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
- fc_high_water_mark;
- adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
- fc_low_water_mark;
+    /* Set the FC high water mark to 90% of the FIFO size.
+     * The hardware requires the low 3 bits to be clear */
+ fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+
+ adapter->hw.fc_high_water = fc_high_water_mark;
+ adapter->hw.fc_low_water = fc_high_water_mark - 8;
adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
adapter->hw.fc_send_xon = 1;
adapter->hw.fc = adapter->hw.original_fc;
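
To make the watermark arithmetic concrete (assuming pba counts KB, as the E1000_PBA_BYTES_SHIFT conversion it replaces suggests):

/* e.g. pba = 48 (a 48 KB Rx FIFO):
 *   fc_high_water_mark = ((48 * 9216) / 10) & 0xFFF8
 *                      = 44236 & 0xFFF8 = 44232 bytes  (~90% of 49152)
 *   fc_low_water       = 44232 - 8 = 44224 bytes
 * 9216/10 is 0.9 * 1024, i.e. 90% expressed in bytes per KB, and the
 * 0xFFF8 mask clears the 3 LSBs as required above. */
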
/* Allow time for pending master requests to run */
e1000_reset_hw(&adapter->hw);
- if(adapter->hw.mac_type >= e1000_82544)
+ if (adapter->hw.mac_type >= e1000_82544)
E1000_WRITE_REG(&adapter->hw, WUC, 0);
- if(e1000_init_hw(&adapter->hw))
+ if (e1000_init_hw(&adapter->hw))
DPRINTK(PROBE, ERR, "Hardware Error\n");
e1000_update_mng_vlan(adapter);
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -517,33 +661,31 @@ e1000_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct e1000_adapter *adapter;
unsigned long mmio_start, mmio_len;
- uint32_t ctrl_ext;
- uint32_t swsm;
static int cards_found = 0;
int i, err, pci_using_dac;
uint16_t eeprom_data;
uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
- if((err = pci_enable_device(pdev)))
+ if ((err = pci_enable_device(pdev)))
return err;
- if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+ if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
pci_using_dac = 1;
} else {
- if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+ if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
E1000_ERR("No usable DMA configuration, aborting\n");
return err;
}
pci_using_dac = 0;
}
- if((err = pci_request_regions(pdev, e1000_driver_name)))
+ if ((err = pci_request_regions(pdev, e1000_driver_name)))
return err;
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct e1000_adapter));
- if(!netdev) {
+ if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
}
@@ -562,15 +704,15 @@ e1000_probe(struct pci_dev *pdev,
mmio_len = pci_resource_len(pdev, BAR_0);
adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
- if(!adapter->hw.hw_addr) {
+ if (!adapter->hw.hw_addr) {
err = -EIO;
goto err_ioremap;
}
- for(i = BAR_1; i <= BAR_5; i++) {
- if(pci_resource_len(pdev, i) == 0)
+ for (i = BAR_1; i <= BAR_5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
continue;
- if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
adapter->hw.io_base = pci_resource_start(pdev, i);
break;
}
@@ -607,13 +749,13 @@ e1000_probe(struct pci_dev *pdev,
/* setup the private structure */
- if((err = e1000_sw_init(adapter)))
+ if ((err = e1000_sw_init(adapter)))
goto err_sw_init;
- if((err = e1000_check_phy_reset_block(&adapter->hw)))
+ if ((err = e1000_check_phy_reset_block(&adapter->hw)))
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
- if(adapter->hw.mac_type >= e1000_82543) {
+ if (adapter->hw.mac_type >= e1000_82543) {
netdev->features = NETIF_F_SG |
NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX |
@@ -622,16 +764,16 @@ e1000_probe(struct pci_dev *pdev,
}
#ifdef NETIF_F_TSO
- if((adapter->hw.mac_type >= e1000_82544) &&
+ if ((adapter->hw.mac_type >= e1000_82544) &&
(adapter->hw.mac_type != e1000_82547))
netdev->features |= NETIF_F_TSO;
#ifdef NETIF_F_TSO_IPV6
- if(adapter->hw.mac_type > e1000_82547_rev_2)
+ if (adapter->hw.mac_type > e1000_82547_rev_2)
netdev->features |= NETIF_F_TSO_IPV6;
#endif
#endif
- if(pci_using_dac)
+ if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
/* hard_start_xmit is safe against parallel locking */
@@ -639,14 +781,14 @@ e1000_probe(struct pci_dev *pdev,
adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
- /* before reading the EEPROM, reset the controller to
+ /* before reading the EEPROM, reset the controller to
* put the device in a known good starting state */
-
+
e1000_reset_hw(&adapter->hw);
/* make sure the EEPROM is good */
- if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
+ if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
@@ -654,12 +796,12 @@ e1000_probe(struct pci_dev *pdev,
/* copy the MAC address out of the EEPROM */
- if(e1000_read_mac_addr(&adapter->hw))
+ if (e1000_read_mac_addr(&adapter->hw))
DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
- if(!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
@@ -699,7 +841,7 @@ e1000_probe(struct pci_dev *pdev,
* enable the ACPI Magic Packet filter
*/
- switch(adapter->hw.mac_type) {
+ switch (adapter->hw.mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
case e1000_82543:
@@ -712,8 +854,7 @@ e1000_probe(struct pci_dev *pdev,
case e1000_82546:
case e1000_82546_rev_3:
case e1000_82571:
- if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
- && (adapter->hw.media_type == e1000_media_type_copper)) {
+        if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) {
e1000_read_eeprom(&adapter->hw,
EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
break;
@@ -724,31 +865,42 @@ e1000_probe(struct pci_dev *pdev,
EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
break;
}
- if(eeprom_data & eeprom_apme_mask)
+ if (eeprom_data & eeprom_apme_mask)
adapter->wol |= E1000_WUFC_MAG;
+ /* print bus type/speed/width info */
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+ ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+         (hw->bus_type == e1000_bus_type_pci_express ? " Express" : "")),
+ ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+ (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+ (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+ (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+ ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+ (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+ (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+ "32-bit"));
+ }
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
/* reset the hardware with the new settings */
e1000_reset(adapter);
- /* Let firmware know the driver has taken over */
- switch(adapter->hw.mac_type) {
- case e1000_82571:
- case e1000_82572:
- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
- E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
- ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
- break;
- case e1000_82573:
- swsm = E1000_READ_REG(&adapter->hw, SWSM);
- E1000_WRITE_REG(&adapter->hw, SWSM,
- swsm | E1000_SWSM_DRV_LOAD);
- break;
- default:
- break;
- }
+ /* If the controller is 82573 and f/w is AMT, do not set
+ * DRV_LOAD until the interface is up. For all other cases,
+ * let the f/w know that the h/w is now under the control
+ * of the driver. */
+ if (adapter->hw.mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(&adapter->hw))
+ e1000_get_hw_control(adapter);
strcpy(netdev->name, "eth%d");
- if((err = register_netdev(netdev)))
+ if ((err = register_netdev(netdev)))
goto err_register;
DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
@@ -782,47 +934,33 @@ e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- uint32_t ctrl_ext;
- uint32_t manc, swsm;
+ uint32_t manc;
#ifdef CONFIG_E1000_NAPI
int i;
#endif
flush_scheduled_work();
- if(adapter->hw.mac_type >= e1000_82540 &&
+ if (adapter->hw.mac_type >= e1000_82540 &&
adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
- if(manc & E1000_MANC_SMBUS_EN) {
+ if (manc & E1000_MANC_SMBUS_EN) {
manc |= E1000_MANC_ARP_EN;
E1000_WRITE_REG(&adapter->hw, MANC, manc);
}
}
- switch(adapter->hw.mac_type) {
- case e1000_82571:
- case e1000_82572:
- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
- E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
- ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
- break;
- case e1000_82573:
- swsm = E1000_READ_REG(&adapter->hw, SWSM);
- E1000_WRITE_REG(&adapter->hw, SWSM,
- swsm & ~E1000_SWSM_DRV_LOAD);
- break;
-
- default:
- break;
- }
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant. */
+ e1000_release_hw_control(adapter);
unregister_netdev(netdev);
#ifdef CONFIG_E1000_NAPI
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++)
__dev_put(&adapter->polling_netdev[i]);
#endif
- if(!e1000_check_phy_reset_block(&adapter->hw))
+ if (!e1000_check_phy_reset_block(&adapter->hw))
e1000_phy_hw_reset(&adapter->hw);
kfree(adapter->tx_ring);
@@ -881,19 +1019,19 @@ e1000_sw_init(struct e1000_adapter *adapter)
/* identify the MAC */
- if(e1000_set_mac_type(hw)) {
+ if (e1000_set_mac_type(hw)) {
DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
return -EIO;
}
/* initialize eeprom parameters */
- if(e1000_init_eeprom_params(hw)) {
+ if (e1000_init_eeprom_params(hw)) {
E1000_ERR("EEPROM initialization failed\n");
return -EIO;
}
- switch(hw->mac_type) {
+ switch (hw->mac_type) {
default:
break;
case e1000_82541:
@@ -912,7 +1050,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
/* Copper options */
- if(hw->media_type == e1000_media_type_copper) {
+ if (hw->media_type == e1000_media_type_copper) {
hw->mdix = AUTO_ALL_MODES;
hw->disable_polarity_correction = FALSE;
hw->master_slave = E1000_MASTER_SLAVE;
@@ -923,15 +1061,34 @@ e1000_sw_init(struct e1000_adapter *adapter)
switch (hw->mac_type) {
case e1000_82571:
case e1000_82572:
- adapter->num_queues = 2;
+ /* These controllers support 2 tx queues, but with a single
+ * qdisc implementation, multiple tx queues aren't quite as
+ * interesting. If we can find a logical way of mapping
+ * flows to a queue, then perhaps we can up the num_tx_queue
+ * count back to its default. Until then, we run the risk of
+ * terrible performance due to SACK overload. */
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 2;
break;
default:
- adapter->num_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
break;
}
- adapter->num_queues = min(adapter->num_queues, num_online_cpus());
+ adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
+ adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
+ DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
+ adapter->num_rx_queues,
+ ((adapter->num_rx_queues == 1)
+ ? ((num_online_cpus() > 1)
+ ? "(due to unsupported feature in current adapter)"
+ : "(due to unsupported system configuration)")
+ : ""));
+ DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
+ adapter->num_tx_queues);
#else
- adapter->num_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
#endif
if (e1000_alloc_queues(adapter)) {
@@ -940,17 +1097,14 @@ e1000_sw_init(struct e1000_adapter *adapter)
}
#ifdef CONFIG_E1000_NAPI
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->polling_netdev[i].priv = adapter;
adapter->polling_netdev[i].poll = &e1000_clean;
adapter->polling_netdev[i].weight = 64;
dev_hold(&adapter->polling_netdev[i]);
set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
}
-#endif
-
-#ifdef CONFIG_E1000_MQ
- e1000_setup_queue_mapping(adapter);
+ spin_lock_init(&adapter->tx_queue_lock);
#endif
atomic_set(&adapter->irq_sem, 1);
@@ -973,13 +1127,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
{
int size;
- size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+ size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
adapter->tx_ring = kmalloc(size, GFP_KERNEL);
if (!adapter->tx_ring)
return -ENOMEM;
memset(adapter->tx_ring, 0, size);
- size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+ size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
adapter->rx_ring = kmalloc(size, GFP_KERNEL);
if (!adapter->rx_ring) {
kfree(adapter->tx_ring);
@@ -988,7 +1142,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
memset(adapter->rx_ring, 0, size);
#ifdef CONFIG_E1000_NAPI
- size = sizeof(struct net_device) * adapter->num_queues;
+ size = sizeof(struct net_device) * adapter->num_rx_queues;
adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
if (!adapter->polling_netdev) {
kfree(adapter->tx_ring);
@@ -998,6 +1152,14 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
memset(adapter->polling_netdev, 0, size);
#endif
+#ifdef CONFIG_E1000_MQ
+ adapter->rx_sched_call_data.func = e1000_rx_schedule;
+ adapter->rx_sched_call_data.info = adapter->netdev;
+
+ adapter->cpu_netdev = alloc_percpu(struct net_device *);
+ adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+#endif
+
return E1000_SUCCESS;
}
@@ -1017,14 +1179,15 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter)
lock_cpu_hotplug();
i = 0;
for_each_online_cpu(cpu) {
- *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
+ *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
/* This is incomplete because we'd like to assign separate
* physical cpus to these netdev polling structures and
* avoid saturating a subset of cpus.
*/
- if (i < adapter->num_queues) {
+ if (i < adapter->num_rx_queues) {
*per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
- adapter->cpu_for_queue[i] = cpu;
+ adapter->rx_ring[i].cpu = cpu;
+ cpu_set(cpu, adapter->cpumask);
} else
*per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
@@ -1063,14 +1226,20 @@ e1000_open(struct net_device *netdev)
if ((err = e1000_setup_all_rx_resources(adapter)))
goto err_setup_rx;
- if((err = e1000_up(adapter)))
+ if ((err = e1000_up(adapter)))
goto err_up;
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- if((adapter->hw.mng_cookie.status &
+ if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
e1000_update_mng_vlan(adapter);
}
+ /* If AMT is enabled, let the firmware know that the network
+ * interface is now open */
+ if (adapter->hw.mac_type == e1000_82573 &&
+ e1000_check_mng_mode(&adapter->hw))
+ e1000_get_hw_control(adapter);
+
return E1000_SUCCESS;
err_up:
@@ -1105,10 +1274,17 @@ e1000_close(struct net_device *netdev)
e1000_free_all_tx_resources(adapter);
e1000_free_all_rx_resources(adapter);
- if((adapter->hw.mng_cookie.status &
+ if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
+
+ /* If AMT is enabled, let the firmware know that the network
+ * interface is now closed */
+ if (adapter->hw.mac_type == e1000_82573 &&
+ e1000_check_mng_mode(&adapter->hw))
+ e1000_release_hw_control(adapter);
+
return 0;
}
@@ -1153,7 +1329,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * txdr->count;
txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
- if(!txdr->buffer_info) {
+ if (!txdr->buffer_info) {
DPRINTK(PROBE, ERR,
"Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
@@ -1166,7 +1342,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
E1000_ROUNDUP(txdr->size, 4096);
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
- if(!txdr->desc) {
+ if (!txdr->desc) {
setup_tx_desc_die:
vfree(txdr->buffer_info);
DPRINTK(PROBE, ERR,
@@ -1182,8 +1358,8 @@ setup_tx_desc_die:
"at %p\n", txdr->size, txdr->desc);
/* Try again, without freeing the previous */
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
- if(!txdr->desc) {
/* Failed allocation, critical failure */
+ if (!txdr->desc) {
pci_free_consistent(pdev, txdr->size, olddesc, olddma);
goto setup_tx_desc_die;
}
@@ -1229,7 +1405,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
if (err) {
DPRINTK(PROBE, ERR,
@@ -1254,10 +1430,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
uint64_t tdba;
struct e1000_hw *hw = &adapter->hw;
uint32_t tdlen, tctl, tipg, tarc;
+ uint32_t ipgr1, ipgr2;
/* Setup the HW Tx Head and Tail descriptor pointers */
- switch (adapter->num_queues) {
+ switch (adapter->num_tx_queues) {
case 2:
tdba = adapter->tx_ring[1].dma;
tdlen = adapter->tx_ring[1].count *
@@ -1287,22 +1464,26 @@ e1000_configure_tx(struct e1000_adapter *adapter)
/* Set the default values for the Tx Inter Packet Gap timer */
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes)
+ tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+ else
+ tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
tipg = DEFAULT_82542_TIPG_IPGT;
- tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
- tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+ ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+ ipgr2 = DEFAULT_82542_TIPG_IPGR2;
break;
default:
- if (hw->media_type == e1000_media_type_fiber ||
- hw->media_type == e1000_media_type_internal_serdes)
- tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
- else
- tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
- tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
- tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+ ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+ ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+ break;
}
+ tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+ tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
E1000_WRITE_REG(hw, TIPG, tipg);
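
With the default constants (values assumed from e1000_hw.h: copper IPGT 8, IPGR1 8, IPGR2 6, shifts of 10 and 20), the packed value for a copper 82543+ part works out to:

/*   tipg = 8 | (8 << 10) | (6 << 20) = 0x00602008   */
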
/* Set the Tx Interrupt Delay register */
@@ -1378,7 +1559,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_ps_page) * rxdr->count;
rxdr->ps_page = kmalloc(size, GFP_KERNEL);
- if(!rxdr->ps_page) {
+ if (!rxdr->ps_page) {
vfree(rxdr->buffer_info);
DPRINTK(PROBE, ERR,
"Unable to allocate memory for the receive descriptor ring\n");
@@ -1388,7 +1569,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
- if(!rxdr->ps_page_dma) {
+ if (!rxdr->ps_page_dma) {
vfree(rxdr->buffer_info);
kfree(rxdr->ps_page);
DPRINTK(PROBE, ERR,
@@ -1397,7 +1578,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
}
memset(rxdr->ps_page_dma, 0, size);
- if(adapter->hw.mac_type <= e1000_82547_rev_2)
+ if (adapter->hw.mac_type <= e1000_82547_rev_2)
desc_len = sizeof(struct e1000_rx_desc);
else
desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1454,6 +1635,8 @@ setup_rx_desc_die:
rxdr->next_to_clean = 0;
rxdr->next_to_use = 0;
+ rxdr->rx_skb_top = NULL;
+ rxdr->rx_skb_prev = NULL;
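
What the two new fields are for (inferred from the e1000_clean_rx_ring() hunk below, which frees them):

/* rx_skb_top / rx_skb_prev: head and tail of a partially assembled
 * packet that spans multiple Rx descriptors, carried across calls to
 * the clean routine instead of being dropped at each invocation. */
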
return 0;
}
@@ -1475,7 +1658,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
if (err) {
DPRINTK(PROBE, ERR,
@@ -1498,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
{
uint32_t rctl, rfctl;
uint32_t psrctl = 0;
-#ifdef CONFIG_E1000_PACKET_SPLIT
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
uint32_t pages = 0;
#endif
@@ -1510,7 +1693,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
- if(adapter->hw.tbi_compatibility_on == 1)
+ if (adapter->hw.mac_type > e1000_82543)
+ rctl |= E1000_RCTL_SECRC;
+
+ if (adapter->hw.tbi_compatibility_on == 1)
rctl |= E1000_RCTL_SBP;
else
rctl &= ~E1000_RCTL_SBP;
@@ -1521,32 +1707,17 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
rctl |= E1000_RCTL_LPE;
/* Setup buffer sizes */
- if(adapter->hw.mac_type >= e1000_82571) {
+ if (adapter->hw.mac_type >= e1000_82571) {
/* We can now specify buffers in 1K increments.
* BSIZE and BSEX are ignored in this case. */
rctl |= adapter->rx_buffer_len << 0x11;
} else {
rctl &= ~E1000_RCTL_SZ_4096;
- rctl |= E1000_RCTL_BSEX;
- switch (adapter->rx_buffer_len) {
- case E1000_RXBUFFER_2048:
- default:
- rctl |= E1000_RCTL_SZ_2048;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case E1000_RXBUFFER_4096:
- rctl |= E1000_RCTL_SZ_4096;
- break;
- case E1000_RXBUFFER_8192:
- rctl |= E1000_RCTL_SZ_8192;
- break;
- case E1000_RXBUFFER_16384:
- rctl |= E1000_RCTL_SZ_16384;
- break;
- }
+ rctl &= ~E1000_RCTL_BSEX;
+ rctl |= E1000_RCTL_SZ_2048;
}
-#ifdef CONFIG_E1000_PACKET_SPLIT
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
/* 82571 and greater support packet-split where the protocol
* header is placed in skb->data and the packet data is
* placed in pages hanging off of skb_shinfo(skb)->nr_frags.
@@ -1570,7 +1741,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
-
+
psrctl |= adapter->rx_ps_bsize0 >>
E1000_PSRCTL_BSIZE0_SHIFT;
@@ -1632,22 +1803,27 @@ e1000_configure_rx(struct e1000_adapter *adapter)
if (hw->mac_type >= e1000_82540) {
E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
- if(adapter->itr > 1)
+ if (adapter->itr > 1)
E1000_WRITE_REG(hw, ITR,
1000000000 / (adapter->itr * 256));
}
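
For reference, the interrupt throttle arithmetic:

/* ITR counts in 256 ns units, so this programs roughly adapter->itr
 * interrupts per second; e.g. itr = 8000 writes
 *   1000000000 / (8000 * 256) = 488  ->  488 * 256 ns ~= 125 us/interrupt */
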
if (hw->mac_type >= e1000_82571) {
- /* Reset delay timers after every interrupt */
ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ /* Reset delay timers after every interrupt */
ctrl_ext |= E1000_CTRL_EXT_CANC;
+#ifdef CONFIG_E1000_NAPI
+ /* Auto-Mask interrupts upon ICR read. */
+ ctrl_ext |= E1000_CTRL_EXT_IAME;
+#endif
E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ E1000_WRITE_REG(hw, IAM, ~0);
E1000_WRITE_FLUSH(hw);
}
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- switch (adapter->num_queues) {
+ switch (adapter->num_rx_queues) {
#ifdef CONFIG_E1000_MQ
case 2:
rdba = adapter->rx_ring[1].dma;
@@ -1674,7 +1850,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
}
#ifdef CONFIG_E1000_MQ
- if (adapter->num_queues > 1) {
+ if (adapter->num_rx_queues > 1) {
uint32_t random[10];
get_random_bytes(&random[0], 40);
@@ -1684,7 +1860,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
E1000_WRITE_REG(hw, RSSIM, 0);
}
- switch (adapter->num_queues) {
+ switch (adapter->num_rx_queues) {
case 2:
default:
reta = 0x00800080;
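
Each byte of this value is presumably one redirection-table entry, so once it is written out to the RETA array registers (the write loop falls outside this hunk) the pattern alternates incoming flows between the two Rx queues:

/* 0x80 0x00 0x80 0x00 -- alternate table entries -> queue 1 / queue 0 */
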
@@ -1716,13 +1892,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
/* Enable 82543 Receive Checksum Offload for TCP and UDP */
if (hw->mac_type >= e1000_82543) {
rxcsum = E1000_READ_REG(hw, RXCSUM);
- if(adapter->rx_csum == TRUE) {
+ if (adapter->rx_csum == TRUE) {
rxcsum |= E1000_RXCSUM_TUOFL;
/* Enable 82571 IPv4 payload checksum for UDP fragments
* Must be used in conjunction with packet-split. */
- if ((hw->mac_type >= e1000_82571) &&
- (adapter->rx_ps_pages)) {
+ if ((hw->mac_type >= e1000_82571) &&
+ (adapter->rx_ps_pages)) {
rxcsum |= E1000_RXCSUM_IPPCSE;
}
} else {
@@ -1776,7 +1952,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_tx_queues; i++)
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
@@ -1784,17 +1960,15 @@ static inline void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
- if(buffer_info->dma) {
+ if (buffer_info->dma) {
pci_unmap_page(adapter->pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
- buffer_info->dma = 0;
}
- if(buffer_info->skb) {
+ if (buffer_info->skb)
dev_kfree_skb_any(buffer_info->skb);
- buffer_info->skb = NULL;
- }
+ memset(buffer_info, 0, sizeof(struct e1000_buffer));
}
/**
@@ -1813,7 +1987,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
/* Free all the Tx ring sk_buffs */
- for(i = 0; i < tx_ring->count; i++) {
+ for (i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
}
@@ -1843,7 +2017,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_tx_queues; i++)
e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
@@ -1887,7 +2061,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++)
e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
@@ -1909,12 +2083,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
unsigned int i, j;
/* Free all the Rx ring sk_buffs */
-
- for(i = 0; i < rx_ring->count; i++) {
+ for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
- if(buffer_info->skb) {
- ps_page = &rx_ring->ps_page[i];
- ps_page_dma = &rx_ring->ps_page_dma[i];
+ if (buffer_info->skb) {
pci_unmap_single(pdev,
buffer_info->dma,
buffer_info->length,
@@ -1922,19 +2093,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
-
- for(j = 0; j < adapter->rx_ps_pages; j++) {
- if(!ps_page->ps_page[j]) break;
- pci_unmap_single(pdev,
- ps_page_dma->ps_page_dma[j],
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
- ps_page_dma->ps_page_dma[j] = 0;
- put_page(ps_page->ps_page[j]);
- ps_page->ps_page[j] = NULL;
- }
}
+ ps_page = &rx_ring->ps_page[i];
+ ps_page_dma = &rx_ring->ps_page_dma[i];
+ for (j = 0; j < adapter->rx_ps_pages; j++) {
+ if (!ps_page->ps_page[j]) break;
+ pci_unmap_page(pdev,
+ ps_page_dma->ps_page_dma[j],
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ ps_page_dma->ps_page_dma[j] = 0;
+ put_page(ps_page->ps_page[j]);
+ ps_page->ps_page[j] = NULL;
+ }
+ }
+
+    /* there may also be some cached data in our adapter */
+ if (rx_ring->rx_skb_top) {
+ dev_kfree_skb(rx_ring->rx_skb_top);
+
+ /* rx_skb_prev will be wiped out by rx_skb_top */
+ rx_ring->rx_skb_top = NULL;
+ rx_ring->rx_skb_prev = NULL;
}
+
size = sizeof(struct e1000_buffer) * rx_ring->count;
memset(rx_ring->buffer_info, 0, size);
size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -1963,7 +2145,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++)
e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
@@ -1984,7 +2166,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
E1000_WRITE_FLUSH(&adapter->hw);
mdelay(5);
- if(netif_running(netdev))
+ if (netif_running(netdev))
e1000_clean_all_rx_rings(adapter);
}
@@ -2000,12 +2182,14 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
E1000_WRITE_FLUSH(&adapter->hw);
mdelay(5);
- if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
e1000_pci_set_mwi(&adapter->hw);
- if(netif_running(netdev)) {
+ if (netif_running(netdev)) {
+ /* No need to loop, because 82542 supports only 1 queue */
+ struct e1000_rx_ring *ring = &adapter->rx_ring[0];
e1000_configure_rx(adapter);
- e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
+ adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
}
}
@@ -2023,12 +2207,12 @@ e1000_set_mac(struct net_device *netdev, void *p)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
- if(!is_valid_ether_addr(addr->sa_data))
+ if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
/* 82542 2.0 needs to be in reset to write receive address registers */
- if(adapter->hw.mac_type == e1000_82542_rev2_0)
+ if (adapter->hw.mac_type == e1000_82542_rev2_0)
e1000_enter_82542_rst(adapter);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -2042,17 +2226,17 @@ e1000_set_mac(struct net_device *netdev, void *p)
/* activate the work around */
adapter->hw.laa_is_present = 1;
- /* Hold a copy of the LAA in RAR[14] This is done so that
- * between the time RAR[0] gets clobbered and the time it
- * gets fixed (in e1000_watchdog), the actual LAA is in one
+ /* Hold a copy of the LAA in RAR[14]. This is done so that
+ * between the time RAR[0] gets clobbered and the time it
+ * gets fixed (in e1000_watchdog), the actual LAA is in one
* of the RARs and no incoming packets directed to this port
- * are dropped. Eventaully the LAA will be in RAR[0] and
+ * are dropped. Eventually the LAA will be in RAR[0] and
* RAR[14] */
- e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
+ e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
E1000_RAR_ENTRIES - 1);
}
- if(adapter->hw.mac_type == e1000_82542_rev2_0)
+ if (adapter->hw.mac_type == e1000_82542_rev2_0)
e1000_leave_82542_rst(adapter);
return 0;
@@ -2086,9 +2270,9 @@ e1000_set_multi(struct net_device *netdev)
rctl = E1000_READ_REG(hw, RCTL);
- if(netdev->flags & IFF_PROMISC) {
+ if (netdev->flags & IFF_PROMISC) {
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- } else if(netdev->flags & IFF_ALLMULTI) {
+ } else if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
rctl &= ~E1000_RCTL_UPE;
} else {
@@ -2099,7 +2283,7 @@ e1000_set_multi(struct net_device *netdev)
/* 82542 2.0 needs to be in reset to write receive address registers */
- if(hw->mac_type == e1000_82542_rev2_0)
+ if (hw->mac_type == e1000_82542_rev2_0)
e1000_enter_82542_rst(adapter);
/* load the first 14 multicast address into the exact filters 1-14
@@ -2109,7 +2293,7 @@ e1000_set_multi(struct net_device *netdev)
*/
mc_ptr = netdev->mc_list;
- for(i = 1; i < rar_entries; i++) {
+ for (i = 1; i < rar_entries; i++) {
if (mc_ptr) {
e1000_rar_set(hw, mc_ptr->dmi_addr, i);
mc_ptr = mc_ptr->next;
@@ -2121,17 +2305,17 @@ e1000_set_multi(struct net_device *netdev)
/* clear the old settings from the multicast hash table */
- for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
+ for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* load any remaining addresses into the hash table */
- for(; mc_ptr; mc_ptr = mc_ptr->next) {
+ for (; mc_ptr; mc_ptr = mc_ptr->next) {
hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
e1000_mta_set(hw, hash_value);
}
- if(hw->mac_type == e1000_82542_rev2_0)
+ if (hw->mac_type == e1000_82542_rev2_0)
e1000_leave_82542_rst(adapter);
}
@@ -2157,8 +2341,8 @@ e1000_82547_tx_fifo_stall(unsigned long data)
struct net_device *netdev = adapter->netdev;
uint32_t tctl;
- if(atomic_read(&adapter->tx_fifo_stall)) {
- if((E1000_READ_REG(&adapter->hw, TDT) ==
+ if (atomic_read(&adapter->tx_fifo_stall)) {
+ if ((E1000_READ_REG(&adapter->hw, TDT) ==
E1000_READ_REG(&adapter->hw, TDH)) &&
(E1000_READ_REG(&adapter->hw, TDFT) ==
E1000_READ_REG(&adapter->hw, TDFH)) &&
@@ -2204,24 +2388,24 @@ static void
e1000_watchdog_task(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct e1000_tx_ring *txdr = &adapter->tx_ring[0];
+ struct e1000_tx_ring *txdr = adapter->tx_ring;
uint32_t link;
e1000_check_for_link(&adapter->hw);
if (adapter->hw.mac_type == e1000_82573) {
e1000_enable_tx_pkt_filtering(&adapter->hw);
- if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
+ if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
e1000_update_mng_vlan(adapter);
- }
+ }
- if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
+ if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
!(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
link = !adapter->hw.serdes_link_down;
else
link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
- if(link) {
- if(!netif_carrier_ok(netdev)) {
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
e1000_get_speed_and_duplex(&adapter->hw,
&adapter->link_speed,
&adapter->link_duplex);
@@ -2231,13 +2415,28 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
adapter->link_duplex == FULL_DUPLEX ?
"Full Duplex" : "Half Duplex");
+ /* tweak tx_queue_len according to speed/duplex */
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ adapter->tx_timeout_factor = 1;
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ netdev->tx_queue_len = 10;
+ adapter->tx_timeout_factor = 8;
+ break;
+ case SPEED_100:
+ netdev->tx_queue_len = 100;
+ break;
+ }
+ }
+
netif_carrier_on(netdev);
netif_wake_queue(netdev);
mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
adapter->smartspeed = 0;
}
} else {
- if(netif_carrier_ok(netdev)) {
+ if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
DPRINTK(LINK, INFO, "NIC Link is Down\n");
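The hunk above ties both the qdisc length and the hang-detection window to the negotiated link. A hedged restatement as a standalone helper (names and the 1000-packet default are assumptions, not driver values): slow half-duplex links get a shorter queue, and tx_timeout_factor stretches the time_stamp + factor * HZ deadline checked later in e1000_clean_tx_irq.

static void link_tune(unsigned int speed_mbps, int half_duplex,
		      unsigned long *qlen, unsigned int *factor)
{
	*qlen = 1000;	/* assumed default tx_queue_len */
	*factor = 1;	/* hang check fires after factor * HZ jiffies */
	if (half_duplex) {
		switch (speed_mbps) {
		case 10:
			*qlen = 10;
			*factor = 8;	/* be 8x more patient at 10 Mb half */
			break;
		case 100:
			*qlen = 100;
			break;
		}
	}
}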
@@ -2263,7 +2462,10 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
e1000_update_adaptive(&adapter->hw);
- if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) {
+#ifdef CONFIG_E1000_MQ
+ txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
+#endif
+ if (!netif_carrier_ok(netdev)) {
if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
@@ -2274,12 +2476,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
}
/* Dynamic mode for Interrupt Throttle Rate (ITR) */
- if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
+ if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
/* Symmetric Tx/Rx gets a reduced ITR=2000; Total
* asymmetrical Tx or Rx gets ITR=8000; everyone
* else is between 2000-8000. */
uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
- uint32_t dif = (adapter->gotcl > adapter->gorcl ?
+ uint32_t dif = (adapter->gotcl > adapter->gorcl ?
adapter->gotcl - adapter->gorcl :
adapter->gorcl - adapter->gotcl) / 10000;
uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
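A worked instance of the heuristic above, with made-up counter values, wrapped as a pure function for clarity (the helper name is hypothetical):

#include <stdint.h>

static uint32_t dynamic_itr(uint32_t gotcl, uint32_t gorcl)
{
	uint32_t goc = (gotcl + gorcl) / 10000;
	uint32_t dif = (gotcl > gorcl ? gotcl - gorcl
				      : gorcl - gotcl) / 10000;
	return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
}

/* dynamic_itr(700000, 100000): goc = 80, dif = 60,
 * itr = 60 * 6000 / 80 + 2000 = 6500 -- mostly-asymmetric traffic
 * lands between the symmetric floor (2000) and the fully
 * asymmetric ceiling (8000), since dif can never exceed goc. */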
@@ -2292,7 +2494,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = TRUE;
- /* With 82571 controllers, LAA may be overwritten due to controller
+ /* With 82571 controllers, LAA may be overwritten due to controller
* reset from the other port. Set the appropriate LAA in RAR[0] */
if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
@@ -2314,13 +2516,14 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
{
#ifdef NETIF_F_TSO
struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
unsigned int i;
uint32_t cmd_length = 0;
uint16_t ipcse = 0, tucse, mss;
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
int err;
- if(skb_shinfo(skb)->tso_size) {
+ if (skb_shinfo(skb)->tso_size) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
@@ -2329,7 +2532,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
mss = skb_shinfo(skb)->tso_size;
- if(skb->protocol == ntohs(ETH_P_IP)) {
+ if (skb->protocol == ntohs(ETH_P_IP)) {
skb->nh.iph->tot_len = 0;
skb->nh.iph->check = 0;
skb->h.th->check =
@@ -2341,7 +2544,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb->h.raw - skb->data - 1;
#ifdef NETIF_F_TSO_IPV6
- } else if(skb->protocol == ntohs(ETH_P_IPV6)) {
+ } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
skb->nh.ipv6h->payload_len = 0;
skb->h.th->check =
~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -2363,6 +2566,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
i = tx_ring->next_to_use;
context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
context_desc->lower_setup.ip_fields.ipcss = ipcss;
context_desc->lower_setup.ip_fields.ipcso = ipcso;
@@ -2374,14 +2578,16 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+ buffer_info->time_stamp = jiffies;
+
if (++i == tx_ring->count) i = 0;
tx_ring->next_to_use = i;
- return 1;
+ return TRUE;
}
#endif
- return 0;
+ return FALSE;
}
static inline boolean_t
@@ -2389,13 +2595,15 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb)
{
struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
unsigned int i;
uint8_t css;
- if(likely(skb->ip_summed == CHECKSUM_HW)) {
+ if (likely(skb->ip_summed == CHECKSUM_HW)) {
css = skb->h.raw - skb->data;
i = tx_ring->next_to_use;
+ buffer_info = &tx_ring->buffer_info[i];
context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
context_desc->upper_setup.tcp_fields.tucss = css;
@@ -2404,6 +2612,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
+ buffer_info->time_stamp = jiffies;
+
if (unlikely(++i == tx_ring->count)) i = 0;
tx_ring->next_to_use = i;
@@ -2429,7 +2639,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
i = tx_ring->next_to_use;
- while(len) {
+ while (len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
@@ -2445,7 +2655,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
/* Workaround for premature desc write-backs
* in TSO mode. Append 4-byte sentinel desc */
- if(unlikely(mss && !nr_frags && size == len && size > 8))
+ if (unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4;
#endif
/* work-around for errata 10 and it applies
@@ -2453,13 +2663,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
* The fix is to make sure that the first descriptor of a
* packet is smaller than 2048 - 16 - 16 (or 2016) bytes
*/
- if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+ if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
(size > 2015) && count == 0))
size = 2015;
-
+
/* Workaround for potential 82544 hang in PCI-X. Avoid
* terminating buffers within evenly-aligned dwords. */
- if(unlikely(adapter->pcix_82544 &&
+ if (unlikely(adapter->pcix_82544 &&
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
size > 4))
size -= 4;
@@ -2475,29 +2685,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
len -= size;
offset += size;
count++;
- if(unlikely(++i == tx_ring->count)) i = 0;
+ if (unlikely(++i == tx_ring->count)) i = 0;
}
- for(f = 0; f < nr_frags; f++) {
+ for (f = 0; f < nr_frags; f++) {
struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
offset = frag->page_offset;
- while(len) {
+ while (len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
/* Workaround for premature desc write-backs
* in TSO mode. Append 4-byte sentinel desc */
- if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
size -= 4;
#endif
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
* dwords. */
- if(unlikely(adapter->pcix_82544 &&
+ if (unlikely(adapter->pcix_82544 &&
!((unsigned long)(frag->page+offset+size-1) & 4) &&
size > 4))
size -= 4;
@@ -2514,7 +2724,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
len -= size;
offset += size;
count++;
- if(unlikely(++i == tx_ring->count)) i = 0;
+ if (unlikely(++i == tx_ring->count)) i = 0;
}
}
@@ -2534,35 +2744,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
unsigned int i;
- if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+ if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
- if(likely(tx_flags & E1000_TX_FLAGS_IPV4))
+ if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
txd_upper |= E1000_TXD_POPTS_IXSM << 8;
}
- if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+ if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
}
- if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+ if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
txd_lower |= E1000_TXD_CMD_VLE;
txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
}
i = tx_ring->next_to_use;
- while(count--) {
+ while (count--) {
buffer_info = &tx_ring->buffer_info[i];
tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
tx_desc->lower.data =
cpu_to_le32(txd_lower | buffer_info->length);
tx_desc->upper.data = cpu_to_le32(txd_upper);
- if(unlikely(++i == tx_ring->count)) i = 0;
+ if (unlikely(++i == tx_ring->count)) i = 0;
}
tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -2597,20 +2807,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
- if(adapter->link_duplex != HALF_DUPLEX)
+ if (adapter->link_duplex != HALF_DUPLEX)
goto no_fifo_stall_required;
- if(atomic_read(&adapter->tx_fifo_stall))
+ if (atomic_read(&adapter->tx_fifo_stall))
return 1;
- if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+ if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
atomic_set(&adapter->tx_fifo_stall, 1);
return 1;
}
no_fifo_stall_required:
adapter->tx_fifo_head += skb_fifo_len;
- if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
+ if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
adapter->tx_fifo_head -= adapter->tx_fifo_size;
return 0;
}
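The accounting above treats the 82547's Tx FIFO as a ring: the head advances by each packet's padded length and wraps once past the end. A minimal sketch of just that wrap, with assumed names:

static void fifo_advance(unsigned int *head, unsigned int padded_len,
			 unsigned int fifo_size)
{
	*head += padded_len;
	if (*head >= fifo_size)		/* wrap a single time */
		*head -= fifo_size;
}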
@@ -2621,27 +2831,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
{
struct e1000_hw *hw = &adapter->hw;
uint16_t length, offset;
- if(vlan_tx_tag_present(skb)) {
- if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+ if (vlan_tx_tag_present(skb)) {
+ if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
( adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
return 0;
}
- if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
+ if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
struct ethhdr *eth = (struct ethhdr *) skb->data;
- if((htons(ETH_P_IP) == eth->h_proto)) {
- const struct iphdr *ip =
+ if ((htons(ETH_P_IP) == eth->h_proto)) {
+ const struct iphdr *ip =
(struct iphdr *)((uint8_t *)skb->data+14);
- if(IPPROTO_UDP == ip->protocol) {
- struct udphdr *udp =
- (struct udphdr *)((uint8_t *)ip +
+ if (IPPROTO_UDP == ip->protocol) {
+ struct udphdr *udp =
+ (struct udphdr *)((uint8_t *)ip +
(ip->ihl << 2));
- if(ntohs(udp->dest) == 67) {
+ if (ntohs(udp->dest) == 67) {
offset = (uint8_t *)udp + 8 - skb->data;
length = skb->len - offset;
return e1000_mng_write_dhcp_info(hw,
- (uint8_t *)udp + 8,
+ (uint8_t *)udp + 8,
length);
}
}
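The walk above assumes an untagged Ethernet header (14 bytes) followed by IPv4, whose header length is ihl scaled by 4, then UDP, whose fixed 8-byte header precedes the DHCP payload. The same offset arithmetic factored out as a sketch (hypothetical helper; the caller is assumed to have validated lengths as the function above does):

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/skbuff.h>

static unsigned int dhcp_payload_offset(const struct sk_buff *skb)
{
	const struct iphdr *ip =
		(const struct iphdr *)(skb->data + 14);
	const struct udphdr *udp =
		(const struct udphdr *)((const uint8_t *)ip +
					(ip->ihl << 2));
	/* payload starts 8 bytes past the UDP header */
	return (const uint8_t *)udp + 8 - skb->data;
}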
@@ -2664,7 +2874,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int nr_frags = 0;
unsigned int mss = 0;
int count = 0;
- int tso;
+ int tso;
unsigned int f;
len -= skb->data_len;
@@ -2687,16 +2897,35 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
* 4 = ceil(buffer len/mss). To make sure we don't
* overrun the FIFO, adjust the max buffer len if mss
* drops. */
- if(mss) {
+ if (mss) {
+ uint8_t hdr_len;
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
+
+ /* TSO Workaround for 82571/2 Controllers -- if skb->data
+ * points to just header, pull a few bytes of payload from
+ * frags into skb->data */
+ hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+ if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
+ (adapter->hw.mac_type == e1000_82571 ||
+ adapter->hw.mac_type == e1000_82572)) {
+ unsigned int pull_size;
+ pull_size = min((unsigned int)4, skb->data_len);
+ if (!__pskb_pull_tail(skb, pull_size)) {
+ printk(KERN_ERR "__pskb_pull_tail failed.\n");
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+ len = skb->len - skb->data_len;
+ }
}
- if((mss) || (skb->ip_summed == CHECKSUM_HW))
+ /* reserve a descriptor for the offload context */
+ if ((mss) || (skb->ip_summed == CHECKSUM_HW))
count++;
count++;
#else
- if(skb->ip_summed == CHECKSUM_HW)
+ if (skb->ip_summed == CHECKSUM_HW)
count++;
#endif
@@ -2709,45 +2938,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count += TXD_USE_COUNT(len, max_txd_pwr);
- if(adapter->pcix_82544)
+ if (adapter->pcix_82544)
count++;
- /* work-around for errata 10 and it applies to all controllers
+ /* work-around for errata 10; it applies to all controllers
* in PCI-X mode, so add one more descriptor to the count
*/
- if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+ if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
(len > 2015)))
count++;
nr_frags = skb_shinfo(skb)->nr_frags;
- for(f = 0; f < nr_frags; f++)
+ for (f = 0; f < nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
max_txd_pwr);
- if(adapter->pcix_82544)
+ if (adapter->pcix_82544)
count += nr_frags;
-#ifdef NETIF_F_TSO
- /* TSO Workaround for 82571/2 Controllers -- if skb->data
- * points to just header, pull a few bytes of payload from
- * frags into skb->data */
- if (skb_shinfo(skb)->tso_size) {
- uint8_t hdr_len;
- hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
- if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
- (adapter->hw.mac_type == e1000_82571 ||
- adapter->hw.mac_type == e1000_82572)) {
- unsigned int pull_size;
- pull_size = min((unsigned int)4, skb->data_len);
- if (!__pskb_pull_tail(skb, pull_size)) {
- printk(KERN_ERR "__pskb_pull_tail failed.\n");
- dev_kfree_skb_any(skb);
- return -EFAULT;
- }
- }
- }
-#endif
-
- if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
+ if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573))
e1000_transfer_dhcp_info(adapter, skb);
local_irq_save(flags);
@@ -2765,8 +2973,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- if(unlikely(adapter->hw.mac_type == e1000_82547)) {
- if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+ if (unlikely(adapter->hw.mac_type == e1000_82547)) {
+ if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
netif_stop_queue(netdev);
mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
@@ -2774,13 +2982,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
}
- if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+ if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
tx_flags |= E1000_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
}
first = tx_ring->next_to_use;
-
+
tso = e1000_tso(adapter, tx_ring, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
@@ -2833,6 +3041,7 @@ e1000_tx_timeout_task(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ adapter->tx_timeout_count++;
e1000_down(adapter);
e1000_up(adapter);
}
@@ -2850,7 +3059,7 @@ e1000_get_stats(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- e1000_update_stats(adapter);
+ /* only return the current stats */
return &adapter->net_stats;
}
@@ -2868,56 +3077,57 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
struct e1000_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
- if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
- (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
- return -EINVAL;
- }
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
- /* might want this to be bigger enum check... */
- /* 82571 controllers limit jumbo frame size to 10500 bytes */
- if ((adapter->hw.mac_type == e1000_82571 ||
- adapter->hw.mac_type == e1000_82572) &&
- max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
- DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
- "on 82571 and 82572 controllers.\n");
- return -EINVAL;
- }
-
- if(adapter->hw.mac_type == e1000_82573 &&
- max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
- DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
- "on 82573\n");
+ if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
return -EINVAL;
}
- if(adapter->hw.mac_type > e1000_82547_rev_2) {
- adapter->rx_buffer_len = max_frame;
- E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
- } else {
- if(unlikely((adapter->hw.mac_type < e1000_82543) &&
- (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
- DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
- "on 82542\n");
+ /* Adapter-specific max frame size limits. */
+ switch (adapter->hw.mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82573:
+ if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+ return -EINVAL;
+ }
+ break;
+ case e1000_82571:
+ case e1000_82572:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+ if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+ DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
return -EINVAL;
-
- } else {
- if(max_frame <= E1000_RXBUFFER_2048) {
- adapter->rx_buffer_len = E1000_RXBUFFER_2048;
- } else if(max_frame <= E1000_RXBUFFER_4096) {
- adapter->rx_buffer_len = E1000_RXBUFFER_4096;
- } else if(max_frame <= E1000_RXBUFFER_8192) {
- adapter->rx_buffer_len = E1000_RXBUFFER_8192;
- } else if(max_frame <= E1000_RXBUFFER_16384) {
- adapter->rx_buffer_len = E1000_RXBUFFER_16384;
- }
}
+ break;
+ default:
+ /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+ break;
}
+ /* Since the driver code now supports splitting a packet across
+ * multiple descriptors, most of the FIFO-related limitations on
+ * jumbo frame traffic have gone away; simply use 2k buffers
+ * for everything.
+ *
+ * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more; this pushes us to allocate from the next
+ * larger slab size,
+ * i.e. RXBUFFER_2048 --> size-4096 slab */
+
+ /* recent hardware supports 1KB granularity */
+ if (adapter->hw.mac_type > e1000_82547_rev_2) {
+ adapter->rx_buffer_len =
+ ((max_frame < E1000_RXBUFFER_2048) ?
+ max_frame : E1000_RXBUFFER_2048);
+ E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
+ } else
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+
netdev->mtu = new_mtu;
- if(netif_running(netdev)) {
+ if (netif_running(netdev)) {
e1000_down(adapter);
e1000_up(adapter);
}
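Under these rules every post-82547 part ends up with 2 KB receive buffers regardless of MTU. A worked sketch of the arithmetic (the helper is hypothetical; 14 + 4 are the Ethernet header and FCS, and the rounding mirrors what E1000_ROUNDUP(len, 1024) is assumed to do):

static unsigned int rx_buf_len(unsigned int mtu)
{
	unsigned int max_frame = mtu + 14 + 4;	/* header + FCS */
	unsigned int len = max_frame < 2048 ? max_frame : 2048;
	return (len + 1023) & ~1023u;		/* round up to 1 KB */
}

/* rx_buf_len(1500) == 2048 (1518 rounded up);
 * rx_buf_len(9000) == 2048 (capped; the frame spans descriptors). */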
@@ -3004,7 +3214,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
hw->collision_delta = E1000_READ_REG(hw, COLC);
adapter->stats.colc += hw->collision_delta;
- if(hw->mac_type >= e1000_82543) {
+ if (hw->mac_type >= e1000_82543) {
adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
@@ -3012,7 +3222,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
}
- if(hw->mac_type > e1000_82547_rev_2) {
+ if (hw->mac_type > e1000_82547_rev_2) {
adapter->stats.iac += E1000_READ_REG(hw, IAC);
adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
@@ -3037,12 +3247,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->net_stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.rlec + adapter->stats.mpc +
- adapter->stats.cexterr;
+ adapter->stats.rlec + adapter->stats.cexterr;
+ adapter->net_stats.rx_dropped = 0;
adapter->net_stats.rx_length_errors = adapter->stats.rlec;
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
- adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */
@@ -3057,14 +3266,14 @@ e1000_update_stats(struct e1000_adapter *adapter)
/* Phy Stats */
- if(hw->media_type == e1000_media_type_copper) {
- if((adapter->link_speed == SPEED_1000) &&
+ if (hw->media_type == e1000_media_type_copper) {
+ if ((adapter->link_speed == SPEED_1000) &&
(!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
adapter->phy_stats.idle_errors += phy_tmp;
}
- if((hw->mac_type <= e1000_82546) &&
+ if ((hw->mac_type <= e1000_82546) &&
(hw->phy_type == e1000_phy_m88) &&
!e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
adapter->phy_stats.receive_errors += phy_tmp;
@@ -3110,32 +3319,44 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
uint32_t icr = E1000_READ_REG(hw, ICR);
-#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI)
+#ifndef CONFIG_E1000_NAPI
int i;
+#else
+ /* Interrupt Auto-Mask...upon reading ICR,
+ * interrupts are masked. No need for the
+ * IMC write, but it does mean we should
+ * account for it ASAP. */
+ if (likely(hw->mac_type >= e1000_82571))
+ atomic_inc(&adapter->irq_sem);
#endif
- if(unlikely(!icr))
+ if (unlikely(!icr)) {
+#ifdef CONFIG_E1000_NAPI
+ if (hw->mac_type >= e1000_82571)
+ e1000_irq_enable(adapter);
+#endif
return IRQ_NONE; /* Not our interrupt */
+ }
- if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+ if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
mod_timer(&adapter->watchdog_timer, jiffies);
}
#ifdef CONFIG_E1000_NAPI
- atomic_inc(&adapter->irq_sem);
- E1000_WRITE_REG(hw, IMC, ~0);
- E1000_WRITE_FLUSH(hw);
+ if (unlikely(hw->mac_type < e1000_82571)) {
+ atomic_inc(&adapter->irq_sem);
+ E1000_WRITE_REG(hw, IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+ }
#ifdef CONFIG_E1000_MQ
if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
- cpu_set(adapter->cpu_for_queue[0],
- adapter->rx_sched_call_data.cpumask);
- for (i = 1; i < adapter->num_queues; i++) {
- cpu_set(adapter->cpu_for_queue[i],
- adapter->rx_sched_call_data.cpumask);
- atomic_inc(&adapter->irq_sem);
- }
- atomic_set(&adapter->rx_sched_call_data.count, i);
+ /* We must setup the cpumask once count == 0 since
+ * each cpu bit is cleared when the work is done. */
+ adapter->rx_sched_call_data.cpumask = adapter->cpumask;
+ atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
+ atomic_set(&adapter->rx_sched_call_data.count,
+ adapter->num_rx_queues);
smp_call_async_mask(&adapter->rx_sched_call_data);
} else {
printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
@@ -3149,26 +3370,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
#else /* if !CONFIG_E1000_NAPI */
/* Writing IMC and IMS is needed for 82547.
- Due to Hub Link bus being occupied, an interrupt
- de-assertion message is not able to be sent.
- When an interrupt assertion message is generated later,
- two messages are re-ordered and sent out.
- That causes APIC to think 82547 is in de-assertion
- state, while 82547 is in assertion state, resulting
- in dead lock. Writing IMC forces 82547 into
- de-assertion state.
- */
- if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
+ * Due to Hub Link bus being occupied, an interrupt
+ * de-assertion message is not able to be sent.
+ * When an interrupt assertion message is generated later,
+ * two messages are re-ordered and sent out.
+ * That causes APIC to think 82547 is in de-assertion
+ * state, while 82547 is in assertion state, resulting
+ * in deadlock. Writing IMC forces 82547 into
+ * de-assertion state.
+ */
+ if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
atomic_inc(&adapter->irq_sem);
E1000_WRITE_REG(hw, IMC, ~0);
}
- for(i = 0; i < E1000_MAX_INTR; i++)
- if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ for (i = 0; i < E1000_MAX_INTR; i++)
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
break;
- if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
+ if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
e1000_irq_enable(adapter);
#endif /* CONFIG_E1000_NAPI */
@@ -3187,7 +3408,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
{
struct e1000_adapter *adapter;
int work_to_do = min(*budget, poll_dev->quota);
- int tx_cleaned, i = 0, work_done = 0;
+ int tx_cleaned = 0, i = 0, work_done = 0;
/* Must NOT use netdev_priv macro here. */
adapter = poll_dev->priv;
@@ -3198,19 +3419,31 @@ e1000_clean(struct net_device *poll_dev, int *budget)
while (poll_dev != &adapter->polling_netdev[i]) {
i++;
- if (unlikely(i == adapter->num_queues))
+ if (unlikely(i == adapter->num_rx_queues))
BUG();
}
- tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+ if (likely(adapter->num_tx_queues == 1)) {
+ /* e1000_clean is called per-cpu. This lock protects
+ * tx_ring[0] from being cleaned by multiple cpus
+ * simultaneously. A failure obtaining the lock means
+ * tx_ring[0] is currently being cleaned anyway. */
+ if (spin_trylock(&adapter->tx_queue_lock)) {
+ tx_cleaned = e1000_clean_tx_irq(adapter,
+ &adapter->tx_ring[0]);
+ spin_unlock(&adapter->tx_queue_lock);
+ }
+ } else
+ tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+
adapter->clean_rx(adapter, &adapter->rx_ring[i],
&work_done, work_to_do);
*budget -= work_done;
poll_dev->quota -= work_done;
-
+
/* If no Tx and not enough Rx work done, exit the polling mode */
- if((!tx_cleaned && (work_done == 0)) ||
+ if ((!tx_cleaned && (work_done == 0)) ||
!netif_running(adapter->netdev)) {
quit_polling:
netif_rx_complete(poll_dev);
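The trylock above implements a single-cleaner idiom: any CPU polling NAPI may reap tx_ring[0], but only one needs to, and losing the race just means another CPU is already doing the work. The same pattern in isolation (hypothetical names):

#include <linux/spinlock.h>

static int try_clean(spinlock_t *lock, int (*clean)(void *), void *ring)
{
	int cleaned = 0;

	if (spin_trylock(lock)) {	/* never block in the poll path */
		cleaned = clean(ring);
		spin_unlock(lock);
	}
	return cleaned;	/* 0 can also mean: someone else is on it */
}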
@@ -3242,22 +3475,24 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
eop_desc = E1000_TX_DESC(*tx_ring, eop);
while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
- for(cleaned = FALSE; !cleaned; ) {
+ for (cleaned = FALSE; !cleaned; ) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
+#ifdef CONFIG_E1000_MQ
+ tx_ring->tx_stats.bytes += buffer_info->length;
+#endif
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
- tx_desc->buffer_addr = 0;
- tx_desc->lower.data = 0;
- tx_desc->upper.data = 0;
-
- if(unlikely(++i == tx_ring->count)) i = 0;
+ if (unlikely(++i == tx_ring->count)) i = 0;
}
- tx_ring->pkt++;
-
+#ifdef CONFIG_E1000_MQ
+ tx_ring->tx_stats.packets++;
+#endif
+
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
}
@@ -3266,7 +3501,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
spin_lock(&tx_ring->tx_lock);
- if(unlikely(cleaned && netif_queue_stopped(netdev) &&
+ if (unlikely(cleaned && netif_queue_stopped(netdev) &&
netif_carrier_ok(netdev)))
netif_wake_queue(netdev);
@@ -3276,32 +3511,31 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = FALSE;
- if (tx_ring->buffer_info[i].dma &&
- time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
+ if (tx_ring->buffer_info[eop].dma &&
+ time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+ adapter->tx_timeout_factor * HZ)
&& !(E1000_READ_REG(&adapter->hw, STATUS) &
- E1000_STATUS_TXOFF)) {
+ E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = E1000_TX_DESC(*tx_ring, eop);
DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+ " Tx Queue <%lu>\n"
" TDH <%x>\n"
" TDT <%x>\n"
" next_to_use <%x>\n"
" next_to_clean <%x>\n"
"buffer_info[next_to_clean]\n"
- " dma <%llx>\n"
" time_stamp <%lx>\n"
" next_to_watch <%x>\n"
" jiffies <%lx>\n"
" next_to_watch.status <%x>\n",
+ (unsigned long)(tx_ring - adapter->tx_ring),
readl(adapter->hw.hw_addr + tx_ring->tdh),
readl(adapter->hw.hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
- i,
- (unsigned long long)tx_ring->buffer_info[i].dma,
- tx_ring->buffer_info[i].time_stamp,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
eop,
jiffies,
eop_desc->upper.fields.status);
@@ -3329,21 +3563,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
skb->ip_summed = CHECKSUM_NONE;
/* 82543 or newer only */
- if(unlikely(adapter->hw.mac_type < e1000_82543)) return;
+ if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
/* Ignore Checksum bit is set */
- if(unlikely(status & E1000_RXD_STAT_IXSM)) return;
+ if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
/* TCP/UDP checksum error bit is set */
- if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
+ if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
/* let the stack verify checksum errors */
adapter->hw_csum_err++;
return;
}
/* TCP/UDP Checksum has not been calculated */
- if(adapter->hw.mac_type <= e1000_82547_rev_2) {
- if(!(status & E1000_RXD_STAT_TCPCS))
+ if (adapter->hw.mac_type <= e1000_82547_rev_2) {
+ if (!(status & E1000_RXD_STAT_TCPCS))
return;
} else {
- if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+ if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
return;
}
/* It must be a TCP or UDP packet with a valid checksum */
@@ -3379,46 +3613,87 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_rx_desc *rx_desc;
- struct e1000_buffer *buffer_info;
- struct sk_buff *skb;
+ struct e1000_rx_desc *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
unsigned long flags;
uint32_t length;
uint8_t last_byte;
unsigned int i;
- boolean_t cleaned = FALSE;
+ int cleaned_count = 0;
+ boolean_t cleaned = FALSE, multi_descriptor = FALSE;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
- while(rx_desc->status & E1000_RXD_STAT_DD) {
- buffer_info = &rx_ring->buffer_info[i];
+ while (rx_desc->status & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb, *next_skb;
+ u8 status;
#ifdef CONFIG_E1000_NAPI
- if(*work_done >= work_to_do)
+ if (*work_done >= work_to_do)
break;
(*work_done)++;
#endif
- cleaned = TRUE;
+ status = rx_desc->status;
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ if (++i == rx_ring->count) i = 0;
+ next_rxd = E1000_RX_DESC(*rx_ring, i);
+ next_buffer = &rx_ring->buffer_info[i];
+ next_skb = next_buffer->skb;
+ cleaned = TRUE;
+ cleaned_count++;
pci_unmap_single(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
- skb = buffer_info->skb;
length = le16_to_cpu(rx_desc->length);
- if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
- /* All receives must fit into a single buffer */
- E1000_DBG("%s: Receive packet consumed multiple"
- " buffers\n", netdev->name);
- dev_kfree_skb_irq(skb);
+ skb_put(skb, length);
+
+ if (!(status & E1000_RXD_STAT_EOP)) {
+ if (!rx_ring->rx_skb_top) {
+ rx_ring->rx_skb_top = skb;
+ rx_ring->rx_skb_top->len = length;
+ rx_ring->rx_skb_prev = skb;
+ } else {
+ if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
+ rx_ring->rx_skb_prev->next = skb;
+ skb->prev = rx_ring->rx_skb_prev;
+ } else {
+ skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
+ }
+ rx_ring->rx_skb_prev = skb;
+ rx_ring->rx_skb_top->data_len += length;
+ }
goto next_desc;
+ } else {
+ if (rx_ring->rx_skb_top) {
+ if (skb_shinfo(rx_ring->rx_skb_top)
+ ->frag_list) {
+ rx_ring->rx_skb_prev->next = skb;
+ skb->prev = rx_ring->rx_skb_prev;
+ } else
+ skb_shinfo(rx_ring->rx_skb_top)
+ ->frag_list = skb;
+
+ rx_ring->rx_skb_top->data_len += length;
+ rx_ring->rx_skb_top->len +=
+ rx_ring->rx_skb_top->data_len;
+
+ skb = rx_ring->rx_skb_top;
+ multi_descriptor = TRUE;
+ rx_ring->rx_skb_top = NULL;
+ rx_ring->rx_skb_prev = NULL;
+ }
}
- if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+ if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
last_byte = *(skb->data + length - 1);
- if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
+ if (TBI_ACCEPT(&adapter->hw, status,
rx_desc->errors, length, last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags);
e1000_tbi_adjust_stats(&adapter->hw,
@@ -3433,18 +3708,41 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
}
}
- /* Good Receive */
- skb_put(skb, length - ETHERNET_FCS_SIZE);
+ /* code added for copybreak, this should improve
+ * performance for small packets with large amounts
+ * of reassembly being done in the stack */
+#define E1000_CB_LENGTH 256
+ if ((length < E1000_CB_LENGTH) &&
+ !rx_ring->rx_skb_top &&
+ /* or maybe (status & E1000_RXD_STAT_EOP) && */
+ !multi_descriptor) {
+ struct sk_buff *new_skb =
+ dev_alloc_skb(length + NET_IP_ALIGN);
+ if (new_skb) {
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ new_skb->dev = netdev;
+ memcpy(new_skb->data - NET_IP_ALIGN,
+ skb->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = skb;
+ skb = new_skb;
+ skb_put(skb, length);
+ }
+ }
+
+ /* end copybreak code */
/* Receive Checksum Offload */
e1000_rx_checksum(adapter,
- (uint32_t)(rx_desc->status) |
+ (uint32_t)(status) |
((uint32_t)(rx_desc->errors) << 24),
rx_desc->csum, skb);
+
skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_E1000_NAPI
- if(unlikely(adapter->vlgrp &&
- (rx_desc->status & E1000_RXD_STAT_VP))) {
+ if (unlikely(adapter->vlgrp &&
+ (status & E1000_RXD_STAT_VP))) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->special) &
E1000_RXD_SPC_VLAN_MASK);
@@ -3452,8 +3750,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
netif_receive_skb(skb);
}
#else /* CONFIG_E1000_NAPI */
- if(unlikely(adapter->vlgrp &&
- (rx_desc->status & E1000_RXD_STAT_VP))) {
+ if (unlikely(adapter->vlgrp &&
+ (status & E1000_RXD_STAT_VP))) {
vlan_hwaccel_rx(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->special) &
E1000_RXD_SPC_VLAN_MASK);
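Copybreak in isolation: for frames below the threshold it is cheaper to copy into a freshly allocated small skb and give the original 2 KB buffer straight back to the ring (it is recycled through buffer_info->skb and the map_skb path in e1000_alloc_rx_buffers) than to hand the large buffer up the stack. A hedged sketch, not the driver code verbatim:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/string.h>

#define CB_LENGTH 256	/* assumed copybreak threshold */

static struct sk_buff *maybe_copybreak(struct sk_buff *rx_skb,
				       unsigned int len,
				       struct net_device *dev)
{
	struct sk_buff *small;

	if (len >= CB_LENGTH)
		return rx_skb;		/* big frame: pass it as-is */
	small = dev_alloc_skb(len + NET_IP_ALIGN);
	if (!small)
		return rx_skb;		/* allocation failed: fall back */
	skb_reserve(small, NET_IP_ALIGN);	/* align the IP header */
	small->dev = dev;
	memcpy(skb_put(small, len), rx_skb->data, len);
	return small;	/* caller recycles rx_skb into the ring */
}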
@@ -3462,17 +3760,28 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
}
#endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies;
- rx_ring->pkt++;
+#ifdef CONFIG_E1000_MQ
+ rx_ring->rx_stats.packets++;
+ rx_ring->rx_stats.bytes += length;
+#endif
next_desc:
rx_desc->status = 0;
- buffer_info->skb = NULL;
- if(unlikely(++i == rx_ring->count)) i = 0;
- rx_desc = E1000_RX_DESC(*rx_ring, i);
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
}
rx_ring->next_to_clean = i;
- adapter->alloc_rx_buf(adapter, rx_ring);
+
+ cleaned_count = E1000_DESC_UNUSED(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
return cleaned;
}
@@ -3492,52 +3801,59 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
#endif
{
- union e1000_rx_desc_packet_split *rx_desc;
+ union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_buffer *buffer_info;
+ struct e1000_buffer *buffer_info, *next_buffer;
struct e1000_ps_page *ps_page;
struct e1000_ps_page_dma *ps_page_dma;
- struct sk_buff *skb;
+ struct sk_buff *skb, *next_skb;
unsigned int i, j;
uint32_t length, staterr;
+ int cleaned_count = 0;
boolean_t cleaned = FALSE;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
- while(staterr & E1000_RXD_STAT_DD) {
- buffer_info = &rx_ring->buffer_info[i];
+ while (staterr & E1000_RXD_STAT_DD) {
ps_page = &rx_ring->ps_page[i];
ps_page_dma = &rx_ring->ps_page_dma[i];
#ifdef CONFIG_E1000_NAPI
- if(unlikely(*work_done >= work_to_do))
+ if (unlikely(*work_done >= work_to_do))
break;
(*work_done)++;
#endif
+ skb = buffer_info->skb;
+
+ if (++i == rx_ring->count) i = 0;
+ next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
+ next_buffer = &rx_ring->buffer_info[i];
+ next_skb = next_buffer->skb;
+
cleaned = TRUE;
+ cleaned_count++;
pci_unmap_single(pdev, buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
- skb = buffer_info->skb;
-
- if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
+ if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
E1000_DBG("%s: Packet Split buffers didn't pick up"
" the full packet\n", netdev->name);
dev_kfree_skb_irq(skb);
goto next_desc;
}
- if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+ if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
dev_kfree_skb_irq(skb);
goto next_desc;
}
length = le16_to_cpu(rx_desc->wb.middle.length0);
- if(unlikely(!length)) {
+ if (unlikely(!length)) {
E1000_DBG("%s: Last part of the packet spanning"
" multiple descriptors\n", netdev->name);
dev_kfree_skb_irq(skb);
@@ -3547,8 +3863,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
/* Good Receive */
skb_put(skb, length);
- for(j = 0; j < adapter->rx_ps_pages; j++) {
- if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
+ for (j = 0; j < adapter->rx_ps_pages; j++) {
+ if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
break;
pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
@@ -3568,15 +3884,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
skb->protocol = eth_type_trans(skb, netdev);
- if(likely(rx_desc->wb.upper.header_status &
- E1000_RXDPS_HDRSTAT_HDRSP)) {
+ if (likely(rx_desc->wb.upper.header_status &
+ E1000_RXDPS_HDRSTAT_HDRSP))
adapter->rx_hdr_split++;
-#ifdef HAVE_RX_ZERO_COPY
- skb_shinfo(skb)->zero_copy = TRUE;
-#endif
- }
#ifdef CONFIG_E1000_NAPI
- if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+ if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->wb.middle.vlan) &
E1000_RXD_SPC_VLAN_MASK);
@@ -3584,7 +3896,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
netif_receive_skb(skb);
}
#else /* CONFIG_E1000_NAPI */
- if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+ if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
vlan_hwaccel_rx(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->wb.middle.vlan) &
E1000_RXD_SPC_VLAN_MASK);
@@ -3593,18 +3905,31 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
}
#endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies;
- rx_ring->pkt++;
+#ifdef CONFIG_E1000_MQ
+ rx_ring->rx_stats.packets++;
+ rx_ring->rx_stats.bytes += length;
+#endif
next_desc:
rx_desc->wb.middle.status_error &= ~0xFF;
buffer_info->skb = NULL;
- if(unlikely(++i == rx_ring->count)) i = 0;
- rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
}
rx_ring->next_to_clean = i;
- adapter->alloc_rx_buf(adapter, rx_ring);
+
+ cleaned_count = E1000_DESC_UNUSED(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
return cleaned;
}
@@ -3616,7 +3941,8 @@ next_desc:
static void
e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring)
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -3629,11 +3955,18 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
- while(!buffer_info->skb) {
- skb = dev_alloc_skb(bufsz);
+ while (cleaned_count--) {
+ if (!(skb = buffer_info->skb))
+ skb = dev_alloc_skb(bufsz);
+ else {
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
+
- if(unlikely(!skb)) {
+ if (unlikely(!skb)) {
/* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
break;
}
@@ -3670,6 +4003,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
+map_skb:
buffer_info->dma = pci_map_single(pdev,
skb->data,
adapter->rx_buffer_len,
@@ -3695,20 +4029,23 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
- if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->rdt);
- }
-
- if(unlikely(++i == rx_ring->count)) i = 0;
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
- rx_ring->next_to_use = i;
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+ }
}
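The tail update above is subtle: next_to_use indexes the next empty slot, but (under the assumption this patch encodes) the hardware is handed the index of the last initialized descriptor, hence the decrement-with-wrap before the doorbell; wmb() keeps the descriptor stores ordered ahead of the tail write on weakly-ordered machines. The same logic in isolation (names assumed):

#include <asm/io.h>
#include <asm/system.h>

static void rx_ring_doorbell(void __iomem *tail_reg,
			     unsigned int next_to_use,
			     unsigned int count)
{
	unsigned int tail = next_to_use ? next_to_use - 1 : count - 1;

	wmb();		/* descriptors visible before the doorbell */
	writel(tail, tail_reg);
}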
/**
@@ -3718,7 +4055,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
static void
e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring)
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -3734,16 +4072,18 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
ps_page = &rx_ring->ps_page[i];
ps_page_dma = &rx_ring->ps_page_dma[i];
- while(!buffer_info->skb) {
+ while (cleaned_count--) {
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
- for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+ for (j = 0; j < PS_PAGE_BUFFERS; j++) {
if (j < adapter->rx_ps_pages) {
if (likely(!ps_page->ps_page[j])) {
ps_page->ps_page[j] =
alloc_page(GFP_ATOMIC);
- if (unlikely(!ps_page->ps_page[j]))
+ if (unlikely(!ps_page->ps_page[j])) {
+ adapter->alloc_rx_buff_failed++;
goto no_buffers;
+ }
ps_page_dma->ps_page_dma[j] =
pci_map_page(pdev,
ps_page->ps_page[j],
@@ -3751,7 +4091,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
PCI_DMA_FROMDEVICE);
}
/* Refresh the desc even if buffer_addrs didn't
- * change because each write-back erases
+ * change because each write-back erases
* this info.
*/
rx_desc->read.buffer_addr[j+1] =
@@ -3762,8 +4102,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
- if(unlikely(!skb))
+ if (unlikely(!skb)) {
+ adapter->alloc_rx_buff_failed++;
break;
+ }
/* Make buffer alignment 2 beyond a 16 byte boundary
* this will result in a 16 byte aligned IP header after
@@ -3781,27 +4123,28 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
- if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
- /* Hardware increments by 16 bytes, but packet split
- * descriptors are 32 bytes...so we increment tail
- * twice as much.
- */
- writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
- }
-
- if(unlikely(++i == rx_ring->count)) i = 0;
+ if (unlikely(++i == rx_ring->count)) i = 0;
buffer_info = &rx_ring->buffer_info[i];
ps_page = &rx_ring->ps_page[i];
ps_page_dma = &rx_ring->ps_page_dma[i];
}
no_buffers:
- rx_ring->next_to_use = i;
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ /* Hardware increments by 16 bytes, but packet split
+ * descriptors are 32 bytes...so we increment tail
+ * twice as much.
+ */
+ writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
+ }
}
/**
@@ -3815,24 +4158,24 @@ e1000_smartspeed(struct e1000_adapter *adapter)
uint16_t phy_status;
uint16_t phy_ctrl;
- if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
+ if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
!(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
return;
- if(adapter->smartspeed == 0) {
+ if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice,
* we assume back-to-back */
e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
- if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+ if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
- if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+ if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
- if(phy_ctrl & CR_1000T_MS_ENABLE) {
+ if (phy_ctrl & CR_1000T_MS_ENABLE) {
phy_ctrl &= ~CR_1000T_MS_ENABLE;
e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
phy_ctrl);
adapter->smartspeed++;
- if(!e1000_phy_setup_autoneg(&adapter->hw) &&
+ if (!e1000_phy_setup_autoneg(&adapter->hw) &&
!e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
&phy_ctrl)) {
phy_ctrl |= (MII_CR_AUTO_NEG_EN |
@@ -3842,12 +4185,12 @@ e1000_smartspeed(struct e1000_adapter *adapter)
}
}
return;
- } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+ } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
/* If still no link, perhaps using 2/3 pair cable */
e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
phy_ctrl |= CR_1000T_MS_ENABLE;
e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
- if(!e1000_phy_setup_autoneg(&adapter->hw) &&
+ if (!e1000_phy_setup_autoneg(&adapter->hw) &&
!e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
phy_ctrl |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
@@ -3855,7 +4198,7 @@ e1000_smartspeed(struct e1000_adapter *adapter)
}
}
/* Restart process after E1000_SMARTSPEED_MAX iterations */
- if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+ if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
adapter->smartspeed = 0;
}
@@ -3896,7 +4239,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
uint16_t spddplx;
unsigned long flags;
- if(adapter->hw.media_type != e1000_media_type_copper)
+ if (adapter->hw.media_type != e1000_media_type_copper)
return -EOPNOTSUPP;
switch (cmd) {
@@ -3904,10 +4247,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
data->phy_id = adapter->hw.phy_addr;
break;
case SIOCGMIIREG:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
spin_lock_irqsave(&adapter->stats_lock, flags);
- if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
&data->val_out)) {
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
@@ -3915,23 +4258,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
break;
case SIOCSMIIREG:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if(data->reg_num & ~(0x1F))
+ if (data->reg_num & ~(0x1F))
return -EFAULT;
mii_reg = data->val_in;
spin_lock_irqsave(&adapter->stats_lock, flags);
- if(e1000_write_phy_reg(&adapter->hw, data->reg_num,
+ if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
mii_reg)) {
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
}
- if(adapter->hw.phy_type == e1000_phy_m88) {
+ if (adapter->hw.phy_type == e1000_phy_m88) {
switch (data->reg_num) {
case PHY_CTRL:
- if(mii_reg & MII_CR_POWER_DOWN)
+ if (mii_reg & MII_CR_POWER_DOWN)
break;
- if(mii_reg & MII_CR_AUTO_NEG_EN) {
+ if (mii_reg & MII_CR_AUTO_NEG_EN) {
adapter->hw.autoneg = 1;
adapter->hw.autoneg_advertised = 0x2F;
} else {
@@ -3946,14 +4289,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
HALF_DUPLEX;
retval = e1000_set_spd_dplx(adapter,
spddplx);
- if(retval) {
+ if (retval) {
spin_unlock_irqrestore(
- &adapter->stats_lock,
+ &adapter->stats_lock,
flags);
return retval;
}
}
- if(netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev)) {
e1000_down(adapter);
e1000_up(adapter);
} else
@@ -3961,7 +4304,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
case M88E1000_PHY_SPEC_CTRL:
case M88E1000_EXT_PHY_SPEC_CTRL:
- if(e1000_phy_reset(&adapter->hw)) {
+ if (e1000_phy_reset(&adapter->hw)) {
spin_unlock_irqrestore(
&adapter->stats_lock, flags);
return -EIO;
@@ -3971,9 +4314,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
} else {
switch (data->reg_num) {
case PHY_CTRL:
- if(mii_reg & MII_CR_POWER_DOWN)
+ if (mii_reg & MII_CR_POWER_DOWN)
break;
- if(netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev)) {
e1000_down(adapter);
e1000_up(adapter);
} else
@@ -3995,7 +4338,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
struct e1000_adapter *adapter = hw->back;
int ret_val = pci_set_mwi(adapter->pdev);
- if(ret_val)
+ if (ret_val)
DPRINTK(PROBE, ERR, "Error in setting MWI\n");
}
@@ -4044,7 +4387,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
e1000_irq_disable(adapter);
adapter->vlgrp = grp;
- if(grp) {
+ if (grp) {
/* enable VLAN tag insert/strip */
ctrl = E1000_READ_REG(&adapter->hw, CTRL);
ctrl |= E1000_CTRL_VME;
@@ -4066,7 +4409,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl &= ~E1000_RCTL_VFE;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
- if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
+ if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
@@ -4080,9 +4423,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t vfta, index;
- if((adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
- (vid == adapter->mng_vlan_id))
+
+ if ((adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ (vid == adapter->mng_vlan_id))
return;
/* add VID to filter table */
index = (vid >> 5) & 0x7F;
@@ -4099,15 +4443,19 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
e1000_irq_disable(adapter);
- if(adapter->vlgrp)
+ if (adapter->vlgrp)
adapter->vlgrp->vlan_devices[vid] = NULL;
e1000_irq_enable(adapter);
- if((adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
- (vid == adapter->mng_vlan_id))
+ if ((adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ (vid == adapter->mng_vlan_id)) {
+ /* release control to f/w */
+ e1000_release_hw_control(adapter);
return;
+ }
+
/* remove VID from filter table */
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -4120,10 +4468,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
{
e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
- if(adapter->vlgrp) {
+ if (adapter->vlgrp) {
uint16_t vid;
- for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
- if(!adapter->vlgrp->vlan_devices[vid])
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (!adapter->vlgrp->vlan_devices[vid])
continue;
e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
@@ -4136,13 +4484,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
adapter->hw.autoneg = 0;
/* Fiber NICs only allow 1000 gbps Full duplex */
- if((adapter->hw.media_type == e1000_media_type_fiber) &&
+ if ((adapter->hw.media_type == e1000_media_type_fiber) &&
spddplx != (SPEED_1000 + DUPLEX_FULL)) {
DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
- switch(spddplx) {
+ switch (spddplx) {
case SPEED_10 + DUPLEX_HALF:
adapter->hw.forced_speed_duplex = e1000_10_half;
break;
@@ -4168,35 +4516,92 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
}
#ifdef CONFIG_PM
+/* these functions save and restore 16 or 64 dwords (64-256 bytes) of config
+ * space versus the 64 bytes that pci_[save|restore]_state handle
+ */
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+static int
+e1000_pci_save_state(struct e1000_adapter *adapter)
+{
+ struct pci_dev *dev = adapter->pdev;
+ int size;
+ int i;
+ if (adapter->hw.mac_type >= e1000_82571)
+ size = PCIE_CONFIG_SPACE_LEN;
+ else
+ size = PCI_CONFIG_SPACE_LEN;
+
+ WARN_ON(adapter->config_space != NULL);
+
+ adapter->config_space = kmalloc(size, GFP_KERNEL);
+ if (!adapter->config_space) {
+ DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
+ return -ENOMEM;
+ }
+ for (i = 0; i < (size / 4); i++)
+ pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
+ return 0;
+}
+
+static void
+e1000_pci_restore_state(struct e1000_adapter *adapter)
+{
+ struct pci_dev *dev = adapter->pdev;
+ int size;
+ int i;
+ if (adapter->config_space == NULL)
+ return;
+ if (adapter->hw.mac_type >= e1000_82571)
+ size = PCIE_CONFIG_SPACE_LEN;
+ else
+ size = PCI_CONFIG_SPACE_LEN;
+ for (i = 0; i < (size / 4); i++)
+ pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
+ kfree(adapter->config_space);
+ adapter->config_space = NULL;
+ return;
+}
+#endif /* CONFIG_PM */
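
A minimal usage sketch for this pair (hypothetical caller, mirroring the suspend/resume flow below): the snapshot must be taken exactly once before power-down -- the WARN_ON() guards against double saves -- and restoring writes it back dword by dword, then frees it.

	int retval;

	retval = e1000_pci_save_state(adapter);	/* kmalloc + dword reads */
	if (retval)
		return retval;			/* -ENOMEM */
	/* ... device powered down and back up ... */
	e1000_pci_restore_state(adapter);	/* dword writes, then kfree */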
+
static int
e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
+ uint32_t ctrl, ctrl_ext, rctl, manc, status;
uint32_t wufc = adapter->wol;
+ int retval = 0;
netif_device_detach(netdev);
- if(netif_running(netdev))
+ if (netif_running(netdev))
e1000_down(adapter);
+#ifdef CONFIG_PM
+ /* Implement our own version of pci_save_state(pdev) because PCI
+ * Express adapters have a larger, 256-byte config space */
+ retval = e1000_pci_save_state(adapter);
+ if (retval)
+ return retval;
+#endif
+
status = E1000_READ_REG(&adapter->hw, STATUS);
- if(status & E1000_STATUS_LU)
+ if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
- if(wufc) {
+ if (wufc) {
e1000_setup_rctl(adapter);
e1000_set_multi(netdev);
/* turn on all-multi mode if wake on multicast is enabled */
- if(adapter->wol & E1000_WUFC_MC) {
+ if (adapter->wol & E1000_WUFC_MC) {
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl |= E1000_RCTL_MPE;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
- if(adapter->hw.mac_type >= e1000_82540) {
+ if (adapter->hw.mac_type >= e1000_82540) {
ctrl = E1000_READ_REG(&adapter->hw, CTRL);
/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
@@ -4207,7 +4612,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
}
- if(adapter->hw.media_type == e1000_media_type_fiber ||
+ if (adapter->hw.media_type == e1000_media_type_fiber ||
adapter->hw.media_type == e1000_media_type_internal_serdes) {
/* keep the laser running in D3 */
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
@@ -4220,96 +4625,96 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
- pci_enable_wake(pdev, 3, 1);
- pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
+ retval = pci_enable_wake(pdev, PCI_D3hot, 1);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+ retval = pci_enable_wake(pdev, PCI_D3cold, 1);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
} else {
E1000_WRITE_REG(&adapter->hw, WUC, 0);
E1000_WRITE_REG(&adapter->hw, WUFC, 0);
- pci_enable_wake(pdev, 3, 0);
- pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
+ retval = pci_enable_wake(pdev, PCI_D3hot, 0);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error disabling D3 wake\n");
+ retval = pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n");
}
- pci_save_state(pdev);
-
- if(adapter->hw.mac_type >= e1000_82540 &&
+ if (adapter->hw.mac_type >= e1000_82540 &&
adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
- if(manc & E1000_MANC_SMBUS_EN) {
+ if (manc & E1000_MANC_SMBUS_EN) {
manc |= E1000_MANC_ARP_EN;
E1000_WRITE_REG(&adapter->hw, MANC, manc);
- pci_enable_wake(pdev, 3, 1);
- pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
+ retval = pci_enable_wake(pdev, PCI_D3hot, 1);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+ retval = pci_enable_wake(pdev, PCI_D3cold, 1);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
}
}
- switch(adapter->hw.mac_type) {
- case e1000_82571:
- case e1000_82572:
- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
- E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
- ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
- break;
- case e1000_82573:
- swsm = E1000_READ_REG(&adapter->hw, SWSM);
- E1000_WRITE_REG(&adapter->hw, SWSM,
- swsm & ~E1000_SWSM_DRV_LOAD);
- break;
- default:
- break;
- }
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant. */
+ e1000_release_hw_control(adapter);
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error in setting power state\n");
return 0;
}
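
e1000_release_hw_control() itself is not shown in this hunk; a plausible reconstruction from the per-MAC switch statement it replaces (clear the DRV_LOAD handshake bit so firmware knows the driver has let go of the hardware) would be:

static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	uint32_t ctrl_ext, swsm;

	switch (adapter->hw.mac_type) {
	case e1000_82571:
	case e1000_82572:
		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	default:
		break;
	}
}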
+#ifdef CONFIG_PM
static int
e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- uint32_t manc, ret_val, swsm;
- uint32_t ctrl_ext;
+ int retval;
+ uint32_t manc, ret_val;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
+ retval = pci_set_power_state(pdev, PCI_D0);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error in setting power state\n");
+ e1000_pci_restore_state(adapter);
ret_val = pci_enable_device(pdev);
pci_set_master(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ retval = pci_enable_wake(pdev, PCI_D3hot, 0);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error disabling D3 wake\n");
+ retval = pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n");
e1000_reset(adapter);
E1000_WRITE_REG(&adapter->hw, WUS, ~0);
- if(netif_running(netdev))
+ if (netif_running(netdev))
e1000_up(adapter);
netif_device_attach(netdev);
- if(adapter->hw.mac_type >= e1000_82540 &&
+ if (adapter->hw.mac_type >= e1000_82540 &&
adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
manc &= ~(E1000_MANC_ARP_EN);
E1000_WRITE_REG(&adapter->hw, MANC, manc);
}
- switch(adapter->hw.mac_type) {
- case e1000_82571:
- case e1000_82572:
- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
- E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
- ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
- break;
- case e1000_82573:
- swsm = E1000_READ_REG(&adapter->hw, SWSM);
- E1000_WRITE_REG(&adapter->hw, SWSM,
- swsm | E1000_SWSM_DRV_LOAD);
- break;
- default:
- break;
- }
+ /* If the controller is 82573 and f/w is AMT, do not set
+ * DRV_LOAD until the interface is up. For all other cases,
+ * let the f/w know that the h/w is now under the control
+ * of the driver. */
+ if (adapter->hw.mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(&adapter->hw))
+ e1000_get_hw_control(adapter);
return 0;
}
@@ -4327,6 +4732,9 @@ e1000_netpoll(struct net_device *netdev)
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev, NULL);
e1000_clean_tx_irq(adapter, adapter->tx_ring);
+#ifndef CONFIG_E1000_NAPI
+ adapter->clean_rx(adapter, adapter->rx_ring);
+#endif
enable_irq(adapter->pdev->irq);
}
#endif
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index aac64de61437..9790db974dc1 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -47,7 +47,7 @@
BUG(); \
} else { \
msleep(x); \
- } } while(0)
+ } } while (0)
/* Some workarounds require millisecond delays and are run during interrupt
* context. Most notably, when establishing link, the phy may need tweaking
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index ccbbe5ad8e0f..3768d83cd577 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -177,7 +177,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
*
* Valid Range: 100-100000 (0=off, 1=dynamic)
*
- * Default Value: 1
+ * Default Value: 8000
*/
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
@@ -227,7 +227,7 @@ static int __devinit
e1000_validate_option(int *value, struct e1000_option *opt,
struct e1000_adapter *adapter)
{
- if(*value == OPTION_UNSET) {
+ if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
@@ -244,7 +244,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
}
break;
case range_option:
- if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
DPRINTK(PROBE, INFO,
"%s set to %i\n", opt->name, *value);
return 0;
@@ -254,10 +254,10 @@ e1000_validate_option(int *value, struct e1000_option *opt,
int i;
struct e1000_opt_list *ent;
- for(i = 0; i < opt->arg.l.nr; i++) {
+ for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
- if(*value == ent->i) {
- if(ent->str[0] != '\0')
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
DPRINTK(PROBE, INFO, "%s\n", ent->str);
return 0;
}
@@ -291,7 +291,7 @@ void __devinit
e1000_check_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
- if(bd >= E1000_MAX_NIC) {
+ if (bd >= E1000_MAX_NIC) {
DPRINTK(PROBE, NOTICE,
"Warning: no configuration for board #%i\n", bd);
DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
@@ -315,12 +315,12 @@ e1000_check_options(struct e1000_adapter *adapter)
if (num_TxDescriptors > bd) {
tx_ring->count = TxDescriptors[bd];
e1000_validate_option(&tx_ring->count, &opt, adapter);
- E1000_ROUNDUP(tx_ring->count,
+ E1000_ROUNDUP(tx_ring->count,
REQ_TX_DESCRIPTOR_MULTIPLE);
} else {
tx_ring->count = opt.def;
}
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_tx_queues; i++)
tx_ring[i].count = tx_ring->count;
}
{ /* Receive Descriptor Count */
@@ -341,12 +341,12 @@ e1000_check_options(struct e1000_adapter *adapter)
if (num_RxDescriptors > bd) {
rx_ring->count = RxDescriptors[bd];
e1000_validate_option(&rx_ring->count, &opt, adapter);
- E1000_ROUNDUP(rx_ring->count,
+ E1000_ROUNDUP(rx_ring->count,
REQ_RX_DESCRIPTOR_MULTIPLE);
} else {
rx_ring->count = opt.def;
}
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++)
rx_ring[i].count = rx_ring->count;
}
{ /* Checksum Offload Enable/Disable */
@@ -388,7 +388,7 @@ e1000_check_options(struct e1000_adapter *adapter)
e1000_validate_option(&fc, &opt, adapter);
adapter->hw.fc = adapter->hw.original_fc = fc;
} else {
- adapter->hw.fc = opt.def;
+ adapter->hw.fc = adapter->hw.original_fc = opt.def;
}
}
{ /* Transmit Interrupt Delay */
@@ -403,7 +403,7 @@ e1000_check_options(struct e1000_adapter *adapter)
if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd];
- e1000_validate_option(&adapter->tx_int_delay, &opt,
+ e1000_validate_option(&adapter->tx_int_delay, &opt,
adapter);
} else {
adapter->tx_int_delay = opt.def;
@@ -421,7 +421,7 @@ e1000_check_options(struct e1000_adapter *adapter)
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
- e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+ e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
@@ -439,7 +439,7 @@ e1000_check_options(struct e1000_adapter *adapter)
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
- e1000_validate_option(&adapter->rx_int_delay, &opt,
+ e1000_validate_option(&adapter->rx_int_delay, &opt,
adapter);
} else {
adapter->rx_int_delay = opt.def;
@@ -457,7 +457,7 @@ e1000_check_options(struct e1000_adapter *adapter)
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
- e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+ e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
@@ -475,17 +475,17 @@ e1000_check_options(struct e1000_adapter *adapter)
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
- switch(adapter->itr) {
+ switch (adapter->itr) {
case 0:
- DPRINTK(PROBE, INFO, "%s turned off\n",
+ DPRINTK(PROBE, INFO, "%s turned off\n",
opt.name);
break;
case 1:
- DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
opt.name);
break;
default:
- e1000_validate_option(&adapter->itr, &opt,
+ e1000_validate_option(&adapter->itr, &opt,
adapter);
break;
}
@@ -494,7 +494,7 @@ e1000_check_options(struct e1000_adapter *adapter)
}
}
- switch(adapter->hw.media_type) {
+ switch (adapter->hw.media_type) {
case e1000_media_type_fiber:
case e1000_media_type_internal_serdes:
e1000_check_fiber_options(adapter);
@@ -518,17 +518,17 @@ static void __devinit
e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
- if(num_Speed > bd) {
+ if (num_Speed > bd) {
DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
"parameter ignored\n");
}
- if(num_Duplex > bd) {
+ if (num_Duplex > bd) {
DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
"parameter ignored\n");
}
- if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+ if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
"not valid for fiber adapters, "
"parameter ignored\n");
@@ -584,6 +584,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = dplx_list }}
};
+ if (e1000_check_phy_reset_block(&adapter->hw)) {
+ DPRINTK(PROBE, INFO,
+ "Link active due to SoL/IDER Session. "
+ "Speed/Duplex/AutoNeg parameter ignored.\n");
+ return;
+ }
if (num_Duplex > bd) {
dplx = Duplex[bd];
e1000_validate_option(&dplx, &opt, adapter);
@@ -592,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
}
}
- if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+ if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
DPRINTK(PROBE, INFO,
"AutoNeg specified along with Speed or Duplex, "
"parameter ignored\n");
@@ -653,7 +659,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
switch (speed + dplx) {
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
- if((num_Speed > bd) && (speed != 0 || dplx != 0))
+ if ((num_Speed > bd) && (speed != 0 || dplx != 0))
DPRINTK(PROBE, INFO,
"Speed and duplex autonegotiation enabled\n");
break;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 22c3a37bba5a..40ae36b20c9d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -35,6 +35,8 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
#include <linux/bitops.h>
#include <linux/delay.h>
@@ -55,13 +57,15 @@
/* Constants */
#define VLAN_HLEN 4
#define FCS_LEN 4
-#define WRAP NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
+#define DMA_ALIGN 8 /* hw requires 8-byte alignment */
+#define HW_IP_ALIGN 2 /* hw aligns IP header */
+#define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7)
-#define INT_CAUSE_UNMASK_ALL 0x0007ffff
-#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
-#define INT_CAUSE_MASK_ALL 0x00000000
-#define INT_CAUSE_MASK_ALL_EXT 0x00000000
+#define INT_UNMASK_ALL 0x0007ffff
+#define INT_UNMASK_ALL_EXT 0x0011ffff
+#define INT_MASK_ALL 0x00000000
+#define INT_MASK_ALL_EXT 0x00000000
#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
@@ -78,8 +82,9 @@
static int eth_port_link_is_up(unsigned int eth_port_num);
static void eth_port_uc_addr_get(struct net_device *dev,
unsigned char *MacAddr);
-static int mv643xx_eth_real_open(struct net_device *);
-static int mv643xx_eth_real_stop(struct net_device *);
+static void eth_port_set_multicast_list(struct net_device *);
+static int mv643xx_eth_open(struct net_device *);
+static int mv643xx_eth_stop(struct net_device *);
static int mv643xx_eth_change_mtu(struct net_device *, int);
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
static void eth_port_init_mac_tables(unsigned int eth_port_num);
@@ -124,15 +129,8 @@ static inline void mv_write(int offset, u32 data)
*/
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
- struct mv643xx_private *mp = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&mp->lock, flags);
-
- if ((new_mtu > 9500) || (new_mtu < 64)) {
- spin_unlock_irqrestore(&mp->lock, flags);
+ if ((new_mtu > 9500) || (new_mtu < 64))
return -EINVAL;
- }
dev->mtu = new_mtu;
/*
@@ -142,17 +140,13 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
* to memory is full, which might fail the open function.
*/
if (netif_running(dev)) {
- if (mv643xx_eth_real_stop(dev))
- printk(KERN_ERR
- "%s: Fatal error on stopping device\n",
- dev->name);
- if (mv643xx_eth_real_open(dev))
+ mv643xx_eth_stop(dev);
+ if (mv643xx_eth_open(dev))
printk(KERN_ERR
"%s: Fatal error on opening device\n",
dev->name);
}
- spin_unlock_irqrestore(&mp->lock, flags);
return 0;
}
@@ -170,15 +164,19 @@ static void mv643xx_eth_rx_task(void *data)
struct mv643xx_private *mp = netdev_priv(dev);
struct pkt_info pkt_info;
struct sk_buff *skb;
+ int unaligned;
if (test_and_set_bit(0, &mp->rx_task_busy))
panic("%s: Error in test_set_bit / clear_bit", dev->name);
while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
- skb = dev_alloc_skb(RX_SKB_SIZE);
+ skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
if (!skb)
break;
mp->rx_ring_skbs++;
+ unaligned = (u32)skb->data & (DMA_ALIGN - 1);
+ if (unaligned)
+ skb_reserve(skb, DMA_ALIGN - unaligned);
pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
pkt_info.byte_cnt = RX_SKB_SIZE;
pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
@@ -189,7 +187,7 @@ static void mv643xx_eth_rx_task(void *data)
"%s: Error allocating RX Ring\n", dev->name);
break;
}
- skb_reserve(skb, 2);
+ skb_reserve(skb, HW_IP_ALIGN);
}
clear_bit(0, &mp->rx_task_busy);
/*
@@ -207,7 +205,7 @@ static void mv643xx_eth_rx_task(void *data)
else {
/* Return interrupts */
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
- INT_CAUSE_UNMASK_ALL);
+ INT_UNMASK_ALL);
}
#endif
}
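
Worked example of the two reservations above: if dev_alloc_skb() returns data at an address ending in 0x06, then unaligned = 0x06 & 0x7 = 6, so skb_reserve(skb, 8 - 6) moves data to the next 8-byte boundary, satisfying the controller's DMA requirement; the later skb_reserve(skb, HW_IP_ALIGN) then offsets by 2 so that the IP header following the 14-byte Ethernet header lands at offset 16 from the aligned base, i.e. still aligned.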
@@ -267,6 +265,8 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);
+
+ eth_port_set_multicast_list(dev);
}
/*
@@ -342,8 +342,6 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
if (!(eth_int_cause_ext & (BIT0 | BIT8)))
return released;
- spin_lock(&mp->lock);
-
/* Check only queue 0 */
while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
if (pkt_info.cmd_sts & BIT0) {
@@ -351,31 +349,21 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
stats->tx_errors++;
}
- /*
- * If return_info is different than 0, release the skb.
- * The case where return_info is not 0 is only in case
- * when transmitted a scatter/gather packet, where only
- * last skb releases the whole chain.
- */
- if (pkt_info.return_info) {
- if (skb_shinfo(pkt_info.return_info)->nr_frags)
- dma_unmap_page(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt,
- DMA_TO_DEVICE);
- else
- dma_unmap_single(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt,
- DMA_TO_DEVICE);
+ if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
+ dma_unmap_single(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt,
+ DMA_TO_DEVICE);
+ if (pkt_info.return_info) {
dev_kfree_skb_irq(pkt_info.return_info);
released = 0;
- } else
- dma_unmap_page(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt, DMA_TO_DEVICE);
+ }
}
- spin_unlock(&mp->lock);
-
return released;
}
@@ -482,12 +470,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
/* Read interrupt cause registers */
eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
- INT_CAUSE_UNMASK_ALL;
+ INT_UNMASK_ALL;
if (eth_int_cause & BIT1)
eth_int_cause_ext = mv_read(
MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
- INT_CAUSE_UNMASK_ALL_EXT;
+ INT_UNMASK_ALL_EXT;
#ifdef MV643XX_NAPI
if (!(eth_int_cause & 0x0007fffd)) {
@@ -512,9 +500,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
} else {
if (netif_rx_schedule_prep(dev)) {
/* Mask all the interrupts */
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
- mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG
- (port_num), 0);
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
+ INT_MASK_ALL);
+ /* wait for previous write to complete */
+ mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
__netif_rx_schedule(dev);
}
#else
@@ -527,9 +516,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
* with skb's.
*/
#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
- /* Unmask all interrupts on ethernet port */
+ /* Mask all interrupts on ethernet port */
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
- INT_CAUSE_MASK_ALL);
+ INT_MASK_ALL);
+ /* wait for previous write to take effect */
+ mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+
queue_task(&mp->rx_task, &tq_immediate);
mark_bh(IMMEDIATE_BH);
#else
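
The mv_read() after each mask write is the standard posted-write flush: an MMIO write can sit in a bridge buffer indefinitely, and reading any register back from the same device forces it to complete before the code proceeds. A hypothetical helper distilling the idiom, using this driver's own accessors:

static inline void mv_write_flush(int offset, u32 data)
{
	mv_write(offset, data);
	(void)mv_read(offset);	/* force the posted write to complete */
}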
@@ -636,56 +628,6 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
}
/*
- * mv643xx_eth_open
- *
- * This function is called when openning the network device. The function
- * should initialize all the hardware, initialize cyclic Rx/Tx
- * descriptors chain and buffers and allocate an IRQ to the network
- * device.
- *
- * Input : a pointer to the network device structure
- *
- * Output : zero of success , nonzero if fails.
- */
-
-static int mv643xx_eth_open(struct net_device *dev)
-{
- struct mv643xx_private *mp = netdev_priv(dev);
- unsigned int port_num = mp->port_num;
- int err;
-
- spin_lock_irq(&mp->lock);
-
- err = request_irq(dev->irq, mv643xx_eth_int_handler,
- SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
-
- if (err) {
- printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
- port_num);
- err = -EAGAIN;
- goto out;
- }
-
- if (mv643xx_eth_real_open(dev)) {
- printk("%s: Error opening interface\n", dev->name);
- err = -EBUSY;
- goto out_free;
- }
-
- spin_unlock_irq(&mp->lock);
-
- return 0;
-
-out_free:
- free_irq(dev->irq, dev);
-
-out:
- spin_unlock_irq(&mp->lock);
-
- return err;
-}
-
-/*
* ether_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory.
*
* DESCRIPTION:
@@ -777,28 +719,37 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
mp->port_tx_queue_command |= 1;
}
-/* Helper function for mv643xx_eth_open */
-static int mv643xx_eth_real_open(struct net_device *dev)
+/*
+ * mv643xx_eth_open
+ *
+ * This function is called when opening the network device. It
+ * initializes all the hardware, sets up the cyclic Rx/Tx descriptor
+ * chains and buffers, and allocates an IRQ for the network device.
+ *
+ * Input : a pointer to the network device structure
+ *
+ * Output : zero on success, nonzero on failure.
+ */
+
+static int mv643xx_eth_open(struct net_device *dev)
{
struct mv643xx_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
unsigned int size;
+ int err;
+
+ err = request_irq(dev->irq, mv643xx_eth_int_handler,
+ SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+ if (err) {
+ printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
+ port_num);
+ return -EAGAIN;
+ }
/* Stop RX Queues */
mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
- /* Clear the ethernet port interrupts */
- mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
- mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-
- /* Unmask RX buffer and TX end interrupt */
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
- INT_CAUSE_UNMASK_ALL);
-
- /* Unmask phy and link status changes interrupts */
- mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
- INT_CAUSE_UNMASK_ALL_EXT);
-
/* Set the MAC Address */
memcpy(mp->port_mac_addr, dev->dev_addr, 6);
@@ -818,14 +769,15 @@ static int mv643xx_eth_real_open(struct net_device *dev)
GFP_KERNEL);
if (!mp->rx_skb) {
printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_free_irq;
}
mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
GFP_KERNEL);
if (!mp->tx_skb) {
printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
- kfree(mp->rx_skb);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_free_rx_skb;
}
/* Allocate TX ring */
@@ -845,9 +797,8 @@ static int mv643xx_eth_real_open(struct net_device *dev)
if (!mp->p_tx_desc_area) {
printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
dev->name, size);
- kfree(mp->rx_skb);
- kfree(mp->tx_skb);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_free_tx_skb;
}
BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
@@ -874,13 +825,12 @@ static int mv643xx_eth_real_open(struct net_device *dev)
printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
dev->name);
if (mp->rx_sram_size)
- iounmap(mp->p_rx_desc_area);
+ iounmap(mp->p_tx_desc_area);
else
dma_free_coherent(NULL, mp->tx_desc_area_size,
mp->p_tx_desc_area, mp->tx_desc_dma);
- kfree(mp->rx_skb);
- kfree(mp->tx_skb);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_free_tx_skb;
}
memset((void *)mp->p_rx_desc_area, 0, size);
@@ -900,9 +850,26 @@ static int mv643xx_eth_real_open(struct net_device *dev)
mp->tx_int_coal =
eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
- netif_start_queue(dev);
+ /* Clear any pending ethernet port interrupts */
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+
+ /* Unmask phy and link status changes interrupts */
+ mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
+ INT_UNMASK_ALL_EXT);
+ /* Unmask RX buffer and TX end interrupt */
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
return 0;
+
+out_free_tx_skb:
+ kfree(mp->tx_skb);
+out_free_rx_skb:
+ kfree(mp->rx_skb);
+out_free_irq:
+ free_irq(dev->irq, dev);
+
+ return err;
}
static void mv643xx_eth_free_tx_rings(struct net_device *dev)
@@ -910,14 +877,17 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
struct mv643xx_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
unsigned int curr;
+ struct sk_buff *skb;
/* Stop Tx Queues */
mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
/* Free outstanding skb's on TX rings */
for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
- if (mp->tx_skb[curr]) {
- dev_kfree_skb(mp->tx_skb[curr]);
+ skb = mp->tx_skb[curr];
+ if (skb) {
+ mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags;
+ dev_kfree_skb(skb);
mp->tx_ring_skbs--;
}
}
@@ -973,44 +943,32 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
* Output : zero on success, nonzero on failure
*/
-/* Helper function for mv643xx_eth_stop */
-
-static int mv643xx_eth_real_stop(struct net_device *dev)
+static int mv643xx_eth_stop(struct net_device *dev)
{
struct mv643xx_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
+ /* Mask all interrupts on ethernet port */
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
+ /* wait for previous write to complete */
+ mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+
+#ifdef MV643XX_NAPI
+ netif_poll_disable(dev);
+#endif
netif_carrier_off(dev);
netif_stop_queue(dev);
- mv643xx_eth_free_tx_rings(dev);
- mv643xx_eth_free_rx_rings(dev);
-
eth_port_reset(mp->port_num);
- /* Disable ethernet port interrupts */
- mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
- mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-
- /* Mask RX buffer and TX end interrupt */
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
-
- /* Mask phy and link status changes interrupts */
- mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
-
- return 0;
-}
-
-static int mv643xx_eth_stop(struct net_device *dev)
-{
- struct mv643xx_private *mp = netdev_priv(dev);
-
- spin_lock_irq(&mp->lock);
+ mv643xx_eth_free_tx_rings(dev);
+ mv643xx_eth_free_rx_rings(dev);
- mv643xx_eth_real_stop(dev);
+#ifdef MV643XX_NAPI
+ netif_poll_enable(dev);
+#endif
free_irq(dev->irq, dev);
- spin_unlock_irq(&mp->lock);
return 0;
}
@@ -1022,20 +980,17 @@ static void mv643xx_tx(struct net_device *dev)
struct pkt_info pkt_info;
while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
- if (pkt_info.return_info) {
- if (skb_shinfo(pkt_info.return_info)->nr_frags)
- dma_unmap_page(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt,
- DMA_TO_DEVICE);
- else
- dma_unmap_single(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt,
- DMA_TO_DEVICE);
+ if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
+ dma_unmap_single(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(NULL, pkt_info.buf_ptr,
+ pkt_info.byte_cnt,
+ DMA_TO_DEVICE);
+ if (pkt_info.return_info)
dev_kfree_skb_irq(pkt_info.return_info);
- } else
- dma_unmap_page(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt, DMA_TO_DEVICE);
}
if (netif_queue_stopped(dev) &&
@@ -1053,14 +1008,11 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
struct mv643xx_private *mp = netdev_priv(dev);
int done = 1, orig_budget, work_done;
unsigned int port_num = mp->port_num;
- unsigned long flags;
#ifdef MV643XX_TX_FAST_REFILL
if (++mp->tx_clean_threshold > 5) {
- spin_lock_irqsave(&mp->lock, flags);
mv643xx_tx(dev);
mp->tx_clean_threshold = 0;
- spin_unlock_irqrestore(&mp->lock, flags);
}
#endif
@@ -1078,21 +1030,36 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
}
if (done) {
- spin_lock_irqsave(&mp->lock, flags);
- __netif_rx_complete(dev);
+ netif_rx_complete(dev);
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
- INT_CAUSE_UNMASK_ALL);
- mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
- INT_CAUSE_UNMASK_ALL_EXT);
- spin_unlock_irqrestore(&mp->lock, flags);
+ INT_UNMASK_ALL);
}
return done ? 0 : 1;
}
#endif
+/* Hardware can't handle unaligned fragments smaller than 9 bytes.
+ * This helper function detects that case.
+ */
+
+static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
+{
+ unsigned int frag;
+ skb_frag_t *fragp;
+
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ fragp = &skb_shinfo(skb)->frags[frag];
+ if (fragp->size <= 8 && fragp->page_offset & 0x7)
+ return 1;
+
+ }
+ return 0;
+}
+
+
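For example, a fragment of size 8 starting at page_offset 3 satisfies both conditions (size <= 8 and offset & 0x7 != 0), so the helper returns 1 and the transmit path below falls back to skb_linearize() instead of handing the fragment to the DMA engine.
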
/*
* mv643xx_eth_start_xmit
*
@@ -1136,12 +1103,19 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 1;
}
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+ if (has_tiny_unaligned_frags(skb)) {
+ if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
+ stats->tx_dropped++;
+ printk(KERN_DEBUG "%s: failed to linearize tiny "
+ "unaligned fragment\n", dev->name);
+ return 1;
+ }
+ }
+
spin_lock_irqsave(&mp->lock, flags);
- /* Update packet info data structure -- DMA owned, first last */
-#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
if (!skb_shinfo(skb)->nr_frags) {
-linear:
if (skb->ip_summed != CHECKSUM_HW) {
/* Errata BTS #50, IHL must be 5 if no HW checksum */
pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
@@ -1150,7 +1124,6 @@ linear:
5 << ETH_TX_IHL_SHIFT;
pkt_info.l4i_chk = 0;
} else {
-
pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
ETH_TX_FIRST_DESC |
ETH_TX_LAST_DESC |
@@ -1158,14 +1131,16 @@ linear:
ETH_GEN_IP_V_4_CHECKSUM |
skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
/* CPU already calculated pseudo header checksum. */
- if (skb->nh.iph->protocol == IPPROTO_UDP) {
+ if ((skb->protocol == ETH_P_IP) &&
+ (skb->nh.iph->protocol == IPPROTO_UDP) ) {
pkt_info.cmd_sts |= ETH_UDP_FRAME;
pkt_info.l4i_chk = skb->h.uh->check;
- } else if (skb->nh.iph->protocol == IPPROTO_TCP)
+ } else if ((skb->protocol == ETH_P_IP) &&
+ (skb->nh.iph->protocol == IPPROTO_TCP))
pkt_info.l4i_chk = skb->h.th->check;
else {
printk(KERN_ERR
- "%s: chksum proto != TCP or UDP\n",
+ "%s: chksum proto != IPv4 TCP or UDP\n",
dev->name);
spin_unlock_irqrestore(&mp->lock, flags);
return 1;
@@ -1183,26 +1158,6 @@ linear:
} else {
unsigned int frag;
- /* Since hardware can't handle unaligned fragments smaller
- * than 9 bytes, if we find any, we linearize the skb
- * and start again. When I've seen it, it's always been
- * the first frag (probably near the end of the page),
- * but we check all frags to be safe.
- */
- for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *fragp;
-
- fragp = &skb_shinfo(skb)->frags[frag];
- if (fragp->size <= 8 && fragp->page_offset & 0x7) {
- skb_linearize(skb, GFP_ATOMIC);
- printk(KERN_DEBUG "%s: unaligned tiny fragment"
- "%d of %d, fixed\n",
- dev->name, frag,
- skb_shinfo(skb)->nr_frags);
- goto linear;
- }
- }
-
/* first frag which is skb header */
pkt_info.byte_cnt = skb_headlen(skb);
pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
@@ -1221,14 +1176,16 @@ linear:
ETH_GEN_IP_V_4_CHECKSUM |
skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
/* CPU already calculated pseudo header checksum. */
- if (skb->nh.iph->protocol == IPPROTO_UDP) {
+ if ((skb->protocol == ETH_P_IP) &&
+ (skb->nh.iph->protocol == IPPROTO_UDP)) {
pkt_info.cmd_sts |= ETH_UDP_FRAME;
pkt_info.l4i_chk = skb->h.uh->check;
- } else if (skb->nh.iph->protocol == IPPROTO_TCP)
+ } else if ((skb->protocol == ETH_P_IP) &&
+ (skb->nh.iph->protocol == IPPROTO_TCP))
pkt_info.l4i_chk = skb->h.th->check;
else {
printk(KERN_ERR
- "%s: chksum proto != TCP or UDP\n",
+ "%s: chksum proto != IPv4 TCP or UDP\n",
dev->name);
spin_unlock_irqrestore(&mp->lock, flags);
return 1;
@@ -1288,6 +1245,8 @@ linear:
}
}
#else
+ spin_lock_irqsave(&mp->lock, flags);
+
pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
ETH_TX_LAST_DESC;
pkt_info.l4i_chk = 0;
@@ -1340,39 +1299,18 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static inline void mv643xx_enable_irq(struct mv643xx_private *mp)
-{
- int port_num = mp->port_num;
- unsigned long flags;
-
- spin_lock_irqsave(&mp->lock, flags);
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
- INT_CAUSE_UNMASK_ALL);
- mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
- INT_CAUSE_UNMASK_ALL_EXT);
- spin_unlock_irqrestore(&mp->lock, flags);
-}
-
-static inline void mv643xx_disable_irq(struct mv643xx_private *mp)
-{
- int port_num = mp->port_num;
- unsigned long flags;
-
- spin_lock_irqsave(&mp->lock, flags);
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
- INT_CAUSE_MASK_ALL);
- mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
- INT_CAUSE_MASK_ALL_EXT);
- spin_unlock_irqrestore(&mp->lock, flags);
-}
-
static void mv643xx_netpoll(struct net_device *netdev)
{
struct mv643xx_private *mp = netdev_priv(netdev);
+ int port_num = mp->port_num;
+
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
+ /* wait for previous write to complete */
+ mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
- mv643xx_disable_irq(mp);
mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
- mv643xx_enable_irq(mp);
+
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
}
#endif
@@ -1441,7 +1379,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
* Zero copy can only work if we use Discovery II memory. Else, we will
* have to map the buffers to ISA memory which is only 16 MB
*/
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
#endif
@@ -2054,6 +1992,196 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
}
/*
+ * The entries in each table are indexed by a hash of a packet's MAC
+ * address. One bit in each entry determines whether the packet is
+ * accepted. There are 4 entries (each 8 bits wide) in each register
+ * of the table. The bits in each entry are defined as follows:
+ * 0 Accept=1, Drop=0
+ * 3-1 Queue (ETH_Q0=0)
+ * 7-4 Reserved = 0;
+ */
+static void eth_port_set_filter_table_entry(int table, unsigned char entry)
+{
+ unsigned int table_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ tbl_offset = (entry / 4) * 4; /* Register offset of DA table entry */
+ reg_offset = entry % 4; /* Entry offset within the register */
+
+ /* Set "accepts frame bit" at specified table entry */
+ table_reg = mv_read(table + tbl_offset);
+ table_reg |= 0x01 << (8 * reg_offset);
+ mv_write(table + tbl_offset, table_reg);
+}
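
Worked example of the indexing above: for entry 0x2A, tbl_offset = (0x2A / 4) * 4 = 0x28 and reg_offset = 0x2A % 4 = 2, so the accept bit is bit 16 (0x01 << 16) of the 32-bit register at table + 0x28.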
+
+/*
+ * eth_port_mc_addr - Multicast address settings.
+ *
+ * The MV device supports multicast using two tables:
+ * 1) Special Multicast Table for MAC addresses of the form
+ * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0x_FF).
+ * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ * Table entries in the DA-Filter table.
+ * 2) Other Multicast Table for multicast addresses of any other type.
+ * An 8-bit CRC of the address is used as an index to the Other
+ * Multicast Table entries in the DA-Filter table. This function
+ * calculates that CRC-8 value.
+ * In either case, eth_port_set_filter_table_entry() is then called
+ * to set the actual table entry.
+ */
+static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
+{
+ unsigned int mac_h;
+ unsigned int mac_l;
+ unsigned char crc_result = 0;
+ int table;
+ int mac_array[48];
+ int crc[8];
+ int i;
+
+ if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
+ (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
+ table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+ (eth_port_num);
+ eth_port_set_filter_table_entry(table, p_addr[5]);
+ return;
+ }
+
+ /* Calculate CRC-8 out of the given address */
+ mac_h = (p_addr[0] << 8) | (p_addr[1]);
+ mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
+ (p_addr[4] << 8) | (p_addr[5] << 0);
+
+ for (i = 0; i < 32; i++)
+ mac_array[i] = (mac_l >> i) & 0x1;
+ for (i = 32; i < 48; i++)
+ mac_array[i] = (mac_h >> (i - 32)) & 0x1;
+
+ crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
+ mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
+ mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
+ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
+ mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0];
+
+ crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
+ mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
+ mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
+ mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
+ mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
+ mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
+ mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0];
+
+ crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
+ mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
+ mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
+ mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
+ mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^
+ mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0];
+
+ crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
+ mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
+ mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
+ mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
+ mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^
+ mac_array[3] ^ mac_array[2] ^ mac_array[1];
+
+ crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
+ mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
+ mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
+ mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
+ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^
+ mac_array[3] ^ mac_array[2];
+
+ crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
+ mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
+ mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
+ mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
+ mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^
+ mac_array[4] ^ mac_array[3];
+
+ crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
+ mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
+ mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
+ mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
+ mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^
+ mac_array[4];
+
+ crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
+ mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
+ mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
+ mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
+ mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5];
+
+ for (i = 0; i < 8; i++)
+ crc_result = crc_result | (crc[i] << i);
+
+ table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
+ eth_port_set_filter_table_entry(table, crc_result);
+}
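
The unrolled XOR network above computes an 8-bit CRC one output bit at a time. A compact bit-serial form, assuming the conventional CRC-8 polynomial x^8 + x^2 + x + 1 (0x07) and the same mac_array bit ordering -- illustrative only, not verified bit-for-bit against the hardware hash:

static unsigned char mc_crc8_sketch(const int mac_array[48])
{
	unsigned char crc = 0;
	int i;

	for (i = 47; i >= 0; i--) {
		int inv = mac_array[i] ^ ((crc >> 7) & 1);

		crc <<= 1;
		if (inv)
			crc ^= 0x07;	/* x^8 + x^2 + x + 1 */
	}
	return crc;
}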
+
+/*
+ * Set the entire multicast list based on dev->mc_list.
+ */
+static void eth_port_set_multicast_list(struct net_device *dev)
+{
+
+ struct dev_mc_list *mc_list;
+ int i;
+ int table_index;
+ struct mv643xx_private *mp = netdev_priv(dev);
+ unsigned int eth_port_num = mp->port_num;
+
+ /* If the device is in promiscuous mode or in all multicast mode,
+ * we will fully populate both multicast tables with accept.
+ * This is guaranteed to yield a match on all multicast addresses...
+ */
+ if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
+ for (table_index = 0; table_index <= 0xFC; table_index += 4) {
+ /* Set all entries in DA filter special multicast
+ * table (Ex_dFSMT)
+ * Set for ETH_Q0 for now
+ * Bits
+ * 0 Accept=1, Drop=0
+ * 3-1 Queue ETH_Q0=0
+ * 7-4 Reserved = 0;
+ */
+ mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+
+ /* Set all entries in DA filter other multicast
+ * table (Ex_dFOMT)
+ * Set for ETH_Q0 for now
+ * Bits
+ * 0 Accept=1, Drop=0
+ * 3-1 Queue ETH_Q0=0
+ * 7-4 Reserved = 0;
+ */
+ mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+ }
+ return;
+ }
+
+ /* We will clear out multicast tables every time we get the list.
+ * Then add the entire new list...
+ */
+ for (table_index = 0; table_index <= 0xFC; table_index += 4) {
+ /* Clear DA filter special multicast table (Ex_dFSMT) */
+ mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+ (eth_port_num) + table_index, 0);
+
+ /* Clear DA filter other multicast table (Ex_dFOMT) */
+ mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+ (eth_port_num) + table_index, 0);
+ }
+
+ /* Get pointer to net_device multicast list and add each one... */
+ for (i = 0, mc_list = dev->mc_list;
+ (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
+ i++, mc_list = mc_list->next)
+ if (mc_list->dmi_addrlen == 6)
+ eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
+}
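
The 0x01010101 pattern works because each 32-bit register holds four 8-bit entries: writing 0x01 into every byte sets bit 0 (Accept = 1) in each entry, with the queue bits 3-1 and reserved bits 7-4 left at zero. The 64 writes (table_index 0x00..0xFC in steps of 4) therefore populate all 256 entries of each table.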
+
+/*
* eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
*
* DESCRIPTION:
@@ -2080,11 +2208,11 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
for (table_index = 0; table_index <= 0xFC; table_index += 4) {
/* Clear DA filter special multicast table (Ex_dFSMT) */
- mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
- (eth_port_num) + table_index), 0);
+ mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+ (eth_port_num) + table_index, 0);
/* Clear DA filter other multicast table (Ex_dFOMT) */
- mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
- (eth_port_num) + table_index), 0);
+ mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+ (eth_port_num) + table_index, 0);
}
}
@@ -2489,6 +2617,7 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
struct eth_tx_desc *current_descriptor;
struct eth_tx_desc *first_descriptor;
u32 command;
+ unsigned long flags;
/* Do not process Tx ring in case of Tx ring resource error */
if (mp->tx_resource_err)
@@ -2505,6 +2634,8 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
return ETH_ERROR;
}
+ spin_lock_irqsave(&mp->lock, flags);
+
mp->tx_ring_skbs++;
BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
@@ -2554,11 +2685,15 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
mp->tx_resource_err = 1;
mp->tx_curr_desc_q = tx_first_desc;
+ spin_unlock_irqrestore(&mp->lock, flags);
+
return ETH_QUEUE_LAST_RESOURCE;
}
mp->tx_curr_desc_q = tx_next_desc;
+ spin_unlock_irqrestore(&mp->lock, flags);
+
return ETH_OK;
}
#else
@@ -2569,11 +2704,14 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
int tx_desc_used;
struct eth_tx_desc *current_descriptor;
unsigned int command_status;
+ unsigned long flags;
/* Do not process Tx ring in case of Tx ring resource error */
if (mp->tx_resource_err)
return ETH_QUEUE_FULL;
+ spin_lock_irqsave(&mp->lock, flags);
+
mp->tx_ring_skbs++;
BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
@@ -2604,9 +2742,12 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
/* Check for ring index overlap in the Tx desc ring */
if (tx_desc_curr == tx_desc_used) {
mp->tx_resource_err = 1;
+
+ spin_unlock_irqrestore(&mp->lock, flags);
return ETH_QUEUE_LAST_RESOURCE;
}
+ spin_unlock_irqrestore(&mp->lock, flags);
return ETH_OK;
}
#endif
@@ -2629,23 +2770,27 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
* Tx ring 'first' and 'used' indexes are updated.
*
* RETURN:
- * ETH_ERROR in case the routine can not access Tx desc ring.
- * ETH_RETRY in case there is transmission in process.
- * ETH_END_OF_JOB if the routine has nothing to release.
- * ETH_OK otherwise.
+ * ETH_OK on success
+ * ETH_ERROR otherwise.
*
*/
static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
struct pkt_info *p_pkt_info)
{
int tx_desc_used;
+ int tx_busy_desc;
+ struct eth_tx_desc *p_tx_desc_used;
+ unsigned int command_status;
+ unsigned long flags;
+ int err = ETH_OK;
+
+ spin_lock_irqsave(&mp->lock, flags);
+
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
- int tx_busy_desc = mp->tx_first_desc_q;
+ tx_busy_desc = mp->tx_first_desc_q;
#else
- int tx_busy_desc = mp->tx_curr_desc_q;
+ tx_busy_desc = mp->tx_curr_desc_q;
#endif
- struct eth_tx_desc *p_tx_desc_used;
- unsigned int command_status;
/* Get the Tx Desc ring indexes */
tx_desc_used = mp->tx_used_desc_q;
@@ -2653,22 +2798,30 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
/* Sanity check */
- if (p_tx_desc_used == NULL)
- return ETH_ERROR;
+ if (p_tx_desc_used == NULL) {
+ err = ETH_ERROR;
+ goto out;
+ }
/* Stop release. About to overlap the current available Tx descriptor */
- if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err)
- return ETH_END_OF_JOB;
+ if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) {
+ err = ETH_ERROR;
+ goto out;
+ }
command_status = p_tx_desc_used->cmd_sts;
/* Still transmitting... */
- if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
- return ETH_RETRY;
+ if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
+ err = ETH_ERROR;
+ goto out;
+ }
/* Pass the packet information to the caller */
p_pkt_info->cmd_sts = command_status;
p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
+ p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
+ p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
mp->tx_skb[tx_desc_used] = NULL;
/* Update the next descriptor to release. */
@@ -2680,7 +2833,10 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
BUG_ON(mp->tx_ring_skbs == 0);
mp->tx_ring_skbs--;
- return ETH_OK;
+out:
+ spin_unlock_irqrestore(&mp->lock, flags);
+
+ return err;
}
/*
@@ -2712,11 +2868,14 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
volatile struct eth_rx_desc *p_rx_desc;
unsigned int command_status;
+ unsigned long flags;
/* Do not process Rx ring in case of Rx ring resource error */
if (mp->rx_resource_err)
return ETH_QUEUE_FULL;
+ spin_lock_irqsave(&mp->lock, flags);
+
/* Get the Rx Desc ring 'curr and 'used' indexes */
rx_curr_desc = mp->rx_curr_desc_q;
rx_used_desc = mp->rx_used_desc_q;
@@ -2728,8 +2887,10 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
rmb();
/* Nothing to receive... */
- if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
+ if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
+ spin_unlock_irqrestore(&mp->lock, flags);
return ETH_END_OF_JOB;
+ }
p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
p_pkt_info->cmd_sts = command_status;
@@ -2749,6 +2910,8 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
if (rx_next_curr_desc == rx_used_desc)
mp->rx_resource_err = 1;
+ spin_unlock_irqrestore(&mp->lock, flags);
+
return ETH_OK;
}
@@ -2777,6 +2940,9 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
{
int used_rx_desc; /* Where to return Rx resource */
volatile struct eth_rx_desc *p_used_rx_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mp->lock, flags);
/* Get 'used' Rx descriptor */
used_rx_desc = mp->rx_used_desc_q;
@@ -2800,6 +2966,8 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
/* Any Rx return cancels the Rx resource error status */
mp->rx_resource_err = 0;
+ spin_unlock_irqrestore(&mp->lock, flags);
+
return ETH_OK;
}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b538e3038058..bf55a4cfb3d2 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3243,12 +3243,22 @@ static int __devinit skge_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)))
+ if (sizeof(dma_addr_t) > sizeof(u32) &&
+ !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
using_dac = 1;
- else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
- printk(KERN_ERR PFX "%s no usable DMA configuration\n",
- pci_name(pdev));
- goto err_out_free_regions;
+ err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ if (err < 0) {
+ printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
+ "for consistent allocations\n", pci_name(pdev));
+ goto err_out_free_regions;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+ pci_name(pdev));
+ goto err_out_free_regions;
+ }
}
#ifdef __BIG_ENDIAN
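
A condensed sketch of the probing order introduced here, assuming the 2.6-era PCI DMA API: only attempt the 64-bit masks when dma_addr_t is actually wide enough to carry them, and require the consistent mask to succeed as well before claiming DAC support (skge_set_dma is a hypothetical name):

static int skge_set_dma(struct pci_dev *pdev, int *using_dac)
{
	if (sizeof(dma_addr_t) > sizeof(u32) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		*using_dac = 1;
		return pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	}
	*using_dac = 0;
	return pci_set_dma_mask(pdev, DMA_32BIT_MASK);
}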
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index f5d697c0c031..f8b973a04b65 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -57,7 +57,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "0.11"
+#define DRV_VERSION "0.13"
#define PFX DRV_NAME " "
/*
@@ -75,6 +75,7 @@
#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
#define RX_DEF_PENDING RX_MAX_PENDING
+#define RX_SKB_ALIGN 8
#define TX_RING_SIZE 512
#define TX_DEF_PENDING (TX_RING_SIZE - 1)
@@ -91,7 +92,7 @@
static const u32 default_msg =
NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
| NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
- | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR;
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
static int debug = -1; /* defaults above */
module_param(debug, int, 0);
@@ -624,13 +625,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
}
-static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
+/* Assign RAM buffer allocation.
+ * start and end are in units of 4K bytes;
+ * the RAM registers are in units of 64-bit words.
+ */
+static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
{
- u32 end;
+ u32 start, end;
- start /= 8;
- len /= 8;
- end = start + len - 1;
+ start = startk * 4096/8;
+ end = (endk * 4096/8) - 1;
sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
sky2_write32(hw, RB_ADDR(q, RB_START), start);
@@ -639,14 +643,19 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
sky2_write32(hw, RB_ADDR(q, RB_RP), start);
if (q == Q_R1 || q == Q_R2) {
- u32 rxup, rxlo;
+ u32 space = (endk - startk) * 4096/8;
+ u32 tp = space - space/4;
- rxlo = len/2;
- rxup = rxlo + len/4;
+ /* On receive queues, set the thresholds:
+ * give the receiver priority when > 3/4 full,
+ * send pause when down to 2K
+ */
+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
- /* Set thresholds on receive queue's */
- sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup);
- sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo);
+ tp = space - 2048/8;
+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
} else {
/* Enable store & forward on Tx queue's because
* Tx FIFO is only 1K on Yukon
@@ -695,9 +704,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
* This is a workaround code taken from SysKonnect sk98lin driver
* to deal with chip bug on Yukon EC rev 0 in the wraparound case.
*/
-static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
+static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
u16 idx, u16 *last, u16 size)
{
+ wmb();
if (is_ec_a1(hw) && idx < *last) {
u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
@@ -721,6 +731,7 @@ setnew:
sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
}
*last = idx;
+ mmiowb();
}
@@ -734,11 +745,11 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
/* Return high part of DMA address (could be 32 or 64 bit) */
static inline u32 high32(dma_addr_t a)
{
- return (a >> 16) >> 16;
+ return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
}
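
With a 32-bit dma_addr_t the split shift (a >> 16) >> 16 is already well-defined (unlike a single shift by 32) and evaluates to 0; the sizeof() test makes that explicit and lets the compiler discard the 64-bit descriptor handling at build time. With a 64-bit dma_addr_t, e.g. a = 0x123456789, high32() returns 0x1.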
/* Build description to hardware about buffer */
-static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
+static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
{
struct sky2_rx_le *le;
u32 hi = high32(map);
@@ -878,13 +889,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port;
- spin_lock(&sky2->tx_lock);
+ spin_lock_bh(&sky2->tx_lock);
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
sky2->vlgrp = grp;
- spin_unlock(&sky2->tx_lock);
+ spin_unlock_bh(&sky2->tx_lock);
}
static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -893,27 +904,42 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port;
- spin_lock(&sky2->tx_lock);
+ spin_lock_bh(&sky2->tx_lock);
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
if (sky2->vlgrp)
sky2->vlgrp->vlan_devices[vid] = NULL;
- spin_unlock(&sky2->tx_lock);
+ spin_unlock_bh(&sky2->tx_lock);
}
#endif
/*
+ * It appears the hardware has a bug in the FIFO logic that
+ * cause it to hang if the FIFO gets overrun and the receive buffer
+ * is not aligned. ALso alloc_skb() won't align properly if slab
+ * debugging is enabled.
+ */
+static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
+ if (likely(skb)) {
+ unsigned long p = (unsigned long) skb->data;
+ skb_reserve(skb,
+ ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p);
+ }
+
+ return skb;
+}
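
Worked example of the align-up expression: with RX_SKB_ALIGN = 8 and skb->data at p = 0x1005, ((p + 7) & ~7) - p = 0x1008 - 0x1005 = 3, so skb_reserve(skb, 3) leaves the buffer 8-byte aligned; when p is already aligned the reserve is 0.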
+
+/*
* Allocate and setup receiver buffer pool.
* In case of 64 bit dma, there are 2X as many list elements
* available as ring entries
* and need to reserve one list element so we don't wrap around.
- *
- * It appears the hardware has a bug in the FIFO logic that
- * cause it to hang if the FIFO gets overrun and the receive buffer
- * is not aligned. This means we can't use skb_reserve to align
- * the IP header.
*/
static int sky2_rx_start(struct sky2_port *sky2)
{
@@ -929,7 +955,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
for (i = 0; i < sky2->rx_pending; i++) {
struct ring_info *re = sky2->rx_ring + i;
- re->skb = dev_alloc_skb(sky2->rx_bufsize);
+ re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
if (!re->skb)
goto nomem;
@@ -986,19 +1012,19 @@ static int sky2_up(struct net_device *dev)
sky2_mac_init(hw, port);
- /* Configure RAM buffers */
- if (hw->chip_id == CHIP_ID_YUKON_FE ||
- (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2))
- ramsize = 4096;
- else {
- u8 e0 = sky2_read8(hw, B2_E_0);
- ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096);
- }
+ /* Determine available ram buffer space (in 4K blocks).
+ * Note: not sure about the FE setting below yet
+ */
+ if (hw->chip_id == CHIP_ID_YUKON_FE)
+ ramsize = 4;
+ else
+ ramsize = sky2_read8(hw, B2_E_0);
+
+ /* Give transmitter one third (rounded up) */
+ rxspace = ramsize - (ramsize + 2) / 3;
- /* 2/3 for Rx */
- rxspace = (2 * ramsize) / 3;
sky2_ramset(hw, rxqaddr[port], 0, rxspace);
- sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
+ sky2_ramset(hw, txqaddr[port], rxspace, ramsize);
/* Make sure SyncQ is disabled */
sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
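The new split hands the transmitter one third of the RAM, rounded up, because (ramsize + 2) / 3 is the integer-division spelling of ceil(ramsize / 3). A worked sketch of the arithmetic:

/* Sketch: ramsize = 24 blocks -> tx 8, rx 16;
 * ramsize = 4 (Yukon FE) -> tx 2, rx 2. */
static void split_ram(unsigned ramsize, unsigned *rx, unsigned *tx)
{
	*tx = (ramsize + 2) / 3;	/* one third, rounded up */
	*rx = ramsize - *tx;		/* the remaining two thirds */
}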
@@ -1054,7 +1080,7 @@ static inline int tx_avail(const struct sky2_port *sky2)
}
/* Estimate of number of transmit list elements required */
-static inline unsigned tx_le_req(const struct sk_buff *skb)
+static unsigned tx_le_req(const struct sk_buff *skb)
{
unsigned count;
@@ -1090,6 +1116,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
u16 mss;
u8 ctrl;
+ /* No BH disabling for tx_lock here. We are running in BH disabled
+ * context, and TX reclaim runs via poll inside a software
+ * interrupt; no related locks are taken in hard IRQ processing.
+ */
if (!spin_trylock(&sky2->tx_lock))
return NETDEV_TX_LOCKED;
@@ -1099,8 +1129,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
*/
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
- printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
- dev->name);
+ if (net_ratelimit())
+ printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
+ dev->name);
}
spin_unlock(&sky2->tx_lock);
@@ -1199,7 +1230,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
- addr64 = (mapping >> 16) >> 16;
+ addr64 = high32(mapping);
if (addr64 != sky2->tx_addr64) {
le = get_tx_le(sky2);
le->tx.addr = cpu_to_le32(addr64);
@@ -1229,7 +1260,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
out_unlock:
- mmiowb();
spin_unlock(&sky2->tx_lock);
dev->trans_start = jiffies;
@@ -1282,17 +1312,17 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
dev_kfree_skb_any(skb);
}
- spin_lock(&sky2->tx_lock);
sky2->tx_cons = put;
if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
netif_wake_queue(dev);
- spin_unlock(&sky2->tx_lock);
}
/* Cleanup all untransmitted buffers, assume transmitter not running */
static void sky2_tx_clean(struct sky2_port *sky2)
{
+ spin_lock_bh(&sky2->tx_lock);
sky2_tx_complete(sky2, sky2->tx_prod);
+ spin_unlock_bh(&sky2->tx_lock);
}
/* Network shutdown */
@@ -1582,28 +1612,40 @@ out:
local_irq_enable();
}
+
+/* Transmit timeout is only called if we are running, carrier is up
+ * and tx queue is full (stopped).
+ */
static void sky2_tx_timeout(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned txq = txqaddr[sky2->port];
+ u16 ridx;
+
+	/* Maybe we just missed a status interrupt */
+ spin_lock(&sky2->tx_lock);
+ ridx = sky2_read16(hw,
+ sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
+ sky2_tx_complete(sky2, ridx);
+ spin_unlock(&sky2->tx_lock);
+
+ if (!netif_queue_stopped(dev)) {
+ if (net_ratelimit())
+ pr_info(PFX "transmit interrupt missed? recovered\n");
+ return;
+ }
if (netif_msg_timer(sky2))
printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
- netif_stop_queue(dev);
-
sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
- sky2_read32(hw, Q_ADDR(txq, Q_CSR));
-
sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
sky2_tx_clean(sky2);
sky2_qset(hw, txq);
sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
-
- netif_wake_queue(dev);
}
@@ -1713,7 +1755,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
} else {
struct sk_buff *nskb;
- nskb = dev_alloc_skb(sky2->rx_bufsize);
+ nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
if (!nskb)
goto resubmit;
@@ -1745,7 +1787,7 @@ oversize:
error:
++sky2->net_stats.rx_errors;
- if (netif_msg_rx_err(sky2))
+ if (netif_msg_rx_err(sky2) && net_ratelimit())
printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
sky2->netdev->name, status, length);
@@ -1766,13 +1808,16 @@ error:
*/
#define TX_NO_STATUS 0xffff
-static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
+static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
{
if (last != TX_NO_STATUS) {
struct net_device *dev = hw->dev[port];
if (dev && netif_running(dev)) {
struct sky2_port *sky2 = netdev_priv(dev);
+
+ spin_lock(&sky2->tx_lock);
sky2_tx_complete(sky2, last);
+ spin_unlock(&sky2->tx_lock);
}
}
}
@@ -1800,7 +1845,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
struct sk_buff *skb;
u32 status;
u16 length;
- u8 op;
le = hw->st_le + hw->st_idx;
hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
@@ -1814,10 +1858,8 @@ static int sky2_poll(struct net_device *dev0, int *budget)
sky2 = netdev_priv(dev);
status = le32_to_cpu(le->status);
length = le16_to_cpu(le->length);
- op = le->opcode & ~HW_OWNER;
- le->opcode = 0;
- switch (op) {
+ switch (le->opcode & ~HW_OWNER) {
case OP_RXSTAT:
skb = sky2_receive(sky2, length, status);
if (!skb)
@@ -1865,14 +1907,13 @@ static int sky2_poll(struct net_device *dev0, int *budget)
default:
if (net_ratelimit())
printk(KERN_WARNING PFX
- "unknown status opcode 0x%x\n", op);
+ "unknown status opcode 0x%x\n", le->opcode);
break;
}
}
exit_loop:
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
- mmiowb();
sky2_tx_check(hw, 0, tx_done[0]);
sky2_tx_check(hw, 1, tx_done[1]);
@@ -1887,7 +1928,6 @@ exit_loop:
netif_rx_complete(dev0);
hw->intr_mask |= Y2_IS_STAT_BMU;
sky2_write32(hw, B0_IMSK, hw->intr_mask);
- mmiowb();
return 0;
} else {
*budget -= work_done;
@@ -1900,35 +1940,42 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
{
struct net_device *dev = hw->dev[port];
- printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
- dev->name, status);
+ if (net_ratelimit())
+ printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
+ dev->name, status);
if (status & Y2_IS_PAR_RD1) {
- printk(KERN_ERR PFX "%s: ram data read parity error\n",
- dev->name);
+ if (net_ratelimit())
+ printk(KERN_ERR PFX "%s: ram data read parity error\n",
+ dev->name);
/* Clear IRQ */
sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
}
if (status & Y2_IS_PAR_WR1) {
- printk(KERN_ERR PFX "%s: ram data write parity error\n",
- dev->name);
+ if (net_ratelimit())
+ printk(KERN_ERR PFX "%s: ram data write parity error\n",
+ dev->name);
sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
}
if (status & Y2_IS_PAR_MAC1) {
- printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
+ if (net_ratelimit())
+ printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
}
if (status & Y2_IS_PAR_RX1) {
- printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
+ if (net_ratelimit())
+ printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
}
if (status & Y2_IS_TCP_TXA1) {
- printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name);
+ if (net_ratelimit())
+ printk(KERN_ERR PFX "%s: TCP segmentation error\n",
+ dev->name);
sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
}
}
@@ -1944,8 +1991,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
u16 pci_err;
pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
- printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
- pci_name(hw->pdev), pci_err);
+ if (net_ratelimit())
+ printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
+ pci_name(hw->pdev), pci_err);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_write_config_word(hw->pdev, PCI_STATUS,
@@ -1959,8 +2007,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
- printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
- pci_name(hw->pdev), pex_err);
+ if (net_ratelimit())
+ printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
+ pci_name(hw->pdev), pex_err);
/* clear the interrupt */
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -2250,7 +2299,7 @@ static int sky2_reset(struct sky2_hw *hw)
return 0;
}
-static inline u32 sky2_supported_modes(const struct sky2_hw *hw)
+static u32 sky2_supported_modes(const struct sky2_hw *hw)
{
u32 modes;
if (hw->copper) {
@@ -2995,7 +3044,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
return dev;
}
-static inline void sky2_show_addr(struct net_device *dev)
+static void __devinit sky2_show_addr(struct net_device *dev)
{
const struct sky2_port *sky2 = netdev_priv(dev);
@@ -3038,13 +3087,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_regions;
}
- if (sizeof(dma_addr_t) > sizeof(u32)) {
- err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
- if (!err)
- using_dac = 1;
- }
+ if (sizeof(dma_addr_t) > sizeof(u32) &&
+ !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+ using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ if (err < 0) {
+ printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
+ "for consistent allocations\n", pci_name(pdev));
+ goto err_out_free_regions;
+ }
- if (!using_dac) {
+ } else {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
printk(KERN_ERR PFX "%s no usable DMA configuration\n",
@@ -3052,6 +3105,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_regions;
}
}
+
#ifdef __BIG_ENDIAN
/* byte swap descriptors in hardware */
{
@@ -3064,14 +3118,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
#endif
err = -ENOMEM;
- hw = kmalloc(sizeof(*hw), GFP_KERNEL);
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw) {
printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
pci_name(pdev));
goto err_out_free_regions;
}
- memset(hw, 0, sizeof(*hw));
hw->pdev = pdev;
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
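The reworked probe follows the usual 64-bit-first DMA negotiation: if dma_addr_t is wider than 32 bits, try a 64-bit streaming mask, require a matching consistent mask, and otherwise fall back to 32-bit. A hedged sketch of the pattern with the PCI API of this era:

/* Sketch: DMA mask negotiation as done above; the helper name is
 * hypothetical. Returns 0 on success, an errno otherwise. */
static int negotiate_dma(struct pci_dev *pdev, int *using_dac)
{
	if (sizeof(dma_addr_t) > sizeof(u32) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		*using_dac = 1;
		/* coherent allocations must use the same width */
		return pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	}
	*using_dac = 0;
	return pci_set_dma_mask(pdev, DMA_32BIT_MASK);
}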
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 0d765f1733b5..1f5975a61e1f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -22,7 +22,6 @@
*/
#include <linux/config.h>
-
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/delay.h>
@@ -30,6 +29,7 @@
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
+#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/ip.h>
@@ -43,6 +43,7 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
+#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <asm/bitops.h>
@@ -108,42 +109,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
writel(value, card->regs + reg);
}
-/**
- * spider_net_write_reg_sync - writes to an SMMIO register of a card
- * @card: device structure
- * @reg: register to write to
- * @value: value to write into the specified SMMIO register
- *
- * Unlike spider_net_write_reg, this will also make sure the
- * data arrives on the card by reading the reg again.
- */
-static void
-spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
-{
- value = cpu_to_le32(value);
- writel(value, card->regs + reg);
- (void)readl(card->regs + reg);
-}
-
-/**
- * spider_net_rx_irq_off - switch off rx irq on this spider card
- * @card: device structure
- *
- * switches off rx irq by masking them out in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_off(struct spider_net_card *card)
-{
- u32 regvalue;
- unsigned long flags;
-
- spin_lock_irqsave(&card->intmask_lock, flags);
- regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
- regvalue &= ~SPIDER_NET_RXINT;
- spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
- spin_unlock_irqrestore(&card->intmask_lock, flags);
-}
-
/** spider_net_write_phy - write to phy register
* @netdev: adapter to be written to
* @mii_id: id of MII
@@ -199,60 +164,33 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
}
/**
- * spider_net_rx_irq_on - switch on rx irq on this spider card
- * @card: device structure
- *
- * switches on rx irq by enabling them in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_on(struct spider_net_card *card)
-{
- u32 regvalue;
- unsigned long flags;
-
- spin_lock_irqsave(&card->intmask_lock, flags);
- regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
- regvalue |= SPIDER_NET_RXINT;
- spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
- spin_unlock_irqrestore(&card->intmask_lock, flags);
-}
-
-/**
- * spider_net_tx_irq_off - switch off tx irq on this spider card
+ * spider_net_rx_irq_off - switch off rx irq on this spider card
* @card: device structure
*
- * switches off tx irq by masking them out in the GHIINTnMSK register
+ * switches off rx irq by masking them out in the GHIINTnMSK register
*/
static void
-spider_net_tx_irq_off(struct spider_net_card *card)
+spider_net_rx_irq_off(struct spider_net_card *card)
{
u32 regvalue;
- unsigned long flags;
- spin_lock_irqsave(&card->intmask_lock, flags);
- regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
- regvalue &= ~SPIDER_NET_TXINT;
- spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
- spin_unlock_irqrestore(&card->intmask_lock, flags);
+ regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
}
/**
- * spider_net_tx_irq_on - switch on tx irq on this spider card
+ * spider_net_rx_irq_on - switch on rx irq on this spider card
* @card: device structure
*
- * switches on tx irq by enabling them in the GHIINTnMSK register
+ * switches on rx irq by enabling them in the GHIINTnMSK register
*/
static void
-spider_net_tx_irq_on(struct spider_net_card *card)
+spider_net_rx_irq_on(struct spider_net_card *card)
{
u32 regvalue;
- unsigned long flags;
- spin_lock_irqsave(&card->intmask_lock, flags);
- regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
- regvalue |= SPIDER_NET_TXINT;
- spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
- spin_unlock_irqrestore(&card->intmask_lock, flags);
+ regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
+ spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
}
/**
@@ -326,9 +264,8 @@ static enum spider_net_descr_status
spider_net_get_descr_status(struct spider_net_descr *descr)
{
u32 cmd_status;
- rmb();
+
cmd_status = descr->dmac_cmd_status;
- rmb();
cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
/* no need to mask out any bits, as cmd_status is 32 bits wide only
* (and unsigned) */
@@ -349,7 +286,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
{
u32 cmd_status;
/* read the status */
- mb();
cmd_status = descr->dmac_cmd_status;
/* clean the upper 4 bits */
cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
@@ -357,7 +293,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
/* and write it back */
descr->dmac_cmd_status = cmd_status;
- wmb();
}
/**
@@ -398,8 +333,9 @@ spider_net_init_chain(struct spider_net_card *card,
{
int i;
struct spider_net_descr *descr;
+ dma_addr_t buf;
- spin_lock_init(&card->chain_lock);
+	atomic_set(&card->rx_chain_refill, 0);
descr = start_descr;
memset(descr, 0, sizeof(*descr) * no);
@@ -408,14 +344,14 @@ spider_net_init_chain(struct spider_net_card *card,
for (i=0; i<no; i++, descr++) {
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
- descr->bus_addr =
- pci_map_single(card->pdev, descr,
- SPIDER_NET_DESCR_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ buf = pci_map_single(card->pdev, descr,
+ SPIDER_NET_DESCR_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
- if (descr->bus_addr == DMA_ERROR_CODE)
+ if (buf == DMA_ERROR_CODE)
goto iommu_error;
+ descr->bus_addr = buf;
descr->next = descr + 1;
descr->prev = descr - 1;
@@ -439,7 +375,8 @@ iommu_error:
for (i=0; i < no; i++, descr++)
if (descr->bus_addr)
pci_unmap_single(card->pdev, descr->bus_addr,
- SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
+ SPIDER_NET_DESCR_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
return -ENOMEM;
}
@@ -459,7 +396,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
if (descr->skb) {
dev_kfree_skb(descr->skb);
pci_unmap_single(card->pdev, descr->buf_addr,
- SPIDER_NET_MAX_MTU,
+ SPIDER_NET_MAX_FRAME,
PCI_DMA_BIDIRECTIONAL);
}
descr = descr->next;
@@ -480,12 +417,13 @@ static int
spider_net_prepare_rx_descr(struct spider_net_card *card,
struct spider_net_descr *descr)
{
+ dma_addr_t buf;
int error = 0;
int offset;
int bufsize;
/* we need to round up the buffer size to a multiple of 128 */
- bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) &
+ bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
(~(SPIDER_NET_RXBUF_ALIGN - 1));
/* and we need to have it 128 byte aligned, therefore we allocate a
@@ -493,10 +431,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
/* allocate an skb */
descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
if (!descr->skb) {
- if (net_ratelimit())
- if (netif_msg_rx_err(card))
- pr_err("Not enough memory to allocate "
- "rx buffer\n");
+ if (netif_msg_rx_err(card) && net_ratelimit())
+ pr_err("Not enough memory to allocate rx buffer\n");
return -ENOMEM;
}
descr->buf_size = bufsize;
@@ -510,12 +446,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
if (offset)
skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
/* io-mmu-map the skb */
- descr->buf_addr = pci_map_single(card->pdev, descr->skb->data,
- SPIDER_NET_MAX_MTU,
- PCI_DMA_BIDIRECTIONAL);
- if (descr->buf_addr == DMA_ERROR_CODE) {
+ buf = pci_map_single(card->pdev, descr->skb->data,
+ SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+ descr->buf_addr = buf;
+ if (buf == DMA_ERROR_CODE) {
dev_kfree_skb_any(descr->skb);
- if (netif_msg_rx_err(card))
+ if (netif_msg_rx_err(card) && net_ratelimit())
pr_err("Could not iommu-map rx buffer\n");
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
} else {
@@ -526,10 +462,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
}
/**
- * spider_net_enable_rxctails - sets RX dmac chain tail addresses
+ * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
* @card: card structure
*
- * spider_net_enable_rxctails sets the RX DMAC chain tail adresses in the
+ * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
* chip by writing to the appropriate register. DMA is enabled in
* spider_net_enable_rxdmac.
*/
@@ -551,6 +487,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
static void
spider_net_enable_rxdmac(struct spider_net_card *card)
{
+ wmb();
spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
SPIDER_NET_DMA_RX_VALUE);
}
@@ -559,32 +496,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
* spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
* @card: card structure
*
- * refills descriptors in all chains (last used chain first): allocates skbs
- * and iommu-maps them.
+ * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
*/
static void
spider_net_refill_rx_chain(struct spider_net_card *card)
{
struct spider_net_descr_chain *chain;
- int count = 0;
- unsigned long flags;
chain = &card->rx_chain;
- spin_lock_irqsave(&card->chain_lock, flags);
- while (spider_net_get_descr_status(chain->head) ==
- SPIDER_NET_DESCR_NOT_IN_USE) {
- if (spider_net_prepare_rx_descr(card, chain->head))
- break;
- count++;
- chain->head = chain->head->next;
- }
- spin_unlock_irqrestore(&card->chain_lock, flags);
+ /* one context doing the refill (and a second context seeing that
+	 * and skipping it) is ok. If called by NAPI, we'll be called again
+ * as spider_net_decode_one_descr is called several times. If some
+ * interrupt calls us, the NAPI is about to clean up anyway. */
+ if (atomic_inc_return(&card->rx_chain_refill) == 1)
+ while (spider_net_get_descr_status(chain->head) ==
+ SPIDER_NET_DESCR_NOT_IN_USE) {
+ if (spider_net_prepare_rx_descr(card, chain->head))
+ break;
+ chain->head = chain->head->next;
+ }
- /* could be optimized, only do that, if we know the DMA processing
- * has terminated */
- if (count)
- spider_net_enable_rxdmac(card);
+ atomic_dec(&card->rx_chain_refill);
}
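The chain_lock is gone: the atomic counter elects a single refiller, and any context that loses the race simply skips the work, relying on the winner to finish it. The bare idiom, with hypothetical names:

/* Sketch: let exactly one context run the worker; racing callers
 * back off. do_refill() is hypothetical. */
static atomic_t refill_busy = ATOMIC_INIT(0);

static void try_refill(void)
{
	if (atomic_inc_return(&refill_busy) == 1)
		do_refill();
	atomic_dec(&refill_busy);
}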
/**
@@ -613,6 +546,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
/* this will allocate the rest of the rx buffers; if not, it's
* business as usual later on */
spider_net_refill_rx_chain(card);
+ spider_net_enable_rxdmac(card);
return 0;
error:
@@ -649,24 +583,30 @@ spider_net_release_tx_descr(struct spider_net_card *card,
* @card: adapter structure
* @brutal: if set, don't care about whether descriptor seems to be in use
*
- * releases the tx descriptors that spider has finished with (if non-brutal)
- * or simply release tx descriptors (if brutal)
+ * returns 0 if the tx ring is empty, otherwise 1.
+ *
+ * spider_net_release_tx_chain releases the tx descriptors that spider has
+ * finished with (if non-brutal) or simply release tx descriptors (if brutal).
+ * If some other context is calling this function, we return 1 so that we're
+ * scheduled again (if we were scheduled) and will not lose initiative.
*/
-static void
+static int
spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
{
struct spider_net_descr_chain *tx_chain = &card->tx_chain;
enum spider_net_descr_status status;
- spider_net_tx_irq_off(card);
+ if (atomic_inc_return(&card->tx_chain_release) != 1) {
+ atomic_dec(&card->tx_chain_release);
+ return 1;
+ }
- /* no lock for chain needed, if this is only executed once at a time */
-again:
for (;;) {
status = spider_net_get_descr_status(tx_chain->tail);
switch (status) {
case SPIDER_NET_DESCR_CARDOWNED:
- if (!brutal) goto out;
+ if (!brutal)
+ goto out;
/* fallthrough, if we release the descriptors
* brutally (then we don't care about
* SPIDER_NET_DESCR_CARDOWNED) */
@@ -693,25 +633,30 @@ again:
tx_chain->tail = tx_chain->tail->next;
}
out:
+ atomic_dec(&card->tx_chain_release);
+
netif_wake_queue(card->netdev);
- if (!brutal) {
- /* switch on tx irqs (while we are still in the interrupt
- * handler, so we don't get an interrupt), check again
- * for done descriptors. This results in fewer interrupts */
- spider_net_tx_irq_on(card);
- status = spider_net_get_descr_status(tx_chain->tail);
- switch (status) {
- case SPIDER_NET_DESCR_RESPONSE_ERROR:
- case SPIDER_NET_DESCR_PROTECTION_ERROR:
- case SPIDER_NET_DESCR_FORCE_END:
- case SPIDER_NET_DESCR_COMPLETE:
- goto again;
- default:
- break;
- }
- }
+ if (status == SPIDER_NET_DESCR_CARDOWNED)
+ return 1;
+ return 0;
+}
+/**
+ * spider_net_cleanup_tx_ring - cleans up the TX ring
+ * @card: card structure
+ *
+ * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
+ * interrupts to cleanup our TX ring) and returns sent packets to the stack
+ * by freeing them
+ */
+static void
+spider_net_cleanup_tx_ring(struct spider_net_card *card)
+{
+	if (spider_net_release_tx_chain(card, 0) &&
+	    (card->netdev->flags & IFF_UP)) {
+ mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
+ }
}
/**
@@ -726,16 +671,22 @@ out:
static u8
spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
{
- /* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
- * ff:ff:ff:ff:ff:ff must result in 0xfd */
u32 crc;
u8 hash;
+ char addr_for_crc[ETH_ALEN] = { 0, };
+ int i, bit;
- crc = crc32_be(~0, addr, netdev->addr_len);
+ for (i = 0; i < ETH_ALEN * 8; i++) {
+ bit = (addr[i / 8] >> (i % 8)) & 1;
+ addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
+ }
+
+ crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
hash = (crc >> 27);
hash <<= 3;
hash |= crc & 7;
+ hash &= 0xff;
return hash;
}
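The rewrite mirrors the address bit-for-bit (byte order and bit order within each byte) before feeding crc32_be(), which is what the removed FIXME asked for: if the fix is right, the FIXME's two vectors now hold. A hedged self-check, assuming a netdev whose addr_len is ETH_ALEN:

/* Sketch: sanity-check the hash against the old FIXME's vectors. */
static void check_multicast_hash(struct net_device *netdev)
{
	static const u8 mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	static const u8 bc[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	WARN_ON(spider_net_get_multicast_hash(netdev, (u8 *) mc) != 0xa9);
	WARN_ON(spider_net_get_multicast_hash(netdev, (u8 *) bc) != 0xfd);
}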
@@ -821,9 +772,11 @@ spider_net_stop(struct net_device *netdev)
{
struct spider_net_card *card = netdev_priv(netdev);
+ tasklet_kill(&card->rxram_full_tl);
netif_poll_disable(netdev);
netif_carrier_off(netdev);
netif_stop_queue(netdev);
+ del_timer_sync(&card->tx_timer);
/* disable/mask all interrupts */
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
@@ -872,13 +825,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card)
* @skb: packet to consider
*
* fills out the command and status field of the descriptor structure,
- * depending on hardware checksum settings. This function assumes a wmb()
- * has executed before.
+ * depending on hardware checksum settings.
*/
static void
spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
struct sk_buff *skb)
{
+ /* make sure the other fields in the descriptor are written */
+ wmb();
+
if (skb->ip_summed != CHECKSUM_HW) {
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
return;
@@ -887,14 +842,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
/* is packet ip?
* if yes: tcp? udp? */
if (skb->protocol == htons(ETH_P_IP)) {
- if (skb->nh.iph->protocol == IPPROTO_TCP) {
+ if (skb->nh.iph->protocol == IPPROTO_TCP)
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
- } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
+ else if (skb->nh.iph->protocol == IPPROTO_UDP)
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
- } else { /* the stack should checksum non-tcp and non-udp
- packets on his own: NETIF_F_IP_CSUM */
+ else /* the stack should checksum non-tcp and non-udp
+			 packets on its own: NETIF_F_IP_CSUM */
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
- }
}
}
@@ -914,23 +868,22 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
struct spider_net_descr *descr,
struct sk_buff *skb)
{
- descr->buf_addr = pci_map_single(card->pdev, skb->data,
- skb->len, PCI_DMA_BIDIRECTIONAL);
- if (descr->buf_addr == DMA_ERROR_CODE) {
- if (netif_msg_tx_err(card))
+ dma_addr_t buf;
+
+ buf = pci_map_single(card->pdev, skb->data,
+ skb->len, PCI_DMA_BIDIRECTIONAL);
+ if (buf == DMA_ERROR_CODE) {
+ if (netif_msg_tx_err(card) && net_ratelimit())
pr_err("could not iommu-map packet (%p, %i). "
"Dropping packet\n", skb->data, skb->len);
return -ENOMEM;
}
+ descr->buf_addr = buf;
descr->buf_size = skb->len;
descr->skb = skb;
descr->data_status = 0;
- /* make sure the above values are in memory before we change the
- * status */
- wmb();
-
spider_net_set_txdescr_cmdstat(descr,skb);
return 0;
@@ -972,17 +925,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
struct spider_net_descr *descr;
int result;
- descr = spider_net_get_next_tx_descr(card);
+ spider_net_release_tx_chain(card, 0);
- if (!descr) {
- netif_stop_queue(netdev);
+ descr = spider_net_get_next_tx_descr(card);
- descr = spider_net_get_next_tx_descr(card);
- if (!descr)
- goto error;
- else
- netif_start_queue(netdev);
- }
+ if (!descr)
+ goto error;
result = spider_net_prepare_tx_descr(card, descr, skb);
if (result)
@@ -990,19 +938,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
card->tx_chain.head = card->tx_chain.head->next;
- /* make sure the status from spider_net_prepare_tx_descr is in
- * memory before we check out the previous descriptor */
- wmb();
-
if (spider_net_get_descr_status(descr->prev) !=
- SPIDER_NET_DESCR_CARDOWNED)
- spider_net_kick_tx_dma(card, descr);
+ SPIDER_NET_DESCR_CARDOWNED) {
+ /* make sure the current descriptor is in memory. Then
+ * kicking it on again makes sense, if the previous is not
+ * card-owned anymore. Check the previous descriptor twice
+		 * to avoid an mb() in the heavy-traffic case */
+ mb();
+ if (spider_net_get_descr_status(descr->prev) !=
+ SPIDER_NET_DESCR_CARDOWNED)
+ spider_net_kick_tx_dma(card, descr);
+ }
+
+ mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
return NETDEV_TX_OK;
error:
card->netdev_stats.tx_dropped++;
- return NETDEV_TX_LOCKED;
+ return NETDEV_TX_BUSY;
}
/**
@@ -1027,6 +981,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
* @descr: descriptor to process
* @card: card structure
+ * @napi: whether caller is in NAPI context
*
* returns 1 on success, 0 if no packet was passed to the stack
*
@@ -1035,7 +990,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
*/
static int
spider_net_pass_skb_up(struct spider_net_descr *descr,
- struct spider_net_card *card)
+ struct spider_net_card *card, int napi)
{
struct sk_buff *skb;
struct net_device *netdev;
@@ -1046,22 +1001,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
netdev = card->netdev;
- /* check for errors in the data_error flag */
- if ((data_error & SPIDER_NET_DATA_ERROR_MASK) &&
- netif_msg_rx_err(card))
- pr_err("error in received descriptor found, "
- "data_status=x%08x, data_error=x%08x\n",
- data_status, data_error);
-
- /* prepare skb, unmap descriptor */
- skb = descr->skb;
- pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
+ /* unmap descriptor */
+ pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
PCI_DMA_BIDIRECTIONAL);
/* the cases we'll throw away the packet immediately */
- if (data_error & SPIDER_NET_DESTROY_RX_FLAGS)
+ if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
+ if (netif_msg_rx_err(card))
+ pr_err("error in received descriptor found, "
+ "data_status=x%08x, data_error=x%08x\n",
+ data_status, data_error);
return 0;
+ }
+ skb = descr->skb;
skb->dev = netdev;
skb_put(skb, descr->valid_size);
@@ -1073,14 +1026,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
/* checksum offload */
if (card->options.rx_csum) {
- if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) &&
- (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) )
+ if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
+ SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
+ !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
- } else {
+ } else
skb->ip_summed = CHECKSUM_NONE;
- }
if (data_status & SPIDER_NET_VLAN_PACKET) {
/* further enhancements: HW-accel VLAN
@@ -1089,7 +1042,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
}
/* pass skb up to stack */
- netif_receive_skb(skb);
+ if (napi)
+ netif_receive_skb(skb);
+ else
+ netif_rx_ni(skb);
/* update netdevice statistics */
card->netdev_stats.rx_packets++;
@@ -1099,16 +1055,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
}
/**
- * spider_net_decode_descr - processes an rx descriptor
+ * spider_net_decode_one_descr - processes an rx descriptor
* @card: card structure
+ * @napi: whether caller is in NAPI context
*
* returns 1 if a packet has been sent to the stack, otherwise 0
*
* processes an rx descriptor by iommu-unmapping the data buffer and passing
- * the packet up to the stack
+ * the packet up to the stack. This function is called in softirq
+ * context, e.g. either bottom half from interrupt or NAPI polling context
*/
static int
-spider_net_decode_one_descr(struct spider_net_card *card)
+spider_net_decode_one_descr(struct spider_net_card *card, int napi)
{
enum spider_net_descr_status status;
struct spider_net_descr *descr;
@@ -1122,17 +1080,19 @@ spider_net_decode_one_descr(struct spider_net_card *card)
if (status == SPIDER_NET_DESCR_CARDOWNED) {
/* nothing in the descriptor yet */
- return 0;
+		result = 0;
+ goto out;
}
if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
- /* not initialized yet, I bet chain->tail == chain->head
- * and the ring is empty */
+ /* not initialized yet, the ring must be empty */
spider_net_refill_rx_chain(card);
- return 0;
+ spider_net_enable_rxdmac(card);
+		result = 0;
+ goto out;
}
- /* descriptor definitively used -- move on head */
+ /* descriptor definitively used -- move on tail */
chain->tail = descr->next;
result = 0;
@@ -1143,6 +1103,9 @@ spider_net_decode_one_descr(struct spider_net_card *card)
pr_err("%s: dropping RX descriptor with state %d\n",
card->netdev->name, status);
card->netdev_stats.rx_dropped++;
+ pci_unmap_single(card->pdev, descr->buf_addr,
+ SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+ dev_kfree_skb_irq(descr->skb);
goto refill;
}
@@ -1155,12 +1118,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
}
/* ok, we've got a packet in descr */
- result = spider_net_pass_skb_up(descr, card);
+ result = spider_net_pass_skb_up(descr, card, napi);
refill:
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
/* change the descriptor state: */
- spider_net_refill_rx_chain(card);
-
+ if (!napi)
+ spider_net_refill_rx_chain(card);
+out:
return result;
}
@@ -1186,7 +1150,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
packets_to_do = min(*budget, netdev->quota);
while (packets_to_do) {
- if (spider_net_decode_one_descr(card)) {
+ if (spider_net_decode_one_descr(card, 1)) {
packets_done++;
packets_to_do--;
} else {
@@ -1198,6 +1162,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
netdev->quota -= packets_done;
*budget -= packets_done;
+ spider_net_refill_rx_chain(card);
/* if all packets are in the stack, enable interrupts and return 0 */
/* if not, return 1 */
@@ -1342,6 +1307,24 @@ spider_net_enable_txdmac(struct spider_net_card *card)
}
/**
+ * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
+ * @card: card structure
+ *
+ * spider_net_handle_rxram_full empties the RX ring so that spider can put
+ * more packets in it and empty its RX RAM. This is called in bottom half
+ * context
+ */
+static void
+spider_net_handle_rxram_full(struct spider_net_card *card)
+{
+ while (spider_net_decode_one_descr(card, 0))
+ ;
+ spider_net_enable_rxchtails(card);
+ spider_net_enable_rxdmac(card);
+ netif_rx_schedule(card->netdev);
+}
+
+/**
* spider_net_handle_error_irq - handles errors raised by an interrupt
* @card: card structure
* @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1449,17 +1432,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
switch (i)
{
case SPIDER_NET_GTMFLLINT:
- if (netif_msg_intr(card))
+ if (netif_msg_intr(card) && net_ratelimit())
pr_err("Spider TX RAM full\n");
show_error = 0;
break;
+ case SPIDER_NET_GRFDFLLINT: /* fallthrough */
+ case SPIDER_NET_GRFCFLLINT: /* fallthrough */
+ case SPIDER_NET_GRFBFLLINT: /* fallthrough */
+ case SPIDER_NET_GRFAFLLINT: /* fallthrough */
case SPIDER_NET_GRMFLLINT:
- if (netif_msg_intr(card))
+ if (netif_msg_intr(card) && net_ratelimit())
pr_err("Spider RX RAM full, incoming packets "
- "might be discarded !\n");
- netif_rx_schedule(card->netdev);
- spider_net_enable_rxchtails(card);
- spider_net_enable_rxdmac(card);
+ "might be discarded!\n");
+ spider_net_rx_irq_off(card);
+ tasklet_schedule(&card->rxram_full_tl);
+ show_error = 0;
break;
/* case SPIDER_NET_GTMSHTINT: problem, print a message */
@@ -1467,10 +1454,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
/* allrighty. tx from previous descr ok */
show_error = 0;
break;
- /* case SPIDER_NET_GRFDFLLINT: print a message down there */
- /* case SPIDER_NET_GRFCFLLINT: print a message down there */
- /* case SPIDER_NET_GRFBFLLINT: print a message down there */
- /* case SPIDER_NET_GRFAFLLINT: print a message down there */
/* chain end */
case SPIDER_NET_GDDDCEINT: /* fallthrough */
@@ -1482,6 +1465,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
"restarting DMAC %c.\n",
'D'+i-SPIDER_NET_GDDDCEINT);
spider_net_refill_rx_chain(card);
+ spider_net_enable_rxdmac(card);
show_error = 0;
break;
@@ -1492,6 +1476,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
case SPIDER_NET_GDAINVDINT:
/* could happen when rx chain is full */
spider_net_refill_rx_chain(card);
+ spider_net_enable_rxdmac(card);
show_error = 0;
break;
@@ -1580,17 +1565,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
if (!status_reg)
return IRQ_NONE;
- if (status_reg & SPIDER_NET_TXINT)
- spider_net_release_tx_chain(card, 0);
-
if (status_reg & SPIDER_NET_RXINT ) {
spider_net_rx_irq_off(card);
netif_rx_schedule(netdev);
}
- /* we do this after rx and tx processing, as we want the tx chain
- * processed to see, whether we should restart tx dma processing */
- spider_net_handle_error_irq(card, status_reg);
+ if (status_reg & SPIDER_NET_ERRINT )
+ spider_net_handle_error_irq(card, status_reg);
/* clear interrupt sources */
spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
@@ -1831,34 +1812,40 @@ spider_net_setup_phy(struct spider_net_card *card)
/**
* spider_net_download_firmware - loads firmware into the adapter
* @card: card structure
- * @firmware: firmware pointer
+ * @firmware_ptr: pointer to firmware data
*
- * spider_net_download_firmware loads the firmware opened by
- * spider_net_init_firmware into the adapter.
+ * spider_net_download_firmware loads the firmware data into the
+ * adapter. It assumes the length has already been validated.
*/
-static void
+static int
spider_net_download_firmware(struct spider_net_card *card,
- const struct firmware *firmware)
+ u8 *firmware_ptr)
{
int sequencer, i;
- u32 *fw_ptr = (u32 *)firmware->data;
+ u32 *fw_ptr = (u32 *)firmware_ptr;
/* stop sequencers */
spider_net_write_reg(card, SPIDER_NET_GSINIT,
SPIDER_NET_STOP_SEQ_VALUE);
- for (sequencer = 0; sequencer < 6; sequencer++) {
+ for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
+ sequencer++) {
spider_net_write_reg(card,
SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
+ for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
sequencer * 8, *fw_ptr);
fw_ptr++;
}
}
+ if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
+ return -EIO;
+
spider_net_write_reg(card, SPIDER_NET_GSINIT,
SPIDER_NET_RUN_SEQ_VALUE);
+
+ return 0;
}
/**
@@ -1890,31 +1877,53 @@ spider_net_download_firmware(struct spider_net_card *card,
static int
spider_net_init_firmware(struct spider_net_card *card)
{
- const struct firmware *firmware;
- int err = -EIO;
+ struct firmware *firmware = NULL;
+ struct device_node *dn;
+ u8 *fw_prop = NULL;
+ int err = -ENOENT;
+ int fw_size;
+
+ if (request_firmware((const struct firmware **)&firmware,
+ SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
+		if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
+			if (netif_msg_probe(card))
+				pr_err("Incorrect size of spidernet firmware in "
+				       "filesystem. Looking in host firmware...\n");
+ goto try_host_fw;
+ }
+ err = spider_net_download_firmware(card, firmware->data);
- if (request_firmware(&firmware,
- SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) {
- if (netif_msg_probe(card))
- pr_err("Couldn't read in sequencer data file %s.\n",
- SPIDER_NET_FIRMWARE_NAME);
- firmware = NULL;
- goto out;
- }
+ release_firmware(firmware);
+ if (err)
+ goto try_host_fw;
- if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) {
- if (netif_msg_probe(card))
- pr_err("Invalid size of sequencer data file %s.\n",
- SPIDER_NET_FIRMWARE_NAME);
- goto out;
+ goto done;
}
- spider_net_download_firmware(card, firmware);
+try_host_fw:
+ dn = pci_device_to_OF_node(card->pdev);
+ if (!dn)
+ goto out_err;
- err = 0;
-out:
- release_firmware(firmware);
+ fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
+ if (!fw_prop)
+ goto out_err;
+
+	if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
+		if (netif_msg_probe(card))
+			pr_err("Incorrect size of spidernet firmware in "
+			       "host firmware\n");
+ goto done;
+ }
+ err = spider_net_download_firmware(card, fw_prop);
+
+done:
+ return err;
+out_err:
+ if (netif_msg_probe(card))
+ pr_err("Couldn't find spidernet firmware in filesystem " \
+ "or host firmware\n");
return err;
}
@@ -1934,10 +1943,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
SPIDER_NET_CKRCTRL_RUN_VALUE);
/* empty sequencer data */
- for (sequencer = 0; sequencer < 6; sequencer++) {
+ for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
+ sequencer++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
sequencer * 8, 0x0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
+ for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
sequencer * 8, 0x0);
}
@@ -2061,7 +2071,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
SET_NETDEV_DEV(netdev, &card->pdev->dev);
pci_set_drvdata(card->pdev, netdev);
- spin_lock_init(&card->intmask_lock);
+
+	atomic_set(&card->tx_chain_release, 0);
+ card->rxram_full_tl.data = (unsigned long) card;
+ card->rxram_full_tl.func =
+ (void (*)(unsigned long)) spider_net_handle_rxram_full;
+ init_timer(&card->tx_timer);
+ card->tx_timer.function =
+ (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
+ card->tx_timer.data = (unsigned long) card;
netdev->irq = card->pdev->irq;
card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 22b2f2347351..5922b529a048 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -33,25 +33,32 @@ extern struct ethtool_ops spider_net_ethtool_ops;
extern char spider_net_driver_name[];
-#define SPIDER_NET_MAX_MTU 2308
+#define SPIDER_NET_MAX_FRAME 2312
+#define SPIDER_NET_MAX_MTU 2294
#define SPIDER_NET_MIN_MTU 64
#define SPIDER_NET_RXBUF_ALIGN 128
-#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 64
+#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_RX_DESCRIPTORS_MAX 256
+#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
-#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 64
+#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_TX_DESCRIPTORS_MAX 256
+#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
+
+#define SPIDER_NET_TX_TIMER 20
#define SPIDER_NET_RX_CSUM_DEFAULT 1
-#define SPIDER_NET_WATCHDOG_TIMEOUT 5*HZ
-#define SPIDER_NET_NAPI_WEIGHT 64
+#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
+#define SPIDER_NET_NAPI_WEIGHT 64
-#define SPIDER_NET_FIRMWARE_LEN 1024
+#define SPIDER_NET_FIRMWARE_SEQS 6
+#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
+#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
+ SPIDER_NET_FIRMWARE_SEQWORDS * \
+ sizeof(u32))
#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
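For the record, the derived length is 6 x 1024 x 4 = 24576 bytes, the size both firmware paths now validate. A compile-time guard of that arithmetic, assuming this era's BUILD_BUG_ON() (which must sit inside a function):

/* Sketch: pin the expected firmware image size at compile time. */
static inline void spider_net_fw_size_check(void)
{
	BUILD_BUG_ON(SPIDER_NET_FIRMWARE_LEN != 24576);
}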
/** spider_net SMMIO registers */
@@ -142,14 +149,12 @@ extern char spider_net_driver_name[];
/** SCONFIG registers */
#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
-/** hardcoded register values */
-#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe3ff
-#define SPIDER_NET_INT1_MASK_VALUE 0xffffffff
+/** interrupt mask registers */
+#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
+#define SPIDER_NET_INT1_MASK_VALUE 0xffff7ff7
/* no MAC aborts -> auto retransmission */
-#define SPIDER_NET_INT2_MASK_VALUE 0xfffffff1
+#define SPIDER_NET_INT2_MASK_VALUE 0xffef7ff1
-/* clear counter when interrupt sources are cleared
-#define SPIDER_NET_FRAMENUM_VALUE 0x0001f001 */
/* we rely on flagged descriptor interrupts */
#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
/* set this first, then the FRAMENUM_VALUE */
@@ -168,7 +173,7 @@ extern char spider_net_driver_name[];
#if 0
#define SPIDER_NET_WOL_VALUE 0x00000000
#endif
-#define SPIDER_NET_IPSECINIT_VALUE 0x00f000f8
+#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
/* pause frames: automatic, no upper retransmission count */
/* outside loopback mode: ETOMOD signal dont matter, not connected */
@@ -318,6 +323,10 @@ enum spider_net_int2_status {
#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \
(1 << SPIDER_NET_GRMFLLINT) )
+#define SPIDER_NET_ERRINT ( 0xffffffff & \
+ (~SPIDER_NET_TXINT) & \
+ (~SPIDER_NET_RXINT) )
+
#define SPIDER_NET_GPREXEC 0x80000000
#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
@@ -358,9 +367,6 @@ enum spider_net_int2_status {
/* descr ready, descr is in middle of chain, get interrupt on completion */
#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
-/* multicast is no problem */
-#define SPIDER_NET_DATA_ERROR_MASK 0xffffbfff
-
enum spider_net_descr_status {
SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
@@ -373,9 +379,9 @@ enum spider_net_descr_status {
struct spider_net_descr {
/* as defined by the hardware */
- dma_addr_t buf_addr;
+ u32 buf_addr;
u32 buf_size;
- dma_addr_t next_descr_addr;
+ u32 next_descr_addr;
u32 dmac_cmd_status;
u32 result_size;
u32 valid_size; /* all zeroes for tx */
@@ -384,7 +390,7 @@ struct spider_net_descr {
/* used in the driver */
struct sk_buff *skb;
- dma_addr_t bus_addr;
+ u32 bus_addr;
struct spider_net_descr *next;
struct spider_net_descr *prev;
} __attribute__((aligned(32)));
@@ -396,21 +402,21 @@ struct spider_net_descr_chain {
};
/* descriptor data_status bits */
-#define SPIDER_NET_RXIPCHK 29
-#define SPIDER_NET_TCPUDPIPCHK 28
-#define SPIDER_NET_DATA_STATUS_CHK_MASK (1 << SPIDER_NET_RXIPCHK | \
- 1 << SPIDER_NET_TCPUDPIPCHK)
-
+#define SPIDER_NET_RX_IPCHK 29
+#define SPIDER_NET_RX_TCPCHK 28
#define SPIDER_NET_VLAN_PACKET 21
+#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
+ (1 << SPIDER_NET_RX_TCPCHK) )
/* descriptor data_error bits */
-#define SPIDER_NET_RXIPCHKERR 27
-#define SPIDER_NET_RXTCPCHKERR 26
-#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \
- 1 << SPIDER_NET_RXTCPCHKERR)
+#define SPIDER_NET_RX_IPCHKERR 27
+#define SPIDER_NET_RX_RXTCPCHKERR 28
+
+#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
-/* the cases we don't pass the packet to the stack */
-#define SPIDER_NET_DESTROY_RX_FLAGS 0x70138000
+/* the cases we don't pass the packet to the stack.
+ * 701b8000 would be correct, but every packet gets that flag */
+#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
#define SPIDER_NET_DESCR_SIZE 32
@@ -445,13 +451,16 @@ struct spider_net_card {
struct spider_net_descr_chain tx_chain;
struct spider_net_descr_chain rx_chain;
- spinlock_t chain_lock;
+ atomic_t rx_chain_refill;
+ atomic_t tx_chain_release;
struct net_device_stats netdev_stats;
struct spider_net_options options;
spinlock_t intmask_lock;
+ struct tasklet_struct rxram_full_tl;
+ struct timer_list tx_timer;
struct work_struct tx_timeout_task;
atomic_t tx_timeout_task_counter;
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index d42e60ba74ce..a5bb0b7633af 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -113,6 +113,23 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
return 0;
}
+static uint32_t
+spider_net_ethtool_get_tx_csum(struct net_device *netdev)
+{
+ return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int
+spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
+{
+ if (data)
+ netdev->features |= NETIF_F_HW_CSUM;
+ else
+ netdev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
+}
+
struct ethtool_ops spider_net_ethtool_ops = {
.get_settings = spider_net_ethtool_get_settings,
.get_drvinfo = spider_net_ethtool_get_drvinfo,
@@ -122,5 +139,7 @@ struct ethtool_ops spider_net_ethtool_ops = {
.nway_reset = spider_net_ethtool_nway_reset,
.get_rx_csum = spider_net_ethtool_get_rx_csum,
.set_rx_csum = spider_net_ethtool_set_rx_csum,
+ .get_tx_csum = spider_net_ethtool_get_tx_csum,
+ .set_tx_csum = spider_net_ethtool_set_tx_csum,
};
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index eb86b059809b..f2d1dafde087 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.47"
-#define DRV_MODULE_RELDATE "Dec 28, 2005"
+#define DRV_MODULE_VERSION "3.48"
+#define DRV_MODULE_RELDATE "Jan 16, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -1325,10 +1325,12 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
tw32(0x7d00, val);
if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
- tg3_nvram_lock(tp);
+ int err;
+
+ err = tg3_nvram_lock(tp);
tg3_halt_cpu(tp, RX_CPU_BASE);
- tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0);
- tg3_nvram_unlock(tp);
+ if (!err)
+ tg3_nvram_unlock(tp);
}
}
@@ -4193,14 +4195,19 @@ static int tg3_nvram_lock(struct tg3 *tp)
if (tp->tg3_flags & TG3_FLAG_NVRAM) {
int i;
- tw32(NVRAM_SWARB, SWARB_REQ_SET1);
- for (i = 0; i < 8000; i++) {
- if (tr32(NVRAM_SWARB) & SWARB_GNT1)
- break;
- udelay(20);
+ if (tp->nvram_lock_cnt == 0) {
+ tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+ for (i = 0; i < 8000; i++) {
+ if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+ break;
+ udelay(20);
+ }
+ if (i == 8000) {
+ tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
+ return -ENODEV;
+ }
}
- if (i == 8000)
- return -ENODEV;
+ tp->nvram_lock_cnt++;
}
return 0;
}
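tg3_nvram_lock() is now a counted, effectively recursive lock: only the outermost acquisition arbitrates for the hardware, and the matching unlock releases it only when the count drops back to zero. The pattern reduced to its essentials, with hypothetical helpers:

/* Sketch: a counted hardware lock; caller holds tp->lock. */
static int counted_lock(struct tg3 *tp)
{
	if (tp->nvram_lock_cnt == 0 && acquire_hw_arbitration(tp))
		return -ENODEV;		/* hypothetical helper */
	tp->nvram_lock_cnt++;
	return 0;
}

static void counted_unlock(struct tg3 *tp)
{
	if (tp->nvram_lock_cnt > 0 && --tp->nvram_lock_cnt == 0)
		release_hw_arbitration(tp);	/* hypothetical helper */
}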
@@ -4208,8 +4215,12 @@ static int tg3_nvram_lock(struct tg3 *tp)
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
- if (tp->tg3_flags & TG3_FLAG_NVRAM)
- tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+ if (tp->tg3_flags & TG3_FLAG_NVRAM) {
+ if (tp->nvram_lock_cnt > 0)
+ tp->nvram_lock_cnt--;
+ if (tp->nvram_lock_cnt == 0)
+ tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+ }
}
/* tp->lock is held. */
@@ -4320,8 +4331,13 @@ static int tg3_chip_reset(struct tg3 *tp)
void (*write_op)(struct tg3 *, u32, u32);
int i;
- if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+ if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
tg3_nvram_lock(tp);
+ /* No matching tg3_nvram_unlock() after this because
+ * chip reset below will undo the nvram lock.
+ */
+ tp->nvram_lock_cnt = 0;
+ }
/*
* We must avoid the readl() that normally takes place.
@@ -4717,6 +4733,10 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
(offset == RX_CPU_BASE ? "RX" : "TX"));
return -ENODEV;
}
+
+ /* Clear firmware's nvram arbitration. */
+ if (tp->tg3_flags & TG3_FLAG_NVRAM)
+ tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
return 0;
}
@@ -4736,7 +4756,7 @@ struct fw_info {
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
int cpu_scratch_size, struct fw_info *info)
{
- int err, i;
+ int err, lock_err, i;
void (*write_op)(struct tg3 *, u32, u32);
if (cpu_base == TX_CPU_BASE &&
@@ -4755,9 +4775,10 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
/* It is possible that bootcode is still loading at this point.
* Get the nvram lock first before halting the cpu.
*/
- tg3_nvram_lock(tp);
+ lock_err = tg3_nvram_lock(tp);
err = tg3_halt_cpu(tp, cpu_base);
- tg3_nvram_unlock(tp);
+ if (!lock_err)
+ tg3_nvram_unlock(tp);
if (err)
goto out;
@@ -8182,7 +8203,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
data[1] = 1;
}
if (etest->flags & ETH_TEST_FL_OFFLINE) {
- int irq_sync = 0;
+ int err, irq_sync = 0;
if (netif_running(dev)) {
tg3_netif_stop(tp);
@@ -8192,11 +8213,12 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_full_lock(tp, irq_sync);
tg3_halt(tp, RESET_KIND_SUSPEND, 1);
- tg3_nvram_lock(tp);
+ err = tg3_nvram_lock(tp);
tg3_halt_cpu(tp, RX_CPU_BASE);
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
tg3_halt_cpu(tp, TX_CPU_BASE);
- tg3_nvram_unlock(tp);
+ if (!err)
+ tg3_nvram_unlock(tp);
if (tg3_test_registers(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -8588,7 +8610,11 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
tp->tg3_flags |= TG3_FLAG_NVRAM;
- tg3_nvram_lock(tp);
+ if (tg3_nvram_lock(tp)) {
+			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
+ "tg3_nvram_init failed.\n", tp->dev->name);
+ return;
+ }
tg3_enable_nvram_access(tp);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
@@ -8686,7 +8712,9 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
if (offset > NVRAM_ADDR_MSK)
return -EINVAL;
- tg3_nvram_lock(tp);
+ ret = tg3_nvram_lock(tp);
+ if (ret)
+ return ret;
tg3_enable_nvram_access(tp);
@@ -8785,10 +8813,6 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
offset = offset + (pagesize - page_off);
- /* Nvram lock released by tg3_nvram_read() above,
- * so need to get it again.
- */
- tg3_nvram_lock(tp);
tg3_enable_nvram_access(tp);
/*
@@ -8925,7 +8949,9 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
else {
u32 grc_mode;
- tg3_nvram_lock(tp);
+ ret = tg3_nvram_lock(tp);
+ if (ret)
+ return ret;
tg3_enable_nvram_access(tp);
if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 890e1635996b..e8243305f0e8 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2275,6 +2275,7 @@ struct tg3 {
dma_addr_t stats_mapping;
struct work_struct reset_task;
+ int nvram_lock_cnt;
u32 nvram_size;
u32 nvram_pagesize;
u32 nvram_jedecnum;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ee866fd6957d..a4c7ae94614d 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5668,13 +5668,13 @@ static int airo_set_freq(struct net_device *dev,
int channel = fwrq->m;
/* We should do a better check than that,
* based on the card capability !!! */
- if((channel < 1) || (channel > 16)) {
+ if((channel < 1) || (channel > 14)) {
printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m);
rc = -EINVAL;
} else {
readConfigRid(local, 1);
/* Yes ! We can set it !!! */
- local->config.channelSet = (u16)(channel - 1);
+ local->config.channelSet = (u16) channel;
set_bit (FLAG_COMMIT, &local->flags);
}
}
@@ -5692,6 +5692,7 @@ static int airo_get_freq(struct net_device *dev,
{
struct airo_info *local = dev->priv;
StatusRid status_rid; /* Card status info */
+ int ch;
readConfigRid(local, 1);
if ((local->config.opmode & 0xFF) == MODE_STA_ESS)
@@ -5699,16 +5700,14 @@ static int airo_get_freq(struct net_device *dev,
else
readStatusRid(local, &status_rid, 1);
-#ifdef WEXT_USECHANNELS
- fwrq->m = ((int)status_rid.channel) + 1;
- fwrq->e = 0;
-#else
- {
- int f = (int)status_rid.channel;
- fwrq->m = frequency_list[f] * 100000;
+ ch = (int)status_rid.channel;
+ if((ch > 0) && (ch < 15)) {
+ fwrq->m = frequency_list[ch - 1] * 100000;
fwrq->e = 1;
+ } else {
+ fwrq->m = ch;
+ fwrq->e = 0;
}
-#endif
return 0;
}
@@ -5783,7 +5782,7 @@ static int airo_get_essid(struct net_device *dev,
/* If none, we may want to get the one that was set */
/* Push it out ! */
- dwrq->length = status_rid.SSIDlen + 1;
+ dwrq->length = status_rid.SSIDlen;
dwrq->flags = 1; /* active */
return 0;
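The fixed airo_get_freq() reports real frequencies for valid channels. Wireless extensions encode an iw_freq as value = m x 10^e, so channel 1 becomes m = 2412 * 100000 = 241200000 with e = 1, i.e. 2412000000 Hz = 2.412 GHz; invalid channels fall back to the raw channel number with e = 0. A small decoding sketch:

/* Sketch: decode a wireless-extensions (m, e) pair into Hz. */
static unsigned long long iw_freq_to_hz(int m, int e)
{
	unsigned long long hz = m;

	while (e-- > 0)
		hz *= 10;
	return hz;
}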
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index f0ccfef66445..98a76f10a0f7 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1718,11 +1718,11 @@ static int atmel_get_essid(struct net_device *dev,
if (priv->new_SSID_size != 0) {
memcpy(extra, priv->new_SSID, priv->new_SSID_size);
extra[priv->new_SSID_size] = '\0';
- dwrq->length = priv->new_SSID_size + 1;
+ dwrq->length = priv->new_SSID_size;
} else {
memcpy(extra, priv->SSID, priv->SSID_size);
extra[priv->SSID_size] = '\0';
- dwrq->length = priv->SSID_size + 1;
+ dwrq->length = priv->SSID_size;
}
dwrq->flags = !priv->connect_to_any_BSS; /* active */
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index 56f41c714d38..c8f6286dd35f 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -26,11 +26,25 @@ config HOSTAP_FIRMWARE
depends on HOSTAP
---help---
Configure Host AP driver to include support for firmware image
- download. Current version supports only downloading to volatile, i.e.,
- RAM memory. Flash upgrade is not yet supported.
+ download. This option by itself only enables downloading to the
+ volatile memory, i.e. the card RAM. This option is required to
+ support cards that don't have firmware in flash, such as D-Link
+ DWL-520 rev E and D-Link DWL-650 rev P.
- Firmware image downloading needs user space tool, prism2_srec. It is
- available from http://hostap.epitest.fi/.
+ Firmware image downloading needs a user space tool, prism2_srec.
+ It is available from http://hostap.epitest.fi/.
+
+config HOSTAP_FIRMWARE_NVRAM
+ bool "Support for non-volatile firmware download"
+ depends on HOSTAP_FIRMWARE
+ ---help---
+ Allow Host AP driver to write firmware images to the non-volatile
+ card memory, i.e. flash memory that survives power cycling.
+ Enable this option if you want to be able to change card firmware
+ permanently.
+
+ Firmware image downloading needs a user space tool, prism2_srec.
+ It is available from http://hostap.epitest.fi/.
config HOSTAP_PLX
tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
diff --git a/drivers/net/wireless/hostap/Makefile b/drivers/net/wireless/hostap/Makefile
index 353ccb93134b..b8e41a702c00 100644
--- a/drivers/net/wireless/hostap/Makefile
+++ b/drivers/net/wireless/hostap/Makefile
@@ -1,4 +1,5 @@
-hostap-y := hostap_main.o
+hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \
+ hostap_ioctl.o hostap_main.o hostap_proc.o
obj-$(CONFIG_HOSTAP) += hostap.o
obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
index 5fac89b8ce3a..5e63765219fe 100644
--- a/drivers/net/wireless/hostap/hostap.h
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -1,6 +1,15 @@
#ifndef HOSTAP_H
#define HOSTAP_H
+#include <linux/ethtool.h>
+
+#include "hostap_wlan.h"
+#include "hostap_ap.h"
+
+static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
+#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
+
/* hostap.c */
extern struct proc_dir_entry *hostap_proc;
@@ -40,6 +49,26 @@ int prism2_update_comms_qual(struct net_device *dev);
int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
u8 *body, size_t bodylen);
int prism2_sta_deauth(local_info_t *local, u16 reason);
+int prism2_wds_add(local_info_t *local, u8 *remote_addr,
+ int rtnl_locked);
+int prism2_wds_del(local_info_t *local, u8 *remote_addr,
+ int rtnl_locked, int do_not_remove);
+
+
+/* hostap_ap.c */
+
+int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
+int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
+void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
+int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac);
+void ap_control_kickall(struct ap_data *ap);
+void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
+ struct ieee80211_crypt_data ***crypt);
+int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
+ struct iw_quality qual[], int buf_size,
+ int aplist);
+int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
+int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param);
/* hostap_proc.c */
@@ -54,4 +83,12 @@ void hostap_info_init(local_info_t *local);
void hostap_info_process(local_info_t *local, struct sk_buff *skb);
+/* hostap_ioctl.c */
+
+extern const struct iw_handler_def hostap_iw_handler_def;
+extern struct ethtool_ops prism2_ethtool_ops;
+
+int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+
#endif /* HOSTAP_H */
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
index bf506f50d722..1fc72fe511e9 100644
--- a/drivers/net/wireless/hostap/hostap_80211.h
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -1,6 +1,9 @@
#ifndef HOSTAP_80211_H
#define HOSTAP_80211_H
+#include <linux/types.h>
+#include <net/ieee80211_crypt.h>
+
struct hostap_ieee80211_mgmt {
u16 frame_control;
u16 duration;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index 4b13b76425c1..7e04dc94b3bc 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -1,7 +1,18 @@
#include <linux/etherdevice.h>
+#include <net/ieee80211_crypt.h>
#include "hostap_80211.h"
#include "hostap.h"
+#include "hostap_ap.h"
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+static unsigned char rfc1042_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static unsigned char bridge_tunnel_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+/* No encapsulation header if EtherType < 0x600 (=length) */
void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 9d24f8a38ac5..4a85e63906f1 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,3 +1,18 @@
+#include "hostap_80211.h"
+#include "hostap_common.h"
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+static unsigned char rfc1042_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static unsigned char bridge_tunnel_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+/* No encapsulation header if EtherType < 0x600 (=length) */
+
void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
{
struct ieee80211_hdr_4addr *hdr;
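
The rfc1042_header and bridge_tunnel_header tables are duplicated into both the rx and tx files above because each file is now compiled separately instead of being #included into hostap_main.c. They select the LLC/SNAP encapsulation used on the 802.11/Ethernet boundary; a rough, illustrative check for the receive direction (not part of this patch) would look like:

    /* Illustrative only: classify the 6-byte SNAP header at the start
     * of a received 802.11 data payload. */
    static int snap_matches(const unsigned char *payload)
    {
        if (memcmp(payload, rfc1042_header, 6) == 0)
            return 1;    /* RFC 1042 encapsulation (most EtherTypes) */
        if (memcmp(payload, bridge_tunnel_header, 6) == 0)
            return 1;    /* Bridge-Tunnel (ETH_P_AARP, ETH_P_IPX) */
        return 0;        /* EtherType < 0x600: raw 802.3 length, no SNAP */
    }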
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 9da94ab7f05f..753a1de6664b 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -16,6 +16,14 @@
* (8802.11: 5.5)
*/
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
+
static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
DEF_INTS };
module_param_array(other_ap_policy, int, NULL, 0444);
@@ -360,8 +368,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
}
-static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
- u8 *mac)
+int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
{
struct mac_entry *entry;
@@ -380,8 +387,7 @@ static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
}
-static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
- u8 *mac)
+int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
{
struct list_head *ptr;
struct mac_entry *entry;
@@ -433,7 +439,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
}
-static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
+void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
{
struct list_head *ptr, *n;
struct mac_entry *entry;
@@ -454,8 +460,7 @@ static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
}
-static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
- u8 *mac)
+int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac)
{
struct sta_info *sta;
u16 resp;
@@ -486,7 +491,7 @@ static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-static void ap_control_kickall(struct ap_data *ap)
+void ap_control_kickall(struct ap_data *ap)
{
struct list_head *ptr, *n;
struct sta_info *sta;
@@ -2321,9 +2326,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
}
-static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
- struct iw_quality qual[], int buf_size,
- int aplist)
+int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
+ struct iw_quality qual[], int buf_size,
+ int aplist)
{
struct ap_data *ap = local->ap;
struct list_head *ptr;
@@ -2363,7 +2368,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
/* Translate our list of Access Points & Stations to a card independent
* format that the Wireless Tools will understand - Jean II */
-static int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
+int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
{
struct hostap_interface *iface;
local_info_t *local;
@@ -2608,8 +2613,7 @@ static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
}
-static int prism2_hostapd(struct ap_data *ap,
- struct prism2_hostapd_param *param)
+int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param)
{
switch (param->cmd) {
case PRISM2_HOSTAPD_FLUSH:
@@ -3207,8 +3211,8 @@ void hostap_update_rates(local_info_t *local)
}
-static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
- struct ieee80211_crypt_data ***crypt)
+void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
+ struct ieee80211_crypt_data ***crypt)
{
struct sta_info *sta;
diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h
index 6d00df69c2e3..2fa2452b6b07 100644
--- a/drivers/net/wireless/hostap/hostap_ap.h
+++ b/drivers/net/wireless/hostap/hostap_ap.h
@@ -1,6 +1,8 @@
#ifndef HOSTAP_AP_H
#define HOSTAP_AP_H
+#include "hostap_80211.h"
+
/* AP data structures for STAs */
/* maximum number of frames to buffer per STA */
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
index 6f4fa9dc308f..01624005d808 100644
--- a/drivers/net/wireless/hostap/hostap_common.h
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -1,6 +1,9 @@
#ifndef HOSTAP_COMMON_H
#define HOSTAP_COMMON_H
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
#define BIT(x) (1 << (x))
#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
index 7ed3425d08c1..c090a5aebb58 100644
--- a/drivers/net/wireless/hostap/hostap_config.h
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -21,15 +21,10 @@
#define PRISM2_DOWNLOAD_SUPPORT
#endif
-#ifdef PRISM2_DOWNLOAD_SUPPORT
-/* Allow writing firmware images into flash, i.e., to non-volatile storage.
- * Before you enable this option, you should make absolutely sure that you are
- * using prism2_srec utility that comes with THIS version of the driver!
- * In addition, please note that it is possible to kill your card with
- * non-volatile download if you are using incorrect image. This feature has not
- * been fully tested, so please be careful with it. */
-/* #define PRISM2_NON_VOLATILE_DOWNLOAD */
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
+/* Allow kernel configuration to enable non-volatile download support. */
+#ifdef CONFIG_HOSTAP_FIRMWARE_NVRAM
+#define PRISM2_NON_VOLATILE_DOWNLOAD
+#endif
/* Save low-level I/O for debugging. This should not be enabled in normal use.
*/
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 5aa998fdf1c4..50f72d831cf4 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -1,5 +1,8 @@
/* Host AP driver Info Frame processing (part of hostap.o module) */
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
/* Called only as a tasklet (software IRQ) */
static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 2617d70bcda9..f3e0ce1ee037 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1,11 +1,13 @@
/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
-#ifdef in_atomic
-/* Get kernel_locked() for in_atomic() */
+#include <linux/types.h>
#include <linux/smp_lock.h>
-#endif
#include <linux/ethtool.h>
+#include <net/ieee80211_crypt.h>
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
{
@@ -3910,7 +3912,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
local->sta_fw_ver & 0xff);
}
-static struct ethtool_ops prism2_ethtool_ops = {
+struct ethtool_ops prism2_ethtool_ops = {
.get_drvinfo = prism2_get_drvinfo
};
@@ -3985,7 +3987,7 @@ static const iw_handler prism2_private_handler[] =
(iw_handler) prism2_ioctl_priv_readmif, /* 3 */
};
-static const struct iw_handler_def hostap_iw_handler_def =
+const struct iw_handler_def hostap_iw_handler_def =
{
.num_standard = sizeof(prism2_handler) / sizeof(iw_handler),
.num_private = sizeof(prism2_private_handler) / sizeof(iw_handler),
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 3d2ea61033be..8dd4c4446a64 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -24,6 +24,7 @@
#include <linux/kmod.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
+#include <linux/etherdevice.h>
#include <net/iw_handler.h>
#include <net/ieee80211.h>
#include <net/ieee80211_crypt.h>
@@ -47,57 +48,6 @@ MODULE_VERSION(PRISM2_VERSION);
#define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))
-/* hostap.c */
-static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
- int rtnl_locked);
-static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
- int rtnl_locked, int do_not_remove);
-
-/* hostap_ap.c */
-static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
- struct iw_quality qual[], int buf_size,
- int aplist);
-static int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
-static int prism2_hostapd(struct ap_data *ap,
- struct prism2_hostapd_param *param);
-static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
- struct ieee80211_crypt_data ***crypt);
-static void ap_control_kickall(struct ap_data *ap);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
- u8 *mac);
-static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
- u8 *mac);
-static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
-static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
- u8 *mac);
-#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-
-static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
- 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
-#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
-
-
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-static unsigned char rfc1042_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static unsigned char bridge_tunnel_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-/* No encapsulation header if EtherType < 0x600 (=length) */
-
-
-/* FIX: these could be compiled separately and linked together to hostap.o */
-#include "hostap_ap.c"
-#include "hostap_info.c"
-#include "hostap_ioctl.c"
-#include "hostap_proc.c"
-#include "hostap_80211_rx.c"
-#include "hostap_80211_tx.c"
-
-
struct net_device * hostap_add_interface(struct local_info *local,
int type, int rtnl_locked,
const char *prefix,
@@ -196,8 +146,8 @@ static inline int prism2_wds_special_addr(u8 *addr)
}
-static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
- int rtnl_locked)
+int prism2_wds_add(local_info_t *local, u8 *remote_addr,
+ int rtnl_locked)
{
struct net_device *dev;
struct list_head *ptr;
@@ -258,8 +208,8 @@ static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
}
-static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
- int rtnl_locked, int do_not_remove)
+int prism2_wds_del(local_info_t *local, u8 *remote_addr,
+ int rtnl_locked, int do_not_remove)
{
unsigned long flags;
struct list_head *ptr;
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index a0a4cbd4937a..d1d8ce022e63 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -1,5 +1,12 @@
/* /proc routines for Host AP driver */
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <net/ieee80211_crypt.h>
+
+#include "hostap_wlan.h"
+#include "hostap.h"
+
#define PROC_LIMIT (PAGE_SIZE - 80)
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index cfd801559492..87a54aa6f4dd 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -1,6 +1,10 @@
#ifndef HOSTAP_WLAN_H
#define HOSTAP_WLAN_H
+#include <linux/wireless.h>
+#include <linux/netdevice.h>
+#include <net/iw_handler.h>
+
#include "hostap_config.h"
#include "hostap_common.h"
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 7518384f34d9..8bf02763b5c7 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -5735,70 +5735,6 @@ static struct net_device_stats *ipw2100_stats(struct net_device *dev)
return &priv->ieee->stats;
}
-#if WIRELESS_EXT < 18
-/* Support for wpa_supplicant before WE-18, deprecated. */
-
-/* following definitions must match definitions in driver_ipw.c */
-
-#define IPW2100_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
-
-#define IPW2100_CMD_SET_WPA_PARAM 1
-#define IPW2100_CMD_SET_WPA_IE 2
-#define IPW2100_CMD_SET_ENCRYPTION 3
-#define IPW2100_CMD_MLME 4
-
-#define IPW2100_PARAM_WPA_ENABLED 1
-#define IPW2100_PARAM_TKIP_COUNTERMEASURES 2
-#define IPW2100_PARAM_DROP_UNENCRYPTED 3
-#define IPW2100_PARAM_PRIVACY_INVOKED 4
-#define IPW2100_PARAM_AUTH_ALGS 5
-#define IPW2100_PARAM_IEEE_802_1X 6
-
-#define IPW2100_MLME_STA_DEAUTH 1
-#define IPW2100_MLME_STA_DISASSOC 2
-
-#define IPW2100_CRYPT_ERR_UNKNOWN_ALG 2
-#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR 3
-#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED 4
-#define IPW2100_CRYPT_ERR_KEY_SET_FAILED 5
-#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED 6
-#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED 7
-
-#define IPW2100_CRYPT_ALG_NAME_LEN 16
-
-struct ipw2100_param {
- u32 cmd;
- u8 sta_addr[ETH_ALEN];
- union {
- struct {
- u8 name;
- u32 value;
- } wpa_param;
- struct {
- u32 len;
- u8 reserved[32];
- u8 data[0];
- } wpa_ie;
- struct {
- u32 command;
- u32 reason_code;
- } mlme;
- struct {
- u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
- u8 set_tx;
- u32 err;
- u8 idx;
- u8 seq[8]; /* sequence counter (set: RX, get: TX) */
- u16 key_len;
- u8 key[0];
- } crypt;
-
- } u;
-};
-
-/* end of driver_ipw.c code */
-#endif /* WIRELESS_EXT < 18 */
-
static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
{
/* This is called when wpa_supplicant loads and closes the driver
@@ -5807,11 +5743,6 @@ static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
return 0;
}
-#if WIRELESS_EXT < 18
-#define IW_AUTH_ALG_OPEN_SYSTEM 0x1
-#define IW_AUTH_ALG_SHARED_KEY 0x2
-#endif
-
static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
{
@@ -5855,360 +5786,6 @@ void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
ipw2100_set_wpa_ie(priv, &frame, 0);
}
-#if WIRELESS_EXT < 18
-static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value)
-{
- struct ipw2100_priv *priv = ieee80211_priv(dev);
- struct ieee80211_crypt_data *crypt;
- unsigned long flags;
- int ret = 0;
-
- switch (name) {
- case IPW2100_PARAM_WPA_ENABLED:
- ret = ipw2100_wpa_enable(priv, value);
- break;
-
- case IPW2100_PARAM_TKIP_COUNTERMEASURES:
- crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
- if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
- break;
-
- flags = crypt->ops->get_flags(crypt->priv);
-
- if (value)
- flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
- else
- flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
-
- crypt->ops->set_flags(flags, crypt->priv);
-
- break;
-
- case IPW2100_PARAM_DROP_UNENCRYPTED:{
- /* See IW_AUTH_DROP_UNENCRYPTED handling for details */
- struct ieee80211_security sec = {
- .flags = SEC_ENABLED,
- .enabled = value,
- };
- priv->ieee->drop_unencrypted = value;
- /* We only change SEC_LEVEL for open mode. Others
- * are set by ipw_wpa_set_encryption.
- */
- if (!value) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_0;
- } else {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1;
- }
- if (priv->ieee->set_security)
- priv->ieee->set_security(priv->ieee->dev, &sec);
- break;
- }
-
- case IPW2100_PARAM_PRIVACY_INVOKED:
- priv->ieee->privacy_invoked = value;
- break;
-
- case IPW2100_PARAM_AUTH_ALGS:
- ret = ipw2100_wpa_set_auth_algs(priv, value);
- break;
-
- case IPW2100_PARAM_IEEE_802_1X:
- priv->ieee->ieee802_1x = value;
- break;
-
- default:
- printk(KERN_ERR DRV_NAME ": %s: Unknown WPA param: %d\n",
- dev->name, name);
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason)
-{
-
- struct ipw2100_priv *priv = ieee80211_priv(dev);
- int ret = 0;
-
- switch (command) {
- case IPW2100_MLME_STA_DEAUTH:
- // silently ignore
- break;
-
- case IPW2100_MLME_STA_DISASSOC:
- ipw2100_disassociate_bssid(priv);
- break;
-
- default:
- printk(KERN_ERR DRV_NAME ": %s: Unknown MLME request: %d\n",
- dev->name, command);
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
- struct ipw2100_param *param, int plen)
-{
-
- struct ipw2100_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee;
- u8 *buf;
-
- if (!ieee->wpa_enabled)
- return -EOPNOTSUPP;
-
- if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
- (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
- return -EINVAL;
-
- if (param->u.wpa_ie.len) {
- buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
-
- kfree(ieee->wpa_ie);
- ieee->wpa_ie = buf;
- ieee->wpa_ie_len = param->u.wpa_ie.len;
-
- } else {
- kfree(ieee->wpa_ie);
- ieee->wpa_ie = NULL;
- ieee->wpa_ie_len = 0;
- }
-
- ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
-
- return 0;
-}
-
-/* implementation borrowed from hostap driver */
-
-static int ipw2100_wpa_set_encryption(struct net_device *dev,
- struct ipw2100_param *param,
- int param_len)
-{
- int ret = 0;
- struct ipw2100_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee;
- struct ieee80211_crypto_ops *ops;
- struct ieee80211_crypt_data **crypt;
-
- struct ieee80211_security sec = {
- .flags = 0,
- };
-
- param->u.crypt.err = 0;
- param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
- if (param_len !=
- (int)((char *)param->u.crypt.key - (char *)param) +
- param->u.crypt.key_len) {
- IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len,
- param->u.crypt.key_len);
- return -EINVAL;
- }
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- if (param->u.crypt.idx >= WEP_KEYS)
- return -EINVAL;
- crypt = &ieee->crypt[param->u.crypt.idx];
- } else {
- return -EINVAL;
- }
-
- sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
- if (strcmp(param->u.crypt.alg, "none") == 0) {
- if (crypt) {
- sec.enabled = 0;
- sec.encrypt = 0;
- sec.level = SEC_LEVEL_0;
- sec.flags |= SEC_LEVEL;
- ieee80211_crypt_delayed_deinit(ieee, crypt);
- }
- goto done;
- }
- sec.enabled = 1;
- sec.encrypt = 1;
-
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
- request_module("ieee80211_crypt_wep");
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
- request_module("ieee80211_crypt_tkip");
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
- request_module("ieee80211_crypt_ccmp");
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- }
- if (ops == NULL) {
- IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
- dev->name, param->u.crypt.alg);
- param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
- ret = -EINVAL;
- goto done;
- }
-
- if (*crypt == NULL || (*crypt)->ops != ops) {
- struct ieee80211_crypt_data *new_crypt;
-
- ieee80211_crypt_delayed_deinit(ieee, crypt);
-
- new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
- if (new_crypt == NULL) {
- ret = -ENOMEM;
- goto done;
- }
- new_crypt->ops = ops;
- if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
- new_crypt->priv =
- new_crypt->ops->init(param->u.crypt.idx);
-
- if (new_crypt->priv == NULL) {
- kfree(new_crypt);
- param->u.crypt.err =
- IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
- ret = -EINVAL;
- goto done;
- }
-
- *crypt = new_crypt;
- }
-
- if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
- (*crypt)->ops->set_key(param->u.crypt.key,
- param->u.crypt.key_len, param->u.crypt.seq,
- (*crypt)->priv) < 0) {
- IPW_DEBUG_INFO("%s: key setting failed\n", dev->name);
- param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
- ret = -EINVAL;
- goto done;
- }
-
- if (param->u.crypt.set_tx) {
- ieee->tx_keyidx = param->u.crypt.idx;
- sec.active_key = param->u.crypt.idx;
- sec.flags |= SEC_ACTIVE_KEY;
- }
-
- if (ops->name != NULL) {
-
- if (strcmp(ops->name, "WEP") == 0) {
- memcpy(sec.keys[param->u.crypt.idx],
- param->u.crypt.key, param->u.crypt.key_len);
- sec.key_sizes[param->u.crypt.idx] =
- param->u.crypt.key_len;
- sec.flags |= (1 << param->u.crypt.idx);
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_1;
- } else if (strcmp(ops->name, "TKIP") == 0) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_2;
- } else if (strcmp(ops->name, "CCMP") == 0) {
- sec.flags |= SEC_LEVEL;
- sec.level = SEC_LEVEL_3;
- }
- }
- done:
- if (ieee->set_security)
- ieee->set_security(ieee->dev, &sec);
-
- /* Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack. */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
- param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p)
-{
-
- struct ipw2100_param *param;
- int ret = 0;
-
- IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);
-
- if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
- return -EINVAL;
-
- param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- return -EFAULT;
- }
-
- switch (param->cmd) {
-
- case IPW2100_CMD_SET_WPA_PARAM:
- ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
- param->u.wpa_param.value);
- break;
-
- case IPW2100_CMD_SET_WPA_IE:
- ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
- break;
-
- case IPW2100_CMD_SET_ENCRYPTION:
- ret = ipw2100_wpa_set_encryption(dev, param, p->length);
- break;
-
- case IPW2100_CMD_MLME:
- ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
- param->u.mlme.reason_code);
- break;
-
- default:
- printk(KERN_ERR DRV_NAME
- ": %s: Unknown WPA supplicant request: %d\n", dev->name,
- param->cmd);
- ret = -EOPNOTSUPP;
-
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
- return ret;
-}
-
-static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct iwreq *wrq = (struct iwreq *)rq;
- int ret = -1;
- switch (cmd) {
- case IPW2100_IOCTL_WPA_SUPPLICANT:
- ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
- return ret;
-
- default:
- return -EOPNOTSUPP;
- }
-
- return -EOPNOTSUPP;
-}
-#endif /* WIRELESS_EXT < 18 */
-
static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -6337,9 +5914,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
dev->open = ipw2100_open;
dev->stop = ipw2100_close;
dev->init = ipw2100_net_init;
-#if WIRELESS_EXT < 18
- dev->do_ioctl = ipw2100_ioctl;
-#endif
dev->get_stats = ipw2100_stats;
dev->ethtool_ops = &ipw2100_ethtool_ops;
dev->tx_timeout = ipw2100_tx_timeout;
@@ -7855,7 +7429,6 @@ static int ipw2100_wx_get_power(struct net_device *dev,
return 0;
}
-#if WIRELESS_EXT > 17
/*
* WE-18 WPA support
*/
@@ -8117,7 +7690,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
}
return 0;
}
-#endif /* WIRELESS_EXT > 17 */
/*
*
@@ -8350,11 +7922,7 @@ static iw_handler ipw2100_wx_handlers[] = {
NULL, /* SIOCWIWTHRSPY */
ipw2100_wx_set_wap, /* SIOCSIWAP */
ipw2100_wx_get_wap, /* SIOCGIWAP */
-#if WIRELESS_EXT > 17
ipw2100_wx_set_mlme, /* SIOCSIWMLME */
-#else
- NULL, /* -- hole -- */
-#endif
NULL, /* SIOCGIWAPLIST -- deprecated */
ipw2100_wx_set_scan, /* SIOCSIWSCAN */
ipw2100_wx_get_scan, /* SIOCGIWSCAN */
@@ -8378,7 +7946,6 @@ static iw_handler ipw2100_wx_handlers[] = {
ipw2100_wx_get_encode, /* SIOCGIWENCODE */
ipw2100_wx_set_power, /* SIOCSIWPOWER */
ipw2100_wx_get_power, /* SIOCGIWPOWER */
-#if WIRELESS_EXT > 17
NULL, /* -- hole -- */
NULL, /* -- hole -- */
ipw2100_wx_set_genie, /* SIOCSIWGENIE */
@@ -8388,7 +7955,6 @@ static iw_handler ipw2100_wx_handlers[] = {
ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */
ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */
NULL, /* SIOCSIWPMKSA */
-#endif
};
#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 819be2b6b7df..4c28e332ecc3 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -8936,14 +8936,12 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
IPW_DEBUG_HC("starting request direct scan!\n");
if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
- err = wait_event_interruptible(priv->wait_state,
- !(priv->
- status & (STATUS_SCANNING |
- STATUS_SCAN_ABORTING)));
- if (err) {
- IPW_DEBUG_HC("aborting direct scan");
- goto done;
- }
+ /* We should not sleep here; otherwise we will block most
+ * of the system (for instance, we hold rtnl_lock when we
+ * get here).
+ */
+ err = -EAGAIN;
+ goto done;
}
memset(&scan, 0, sizeof(scan));
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 135a156db25d..c5cd61c7f927 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -748,7 +748,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
if (essid->length) {
dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */
/* if it is too big, truncate it */
- dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length + 1);
+ dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length);
} else {
dwrq->flags = 0;
dwrq->length = 0;
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 33d64d2ee53f..a8261d8454dd 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -177,7 +177,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
#endif
newskb->dev = skb->dev;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
skb = newskb;
}
}
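
The one-line change above is needed because islpci_eth_transmit() runs with interrupts disabled: dev_kfree_skb() must not be called in that context, while dev_kfree_skb_irq() only queues the skb on the per-CPU completion list to be freed later from softirq context. A hedged sketch of the surrounding pattern, replacing an unaligned skb with an aligned copy:

    /* Sketch only: swap an skb for a fresh copy from a context where
     * sleeping and immediate freeing are both forbidden. */
    static struct sk_buff *copy_and_free_irq(struct sk_buff *skb)
    {
        struct sk_buff *newskb = dev_alloc_skb(skb->len);

        if (!newskb)
            return skb;              /* keep the original on failure */
        memcpy(skb_put(newskb, skb->len), skb->data, skb->len);
        newskb->dev = skb->dev;
        dev_kfree_skb_irq(skb);      /* deferred free; IRQ-safe */
        return newskb;
    }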
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 319180ca7e71..7880d8c31aad 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1256,7 +1256,7 @@ static int ray_get_essid(struct net_device *dev,
extra[IW_ESSID_MAX_SIZE] = '\0';
/* Push it out ! */
- dwrq->length = strlen(extra) + 1;
+ dwrq->length = strlen(extra);
dwrq->flags = 1; /* active */
return 0;
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 7e2039f52c49..cf373625fc70 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -2280,7 +2280,7 @@ static int wavelan_get_essid(struct net_device *dev,
extra[IW_ESSID_MAX_SIZE] = '\0';
/* Set the length */
- wrqu->data.length = strlen(extra) + 1;
+ wrqu->data.length = strlen(extra);
return 0;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 605f0df0bfba..dda6099903c1 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1142,6 +1142,9 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
case 0x27c4:
ich = 7;
break;
+ case 0x2828: /* ICH8M */
+ ich = 8;
+ break;
default:
/* we do not handle this PCI device */
return;
@@ -1161,7 +1164,7 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
else
return; /* not in combined mode */
} else {
- WARN_ON((ich != 6) && (ich != 7));
+ WARN_ON((ich != 6) && (ich != 7) && (ich != 8));
tmp &= 0x3; /* interesting bits 1:0 */
if (tmp & (1 << 0))
comb = (1 << 2); /* PATA port 0, SATA port 1 */
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index d113290b5fc0..19bd346951dd 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -276,6 +276,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
board_ahci }, /* ESB2 */
{ PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* ICH7-M DH */
+ { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci }, /* ICH8 */
+ { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci }, /* ICH8 */
+ { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci }, /* ICH8 */
+ { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci }, /* ICH8M */
+ { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci }, /* ICH8M */
{ } /* terminate list */
};
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 557788ec4eec..fc3ca051ceed 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -157,6 +157,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+ { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+ { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+ { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
{ } /* terminate list */
};
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 99bae8369ab2..46c4cdbaee86 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -611,6 +611,10 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
if (dev->flags & ATA_DFLAG_PIO) {
tf->protocol = ATA_PROT_PIO;
index = dev->multi_count ? 0 : 8;
+ } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
+ /* Unable to use DMA due to host limitation */
+ tf->protocol = ATA_PROT_PIO;
+ index = dev->multi_count ? 0 : 4;
} else {
tf->protocol = ATA_PROT_DMA;
index = 16;
@@ -1051,18 +1055,22 @@ static unsigned int ata_pio_modes(const struct ata_device *adev)
{
u16 modes;
- /* Usual case. Word 53 indicates word 88 is valid */
- if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
+ /* Usual case. Word 53 indicates word 64 is valid */
+ if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
modes <<= 3;
modes |= 0x7;
return modes;
}
- /* If word 88 isn't valid then Word 51 holds the PIO timing number
- for the maximum. Turn it into a mask and return it */
- modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+ /* If word 64 isn't valid then Word 51 high byte holds the PIO timing
+ number for the maximum. Turn it into a mask and return it */
+ modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ;
return modes;
+ /* But wait.. there's more. Design your standards by committee and
you too can get a free iordy field to process. However it's the
+ speeds not the modes that are supported... Note drivers using the
+ timing API will get this right anyway */
}
struct ata_exec_internal_arg {
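
To make the word-51/word-64 correction above concrete: when bit 1 of word 53 is set, the low bits of word 64 are a bitmask of the advanced PIO modes, so the shift-and-or builds the full support mask; when word 64 is absent, the high byte of word 51 holds the best timing mode number, which (2 << mode) - 1 turns into the same mask shape. A worked sketch of both branches:

    /* Worked example, not kernel code: id64 is IDENTIFY word 64,
     * id51 is IDENTIFY word 51. */
    static void pio_mask_example(void)
    {
        unsigned short id64 = 0x0003;   /* drive advertises PIO 3 and 4 */
        unsigned short id51 = 0x0200;   /* older drive: timing mode 2   */
        unsigned int new_mask, old_mask;

        new_mask = (id64 & 0x03) << 3 | 0x7;         /* -> 0x1f: PIO 0-4 */
        old_mask = (2 << ((id51 >> 8) & 0xFF)) - 1;  /* -> 0x07: PIO 0-2 */
        (void)new_mask;
        (void)old_mask;
    }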
@@ -1165,6 +1173,39 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
}
/**
+ * ata_pio_need_iordy - check if iordy needed
+ * @adev: ATA device
+ *
+ * Check if the current speed of the device requires IORDY. Used
+ * by various controllers for chip configuration.
+ */
+
+unsigned int ata_pio_need_iordy(const struct ata_device *adev)
+{
+ int pio;
+ int speed = adev->pio_mode - XFER_PIO_0;
+
+ if (speed < 2)
+ return 0;
+ if (speed > 2)
+ return 1;
+
+ /* If we have no drive specific rule, then PIO 2 is non IORDY */
+
+ if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
+ pio = adev->id[ATA_ID_EIDE_PIO];
+ /* Is the speed faster than the drive allows non IORDY ? */
+ if (pio) {
+ /* This is cycle times not frequency - watch the logic! */
+ if (pio > 240) /* PIO2 is 240nS per cycle */
+ return 1;
+ return 0;
+ }
+ }
+ return 0;
+}
+
+/**
* ata_dev_identify - obtain IDENTIFY x DEVICE page
* @ap: port on which device we wish to probe resides
* @device: device bus address, starting at zero
@@ -1415,7 +1456,7 @@ void ata_dev_config(struct ata_port *ap, unsigned int i)
ap->udma_mask &= ATA_UDMA5;
ap->host->max_sectors = ATA_MAX_SECTORS;
ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
- ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
+ ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
}
if (ap->ops->dev_config)
@@ -3056,10 +3097,21 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
unsigned int buflen, int do_write)
{
- if (ap->flags & ATA_FLAG_MMIO)
- ata_mmio_data_xfer(ap, buf, buflen, do_write);
- else
- ata_pio_data_xfer(ap, buf, buflen, do_write);
+ /* Make the crap hardware pay the costs not the good stuff */
+ if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ if (ap->flags & ATA_FLAG_MMIO)
+ ata_mmio_data_xfer(ap, buf, buflen, do_write);
+ else
+ ata_pio_data_xfer(ap, buf, buflen, do_write);
+ local_irq_restore(flags);
+ } else {
+ if (ap->flags & ATA_FLAG_MMIO)
+ ata_mmio_data_xfer(ap, buf, buflen, do_write);
+ else
+ ata_pio_data_xfer(ap, buf, buflen, do_write);
+ }
}
/**
@@ -5122,6 +5174,7 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string);
EXPORT_SYMBOL_GPL(ata_dev_config);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
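
ata_pio_need_iordy() is exported above so low-level driver timing code can ask whether the drive's current PIO mode requires IORDY flow control. A hypothetical ->set_piomode hook showing the intended call pattern; EXAMPLE_IORDY_EN and the register write are invented for illustration, only the helper call is real API:

    #define EXAMPLE_IORDY_EN (1 << 7)   /* invented controller bit */

    /* Hypothetical controller hook, not from this patch. */
    static void example_set_piomode(struct ata_port *ap,
                                    struct ata_device *adev)
    {
        u32 timing = 0;   /* would be derived from adev->pio_mode */

        if (ata_pio_need_iordy(adev))
            timing |= EXAMPLE_IORDY_EN;   /* sample IORDY during PIO */

        /* ...write 'timing' into the chip's PIO timing register... */
    }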
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 3d1ea09a06a1..b0b0a69b3563 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -66,6 +66,7 @@ enum {
board_2037x = 0, /* FastTrak S150 TX2plus */
board_20319 = 1, /* FastTrak S150 TX4 */
board_20619 = 2, /* FastTrak TX4000 */
+ board_20771 = 3, /* FastTrak TX2300 */
PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */
@@ -190,6 +191,16 @@ static const struct ata_port_info pdc_port_info[] = {
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
.port_ops = &pdc_pata_ops,
},
+
+ /* board_20771 */
+ {
+ .sht = &pdc_ata_sht,
+ .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = 0x7f, /* udma0-6 ; FIXME */
+ .port_ops = &pdc_sata_ops,
+ },
};
static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -226,6 +237,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
{ PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20619 },
+ { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_20771 },
{ } /* terminate list */
};
@@ -706,6 +719,9 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
case board_2037x:
probe_ent->n_ports = 2;
break;
+ case board_20771:
+ probe_ent->n_ports = 2;
+ break;
case board_20619:
probe_ent->n_ports = 4;
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 668373590aa4..d8472563fde8 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -470,6 +470,7 @@ static const struct pci_device_id k2_sata_pci_tbl[] = {
{ 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
{ 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
{ }
};
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index d9ce8c549416..bc36edff2058 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2595,15 +2595,11 @@ static int __init serial8250_init(void)
if (ret)
goto out;
- ret = platform_driver_register(&serial8250_isa_driver);
- if (ret)
- goto unreg_uart_drv;
-
serial8250_isa_devs = platform_device_alloc("serial8250",
PLAT8250_DEV_LEGACY);
if (!serial8250_isa_devs) {
ret = -ENOMEM;
- goto unreg_plat_drv;
+ goto unreg_uart_drv;
}
ret = platform_device_add(serial8250_isa_devs);
@@ -2612,12 +2608,13 @@ static int __init serial8250_init(void)
serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);
- goto out;
+ ret = platform_driver_register(&serial8250_isa_driver);
+ if (ret == 0)
+ goto out;
+ platform_device_del(serial8250_isa_devs);
put_dev:
platform_device_put(serial8250_isa_devs);
- unreg_plat_drv:
- platform_driver_unregister(&serial8250_isa_driver);
unreg_uart_drv:
uart_unregister_driver(&serial8250_reg);
out:
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 589fb076654a..2a912153321e 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -940,6 +940,7 @@ enum pci_board_num_t {
pbn_b2_bt_2_921600,
pbn_b2_bt_4_921600,
+ pbn_b3_2_115200,
pbn_b3_4_115200,
pbn_b3_8_115200,
@@ -1311,6 +1312,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.uart_offset = 8,
},
+ [pbn_b3_2_115200] = {
+ .flags = FL_BASE3,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
[pbn_b3_4_115200] = {
.flags = FL_BASE3,
.num_ports = 4,
@@ -2272,6 +2279,9 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_nec_nile4 },
+ { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b3_2_115200 },
{ PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b3_4_115200 },
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 5e7199f7b59c..9fd1925de361 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -301,7 +301,7 @@ config SERIAL_AT91_TTYAT
depends on SERIAL_AT91=y
help
Say Y here if you wish to have the five internal AT91RM9200 UARTs
- appear as /dev/ttyAT0-4 (major 240, minor 0-4) instead of the
+ appear as /dev/ttyAT0-4 (major 204, minor 154-158) instead of the
normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if
you also want other UARTs, such as external 8250/16C550 compatible
UARTs.
diff --git a/drivers/serial/at91_serial.c b/drivers/serial/at91_serial.c
index 0e206063d685..2113feb75c39 100644
--- a/drivers/serial/at91_serial.c
+++ b/drivers/serial/at91_serial.c
@@ -222,8 +222,6 @@ static void at91_rx_chars(struct uart_port *port, struct pt_regs *regs)
while (status & (AT91_US_RXRDY)) {
ch = UART_GET_CHAR(port);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- goto ignore_char;
port->icount.rx++;
flg = TTY_NORMAL;
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index 5fc4a62173d9..fa4ae94243c2 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -34,6 +34,7 @@ sunserial_console_termios(struct console *con)
char *mode_prop = "ttyX-mode";
char *cd_prop = "ttyX-ignore-cd";
char *dtr_prop = "ttyX-rts-dtr-off";
+ char *ssp_console_modes_prop = "ssp-console-modes";
int baud, bits, stop, cflag;
char parity;
int carrier = 0;
@@ -43,14 +44,39 @@ sunserial_console_termios(struct console *con)
if (!serial_console)
return;
- if (serial_console == 1) {
+ switch (serial_console) {
+ case PROMDEV_OTTYA:
mode_prop[3] = 'a';
cd_prop[3] = 'a';
dtr_prop[3] = 'a';
- } else {
+ break;
+
+ case PROMDEV_OTTYB:
mode_prop[3] = 'b';
cd_prop[3] = 'b';
dtr_prop[3] = 'b';
+ break;
+
+ case PROMDEV_ORSC:
+
+ nd = prom_pathtoinode("rsc");
+ if (!nd) {
+ strcpy(mode, "115200,8,n,1,-");
+ goto no_options;
+ }
+
+ if (!prom_node_has_property(nd, ssp_console_modes_prop)) {
+ strcpy(mode, "115200,8,n,1,-");
+ goto no_options;
+ }
+
+ memset(mode, 0, sizeof(mode));
+ prom_getstring(nd, ssp_console_modes_prop, mode, sizeof(mode));
+ goto no_options;
+
+ default:
+ strcpy(mode, "9600,8,n,1,-");
+ goto no_options;
}
topnd = prom_getchild(prom_root_node);
@@ -110,6 +136,10 @@ no_options:
case 9600: cflag |= B9600; break;
case 19200: cflag |= B19200; break;
case 38400: cflag |= B38400; break;
+ case 57600: cflag |= B57600; break;
+ case 115200: cflag |= B115200; break;
+ case 230400: cflag |= B230400; break;
+ case 460800: cflag |= B460800; break;
default: baud = 9600; cflag |= B9600; break;
}
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 7e773ff76c61..8bcaebcc0ad7 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -897,9 +897,6 @@ static int sunsab_console_setup(struct console *con, char *options)
sunserial_console_termios(con);
- /* Firmware console speed is limited to 150-->38400 baud so
- * this hackish cflag thing is OK.
- */
switch (con->cflag & CBAUD) {
case B150: baud = 150; break;
case B300: baud = 300; break;
@@ -910,6 +907,10 @@ static int sunsab_console_setup(struct console *con, char *options)
default: case B9600: baud = 9600; break;
case B19200: baud = 19200; break;
case B38400: baud = 38400; break;
+ case B57600: baud = 57600; break;
+ case B115200: baud = 115200; break;
+ case B230400: baud = 230400; break;
+ case B460800: baud = 460800; break;
};
/*
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index 55e6e2d60d3a..a4d7cc51ce0b 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -199,8 +199,7 @@ struct fbcmap32 {
#define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32)
#define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32)
-static int fbiogetputcmap(struct file *file, struct fb_info *info,
- unsigned int cmd, unsigned long arg)
+static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
struct fbcmap32 __user *argp = (void __user *)arg;
struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
@@ -236,8 +235,7 @@ struct fbcursor32 {
#define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32)
#define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32)
-static int fbiogscursor(struct file *file, struct fb_info *info,
- unsigned long arg)
+static int fbiogscursor(struct fb_info *info, unsigned long arg)
{
struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
struct fbcursor32 __user *argp = (void __user *)arg;
@@ -263,8 +261,7 @@ static int fbiogscursor(struct file *file, struct fb_info *info,
return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p);
}
-long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
+int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FBIOGTYPE:
diff --git a/drivers/video/sbuslib.h b/drivers/video/sbuslib.h
index f753939013ed..492828c3fe8f 100644
--- a/drivers/video/sbuslib.h
+++ b/drivers/video/sbuslib.h
@@ -20,7 +20,7 @@ extern int sbusfb_mmap_helper(struct sbus_mmap_map *map,
int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
struct fb_info *info,
int type, int fb_depth, unsigned long fb_size);
-long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
+int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg);
#endif /* _SBUSLIB_H */