-rw-r--r--  Documentation/networking/operstates.txt | 161
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid | 25
-rw-r--r--  MAINTAINERS | 5
-rw-r--r--  Makefile | 20
-rw-r--r--  arch/i386/kernel/io_apic.c | 5
-rw-r--r--  arch/i386/kernel/mpparse.c | 12
-rw-r--r--  arch/ia64/lib/memcpy_mck.S | 9
-rw-r--r--  arch/ppc/kernel/head_8xx.S | 4
-rw-r--r--  arch/x86_64/kernel/e820.c | 6
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 5
-rw-r--r--  arch/x86_64/kernel/mpparse.c | 12
-rw-r--r--  arch/x86_64/kernel/pci-gart.c | 8
-rw-r--r--  arch/x86_64/kernel/setup.c | 30
-rw-r--r--  arch/x86_64/kernel/traps.c | 12
-rw-r--r--  block/elevator.c | 8
-rw-r--r--  block/ll_rw_blk.c | 17
-rw-r--r--  drivers/base/class.c | 32
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 10
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 11
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 28
-rw-r--r--  drivers/infiniband/core/sysfs.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 41
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c | 15
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h | 22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 31
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 23
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 4
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 195
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 4
-rw-r--r--  drivers/message/fusion/mptbase.c | 63
-rw-r--r--  drivers/message/fusion/mptbase.h | 10
-rw-r--r--  drivers/message/fusion/mptfc.c | 134
-rw-r--r--  drivers/message/fusion/mptsas.c | 99
-rw-r--r--  drivers/message/fusion/mptscsih.c | 50
-rw-r--r--  drivers/message/fusion/mptspi.c | 68
-rw-r--r--  drivers/net/au1000_eth.c | 18
-rw-r--r--  drivers/net/dl2k.c | 12
-rw-r--r--  drivers/net/irda/Makefile | 2
-rw-r--r--  drivers/net/irda/irda-usb.c | 2
-rw-r--r--  drivers/net/irda/sir-dev.h | 13
-rw-r--r--  drivers/net/irda/sir_dev.c | 315
-rw-r--r--  drivers/net/irda/sir_kthread.c | 508
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 14
-rw-r--r--  drivers/net/ne.c | 31
-rw-r--r--  drivers/net/phy/mdio_bus.c | 4
-rw-r--r--  drivers/net/sis900.c | 1
-rw-r--r--  drivers/net/sky2.c | 222
-rw-r--r--  drivers/net/sky2.h | 3
-rw-r--r--  drivers/net/spider_net.c | 12
-rw-r--r--  drivers/net/spider_net.h | 2
-rw-r--r--  drivers/net/sungem_phy.c | 45
-rw-r--r--  drivers/net/sungem_phy.h | 1
-rw-r--r--  drivers/net/tg3.c | 3
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 45
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.h | 6
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_phy.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 7
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_pci.c | 12
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 30
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 95
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 134
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 68
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 59
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 7
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 19
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 27
-rw-r--r--  drivers/scsi/sim710.c | 2
-rw-r--r--  drivers/serial/8250.c | 74
-rw-r--r--  drivers/serial/8250_au1x00.c | 5
-rw-r--r--  drivers/serial/serial_core.c | 114
-rw-r--r--  drivers/video/logo/Makefile | 2
-rw-r--r--  fs/locks.c | 21
-rw-r--r--  fs/xfs/xfs_alloc.c | 5
-rw-r--r--  fs/xfs/xfs_rename.c | 12
-rw-r--r--  fs/xfs/xfs_vfsops.c | 27
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 2
-rw-r--r--  include/asm-i386/io_apic.h | 1
-rw-r--r--  include/asm-ia64/bitops.h | 1
-rw-r--r--  include/asm-ppc/page.h | 1
-rw-r--r--  include/asm-x86_64/e820.h | 2
-rw-r--r--  include/asm-x86_64/io_apic.h | 1
-rw-r--r--  include/linux/device.h | 2
-rw-r--r--  include/linux/dma-mapping.h | 1
-rw-r--r--  include/linux/netdevice.h | 23
-rw-r--r--  include/linux/serial_core.h | 1
-rw-r--r--  include/net/ieee80211.h | 6
-rw-r--r--  include/net/ieee80211softmac.h | 3
-rw-r--r--  include/scsi/srp.h | 23
-rw-r--r--  kernel/ptrace.c | 20
-rw-r--r--  net/bridge/br_if.c | 21
-rw-r--r--  net/core/dev.c | 99
-rw-r--r--  net/core/link_watch.c | 10
-rw-r--r--  net/core/net-sysfs.c | 49
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_assoc.c | 17
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_auth.c | 16
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_module.c | 4
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_scan.c | 8
-rw-r--r--  net/ipv4/ip_input.c | 2
-rw-r--r--  net/ipv4/ip_options.c | 2
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 2
-rw-r--r--  net/irda/irias_object.c | 3
-rw-r--r--  net/sched/sch_hfsc.c | 6
-rw-r--r--  scripts/gen_initramfs_list.sh | 6
-rw-r--r--  scripts/mkmakefile | 5
-rw-r--r--  scripts/mod/modpost.c | 6
117 files changed, 2085 insertions, 1512 deletions
diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt
new file mode 100644
index 000000000000..4a21d9bb836b
--- /dev/null
+++ b/Documentation/networking/operstates.txt
@@ -0,0 +1,161 @@
+
+1. Introduction
+
+Linux distinguishes between administrative and operational state of an
+interface. Administrative state is the result of "ip link set dev
+<dev> up or down" and reflects whether the administrator wants to use
+the device for traffic.
+
+However, an interface is not usable just because the admin enabled it
+- an ethernet cable must be plugged into the switch and, depending on
+a site's networking policy and configuration, an 802.1X authentication
+may have to be performed before user data can be transferred. Operational
+state shows the ability of an interface to transmit this user data.
+
+Because of 802.1X, userspace must be able to influence operational
+state. To accommodate this, operational state is split into two parts:
+two flags that can be set only by the driver, and an RFC2863-compatible
+state that is derived from these flags and a policy, and that can be
+changed from userspace under certain rules.
+
+
+2. Querying from userspace
+
+Both admin and operational state can be queried via the netlink
+operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK
+to be notified of updates. This is important for setting from userspace.
+
+These values contain interface state:
+
+ifinfomsg::if_flags & IFF_UP:
+ Interface is admin up
+ifinfomsg::if_flags & IFF_RUNNING:
+ Interface is in RFC2863 operational state UP or UNKNOWN. This is for
+ backward compatibility; routing daemons and dhcp clients can use this
+ flag to determine whether they should use the interface.
+ifinfomsg::if_flags & IFF_LOWER_UP:
+ Driver has signaled netif_carrier_on()
+ifinfomsg::if_flags & IFF_DORMANT:
+ Driver has signaled netif_dormant_on()
+
+These interface flags can also be queried without netlink using the
+SIOCGIFFLAGS ioctl.
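+
+As an illustration, here is a minimal userspace sketch (assuming an
+interface named "eth0"; error handling is trimmed) that reads the admin
+and RFC2863-derived flags through SIOCGIFFLAGS:
+
+  #include <stdio.h>
+  #include <string.h>
+  #include <unistd.h>
+  #include <sys/ioctl.h>
+  #include <sys/socket.h>
+  #include <net/if.h>
+
+  int main(void)
+  {
+          struct ifreq ifr;
+          int fd = socket(AF_INET, SOCK_DGRAM, 0);
+
+          memset(&ifr, 0, sizeof(ifr));
+          strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+          if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
+                  printf("admin %s, running %s\n",
+                         (ifr.ifr_flags & IFF_UP) ? "up" : "down",
+                         (ifr.ifr_flags & IFF_RUNNING) ? "yes" : "no");
+          close(fd);
+          return 0;
+  }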
+
+TLV IFLA_OPERSTATE
+
+contains RFC2863 state of the interface in numeric representation:
+
+IF_OPER_UNKNOWN (0):
+ Interface is in an unknown state; neither driver nor userspace has set
+ the operational state. The interface must be considered usable for user
+ data, as setting operational state has not been implemented in every driver.
+IF_OPER_NOTPRESENT (1):
+ Unused in current kernel (notpresent interfaces normally disappear),
+ just a numerical placeholder.
+IF_OPER_DOWN (2):
+ Interface is unable to transfer data on L1, e.g. the ethernet cable is
+ not plugged in or the interface is admin down.
+IF_OPER_LOWERLAYERDOWN (3):
+ Interfaces stacked on an interface that is IF_OPER_DOWN show this
+ state (e.g. VLAN).
+IF_OPER_TESTING (4):
+ Unused in current kernel.
+IF_OPER_DORMANT (5):
+ Interface is L1 up, but waiting for an external event, e.g. for a
+ protocol to establish (802.1X).
+IF_OPER_UP (6):
+ Interface is operationally up and can be used.
+
+This TLV can also be queried via sysfs.
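+
+For example, a minimal sketch (the interface name "eth0" is assumed)
+that reads the operational state through sysfs instead of netlink:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          char state[32];
+          FILE *f = fopen("/sys/class/net/eth0/operstate", "r");
+
+          if (f && fgets(state, sizeof(state), f))
+                  /* prints e.g. "up", "down", "dormant", "unknown" */
+                  printf("operstate: %s", state);
+          if (f)
+                  fclose(f);
+          return 0;
+  }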
+
+TLV IFLA_LINKMODE
+
+contains link policy. This is needed for userspace interaction
+described below.
+
+This TLV can also be queried via sysfs.
+
+
+3. Kernel driver API
+
+Kernel drivers have access to two flags that map to IFF_LOWER_UP and
+IFF_DORMANT. These flags can be set from anywhere, even from
+interrupts. It is guaranteed that only the driver has write access;
+however, if different layers of the driver manipulate the same flag,
+the driver has to provide the synchronisation needed.
+
+__LINK_STATE_NOCARRIER, maps to !IFF_LOWER_UP:
+
+The driver uses netif_carrier_on() to clear and netif_carrier_off() to
+set this flag. On netif_carrier_off(), the scheduler stops sending
+packets. The name 'carrier' and the inversion are historical; think of
+it as the lower layer.
+
+netif_carrier_ok() can be used to query that bit.
+
+__LINK_STATE_DORMANT, maps to IFF_DORMANT:
+
+Set by the driver to express that the device cannot yet be used
+because some driver controlled protocol establishment has to
+complete. Corresponding functions are netif_dormant_on() to set the
+flag, netif_dormant_off() to clear it and netif_dormant() to query.
+
+On device allocation, the networking core sets the flags to the
+equivalent of netif_carrier_ok() and !netif_dormant().
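+
+A hedged sketch of how a hypothetical driver might drive both flags
+from its link-change handler (the function name and the phy_link_up and
+dot1x_pending parameters are purely illustrative):
+
+  #include <linux/netdevice.h>
+
+  static void example_link_change(struct net_device *dev, int phy_link_up,
+                                  int dot1x_pending)
+  {
+          if (!phy_link_up) {
+                  netif_carrier_off(dev);         /* L1 down */
+                  return;
+          }
+
+          netif_carrier_on(dev);                  /* L1 up */
+          if (dot1x_pending)
+                  netif_dormant_on(dev);          /* wait for protocol */
+          else
+                  netif_dormant_off(dev);         /* ready for user data */
+  }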
+
+
+Whenever the driver CHANGES one of these flags, a workqueue event is
+scheduled to translate the flag combination to IFLA_OPERSTATE as
+follows:
+
+!netif_carrier_ok():
+ IF_OPER_LOWERLAYERDOWN if the interface is stacked, IF_OPER_DOWN
+ otherwise. The kernel can recognise stacked interfaces because their
+ ifindex != iflink.
+
+netif_carrier_ok() && netif_dormant():
+ IF_OPER_DORMANT
+
+netif_carrier_ok() && !netif_dormant():
+ IF_OPER_UP if userspace interaction is disabled. Otherwise
+ IF_OPER_DORMANT with the possibility for userspace to initiate the
+ IF_OPER_UP transition afterwards.
+
+
+4. Setting from userspace
+
+Applications have to use the netlink interface to influence the
+RFC2863 operational state of an interface. Setting IFLA_LINKMODE to 1
+via RTM_SETLINK instructs the kernel that an interface should go to
+IF_OPER_DORMANT instead of IF_OPER_UP when the combination
+netif_carrier_ok() && !netif_dormant() is set by the
+driver. Afterwards, the userspace application can set IFLA_OPERSTATE
+to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set
+netif_carrier_off() or netif_dormant_on(). Changes made by userspace
+are multicasted on the netlink group RTMGRP_LINK.
+
+So basically, an 802.1X supplicant interacts with the kernel like this:
+
+-subscribe to RTMGRP_LINK
+-set IFLA_LINKMODE to 1 via RTM_SETLINK
+-query RTM_GETLINK once to get initial state
+-if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until
+ netlink multicast signals this state
+-do 802.1X, eventually abort if flags go down again
+-send RTM_SETLINK to set operstate to IF_OPER_UP if authentication
+ succeeds, IF_OPER_DORMANT otherwise
+-see how operstate and IFF_RUNNING are echoed via netlink multicast
+-set interface back to IF_OPER_DORMANT if 802.1X reauthentication
+ fails
+-restart if kernel changes IFF_LOWER_UP or IFF_DORMANT flag
+
+If the supplicant goes down, set IFLA_LINKMODE back to 0 and
+IFLA_OPERSTATE to a sane value.
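+
+A hedged sketch of the RTM_SETLINK step above, setting IFLA_OPERSTATE
+to IF_OPER_UP for a given ifindex (only this one attribute is shown;
+error handling and the RTMGRP_LINK subscription are left out, and the
+exact headers providing IF_OPER_* and IFLA_OPERSTATE may vary with the
+installed libc/kernel headers):
+
+  #include <string.h>
+  #include <unistd.h>
+  #include <sys/socket.h>
+  #include <linux/netlink.h>
+  #include <linux/rtnetlink.h>
+  #include <linux/if.h>
+
+  static int set_operstate_up(int ifindex)
+  {
+          struct {
+                  struct nlmsghdr  nh;
+                  struct ifinfomsg ifi;
+                  char             attrbuf[64];
+          } req;
+          struct sockaddr_nl sa;
+          struct rtattr *rta;
+          unsigned char state = IF_OPER_UP;
+          int fd, ret;
+
+          memset(&req, 0, sizeof(req));
+          req.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.ifi));
+          req.nh.nlmsg_type  = RTM_SETLINK;
+          req.nh.nlmsg_flags = NLM_F_REQUEST;
+          req.ifi.ifi_family = AF_UNSPEC;
+          req.ifi.ifi_index  = ifindex;
+
+          /* append the IFLA_OPERSTATE attribute */
+          rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
+          rta->rta_type = IFLA_OPERSTATE;
+          rta->rta_len  = RTA_LENGTH(sizeof(state));
+          memcpy(RTA_DATA(rta), &state, sizeof(state));
+          req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) +
+                             RTA_LENGTH(sizeof(state));
+
+          fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+          if (fd < 0)
+                  return -1;
+          memset(&sa, 0, sizeof(sa));
+          sa.nl_family = AF_NETLINK;
+          ret = sendto(fd, &req, req.nh.nlmsg_len, 0,
+                       (struct sockaddr *)&sa, sizeof(sa)) < 0 ? -1 : 0;
+          close(fd);
+          return ret;
+  }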
+
+A routing daemon or dhcp client just needs to watch IFF_RUNNING or
+wait for operstate to go to IF_OPER_UP/IF_OPER_UNKNOWN before
+considering the interface usable / querying a DHCP address.
+
+
+For technical questions and/or comments, please e-mail Stefan Rompf
+(stefan at loplof.de).
diff --git a/Documentation/scsi/ChangeLog.megaraid b/Documentation/scsi/ChangeLog.megaraid
index 09f6300eda4b..c173806c91fa 100644
--- a/Documentation/scsi/ChangeLog.megaraid
+++ b/Documentation/scsi/ChangeLog.megaraid
@@ -1,3 +1,28 @@
+Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju <sju@lsil.com>
+Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
+Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module)
+
+1. Fixed a bug in megaraid_reset_handler().
+ Customer reported "Unable to handle kernel NULL pointer dereference
+ at virtual address 00000000" when system goes to reset condition
+ for some reason. It happened randomly.
+ Root Cause: in megaraid_reset_handler(), there is a possibility of not
+ returning pending packets in the pend_list if there are multiple
+ pending packets.
+ Fix: Made the change in the driver so that it will return all packets
+ in the pend_list.
+
+2. Added change request.
+ As found in the following URL, rmb() only didn't help the
+ problem. I had to increase the loop counter to 0xFFFFFF. (6 F's)
+ http://marc.theaimsgroup.com/?l=linux-scsi&m=110971060502497&w=2
+
+ I attached a patch for your reference, too.
+ Could you check and get this fix in your driver?
+
+ Best Regards,
+ Jun'ichi Nomura
+
Release Date : Fri Nov 11 12:27:22 EST 2005 - Seokmann Ju <sju@lsil.com>
Current Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module)
Older Version : 2.20.4.6 (scsi module), 2.20.2.6 (cmm module)
diff --git a/MAINTAINERS b/MAINTAINERS
index d6a8e5b434ec..5e3355871416 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1480,10 +1480,11 @@ L: netdev@vger.kernel.org
S: Maintained
IRDA SUBSYSTEM
-P: Jean Tourrilhes
+P: Samuel Ortiz
+M: samuel@sortiz.org
L: irda-users@lists.sourceforge.net (subscribers-only)
W: http://irda.sourceforge.net/
-S: Odd Fixes
+S: Maintained
ISAPNP
P: Jaroslav Kysela
diff --git a/Makefile b/Makefile
index 6bf99624bd4c..9e4c5692a32f 100644
--- a/Makefile
+++ b/Makefile
@@ -344,16 +344,14 @@ scripts_basic:
scripts/basic/%: scripts_basic ;
PHONY += outputmakefile
-# outputmakefile generate a Makefile to be placed in output directory, if
-# using a seperate output directory. This allows convinient use
-# of make in output directory
+# outputmakefile generates a Makefile in the output directory, if using a
+# separate output directory. This allows convenient use of make in the
+# output directory.
outputmakefile:
- $(Q)if test ! $(srctree) -ef $(objtree); then \
- $(CONFIG_SHELL) $(srctree)/scripts/mkmakefile \
- $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL) \
- > $(objtree)/Makefile; \
- echo ' GEN $(objtree)/Makefile'; \
- fi
+ifneq ($(KBUILD_SRC),)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile \
+ $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)
+endif
# To make sure we do not include .config for any of the *config targets
# catch them early, and hand them over to scripts/kconfig/Makefile
@@ -796,8 +794,8 @@ prepare2: prepare3 outputmakefile
prepare1: prepare2 include/linux/version.h include/asm \
include/config/MARKER
ifneq ($(KBUILD_MODULES),)
- $(Q)rm -rf $(MODVERDIR)
$(Q)mkdir -p $(MODVERDIR)
+ $(Q)rm -f $(MODVERDIR)/*
endif
archprepare: prepare1 scripts_basic
@@ -1086,8 +1084,8 @@ else # KBUILD_EXTMOD
KBUILD_MODULES := 1
PHONY += crmodverdir
crmodverdir:
- $(Q)rm -rf $(MODVERDIR)
$(Q)mkdir -p $(MODVERDIR)
+ $(Q)rm -f $(MODVERDIR)/*
PHONY += $(objtree)/Module.symvers
$(objtree)/Module.symvers:
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index f8f132aa5472..d70f2ade5cde 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -2238,6 +2238,8 @@ static inline void unlock_ExtINT_logic(void)
spin_unlock_irqrestore(&ioapic_lock, flags);
}
+int timer_uses_ioapic_pin_0;
+
/*
* This code may look a bit paranoid, but it's supposed to cooperate with
* a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
@@ -2274,6 +2276,9 @@ static inline void check_timer(void)
pin2 = ioapic_i8259.pin;
apic2 = ioapic_i8259.apic;
+ if (pin1 == 0)
+ timer_uses_ioapic_pin_0 = 1;
+
printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
vector, apic1, pin1, apic2, pin2);
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 34d21e21e012..6b1392d33ed5 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -1130,7 +1130,17 @@ int mp_register_gsi (u32 gsi, int triggering, int polarity)
*/
int irq = gsi;
if (gsi < MAX_GSI_NUM) {
- if (gsi > 15)
+ /*
+ * Retain the VIA chipset work-around (gsi > 15), but
+ * avoid a problem where the 8254 timer (IRQ0) is setup
+ * via an override (so it's not on pin 0 of the ioapic),
+ * and at the same time, the pin 0 interrupt is a PCI
+ * type. The gsi > 15 test could cause these two pins
+ * to be shared as IRQ0, and they are not shareable.
+ * So test for this condition, and if necessary, avoid
+ * the pin collision.
+ */
+ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
gsi = pci_irq++;
/*
* Don't assign IRQ used by ACPI SCI
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index 46c9331e7ab5..9e534d52b1d5 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -6,7 +6,9 @@
* in1: source address
* in2: number of bytes to copy
* Output:
- * 0 if success, or number of byte NOT copied if error occurred.
+ * for memcpy: return dest
+ * for copy_user: return 0 if success,
+ * or number of byte NOT copied if error occurred.
*
* Copyright (C) 2002 Intel Corp.
* Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
@@ -73,6 +75,7 @@ GLOBAL_ENTRY(memcpy)
and r28=0x7,in0
and r29=0x7,in1
mov f6=f0
+ mov retval=in0
br.cond.sptk .common_code
;;
END(memcpy)
@@ -84,7 +87,7 @@ GLOBAL_ENTRY(__copy_user)
mov f6=f1
mov saved_in0=in0 // save dest pointer
mov saved_in1=in1 // save src pointer
- mov saved_in2=in2 // save len
+ mov retval=r0 // initialize return value
;;
.common_code:
cmp.gt p15,p0=8,in2 // check for small size
@@ -92,7 +95,7 @@ GLOBAL_ENTRY(__copy_user)
cmp.ne p14,p0=0,r29 // check src alignment
add src0=0,in1
sub r30=8,r28 // for .align_dest
- mov retval=r0 // initialize return value
+ mov saved_in2=in2 // save len
;;
add dst0=0,in0
add dst1=1,in0 // dest odd index
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S
index ec53c7d65f2b..7a2f20583be4 100644
--- a/arch/ppc/kernel/head_8xx.S
+++ b/arch/ppc/kernel/head_8xx.S
@@ -355,9 +355,7 @@ InstructionTLBMiss:
. = 0x1200
DataStoreTLBMiss:
-#ifdef CONFIG_8xx_CPU6
stw r3, 8(r0)
-#endif
DO_8xx_CPU6(0x3f80, r3)
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
@@ -417,9 +415,7 @@ DataStoreTLBMiss:
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
-#endif
rfi
/* This is an instruction TLB error on the MPC8xx. This could be due
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 62776c07cff1..222b5b46d2b2 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -76,6 +76,12 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
*addrp = __pa_symbol(&_end);
return 1;
}
+
+ if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
+ *addrp = ebda_addr + ebda_size;
+ return 1;
+ }
+
/* XXX ramdisk image here? */
return 0;
}
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 77b4c608cca0..0de3ea938830 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -1777,6 +1777,8 @@ static inline void unlock_ExtINT_logic(void)
spin_unlock_irqrestore(&ioapic_lock, flags);
}
+int timer_uses_ioapic_pin_0;
+
/*
* This code may look a bit paranoid, but it's supposed to cooperate with
* a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
@@ -1814,6 +1816,9 @@ static inline void check_timer(void)
pin2 = ioapic_i8259.pin;
apic2 = ioapic_i8259.apic;
+ if (pin1 == 0)
+ timer_uses_ioapic_pin_0 = 1;
+
apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
vector, apic1, pin1, apic2, pin2);
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index b17cf3eba359..083da7e606b1 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -968,7 +968,17 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
*/
int irq = gsi;
if (gsi < MAX_GSI_NUM) {
- if (gsi > 15)
+ /*
+ * Retain the VIA chipset work-around (gsi > 15), but
+ * avoid a problem where the 8254 timer (IRQ0) is setup
+ * via an override (so it's not on pin 0 of the ioapic),
+ * and at the same time, the pin 0 interrupt is a PCI
+ * type. The gsi > 15 test could cause these two pins
+ * to be shared as IRQ0, and they are not shareable.
+ * So test for this condition, and if necessary, avoid
+ * the pin collision.
+ */
+ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
gsi = pci_irq++;
/*
* Don't assign IRQ used by ACPI SCI
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 9d3d76c85ae7..2480d3f08a47 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -639,6 +639,14 @@ static int __init pci_iommu_init(void)
return -1;
}
+ i = 0;
+ for_all_nb(dev)
+ i++;
+ if (i > MAX_NB) {
+ printk(KERN_ERR "PCI-GART: Too many northbridges (%ld). Disabled\n", i);
+ return -1;
+ }
+
printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index ebc3c33b1c6c..f0870bef24d1 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -571,17 +571,28 @@ static inline void copy_edd(void)
#endif
#define EBDA_ADDR_POINTER 0x40E
-static void __init reserve_ebda_region(void)
+
+unsigned __initdata ebda_addr;
+unsigned __initdata ebda_size;
+
+static void discover_ebda(void)
{
- unsigned int addr;
- /**
+ /*
* there is a real-mode segmented pointer pointing to the
* 4K EBDA area at 0x40E
*/
- addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
- addr <<= 4;
- if (addr)
- reserve_bootmem_generic(addr, PAGE_SIZE);
+ ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
+ ebda_addr <<= 4;
+
+ ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
+
+ /* Round EBDA up to pages */
+ if (ebda_size == 0)
+ ebda_size = 1;
+ ebda_size <<= 10;
+ ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
+ if (ebda_size > 64*1024)
+ ebda_size = 64*1024;
}
void __init setup_arch(char **cmdline_p)
@@ -627,6 +638,8 @@ void __init setup_arch(char **cmdline_p)
check_efer();
+ discover_ebda();
+
init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
dmi_scan_machine();
@@ -669,7 +682,8 @@ void __init setup_arch(char **cmdline_p)
reserve_bootmem_generic(0, PAGE_SIZE);
/* reserve ebda region */
- reserve_ebda_region();
+ if (ebda_addr)
+ reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_SMP
/*
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 2700b1375c1f..6b87268c5c2e 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -385,6 +385,7 @@ void out_of_line_bug(void)
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
+static unsigned int die_nest_count;
unsigned __kprobes long oops_begin(void)
{
@@ -399,6 +400,7 @@ unsigned __kprobes long oops_begin(void)
else
spin_lock(&die_lock);
}
+ die_nest_count++;
die_owner = cpu;
console_verbose();
bust_spinlocks(1);
@@ -409,7 +411,13 @@ void __kprobes oops_end(unsigned long flags)
{
die_owner = -1;
bust_spinlocks(0);
- spin_unlock_irqrestore(&die_lock, flags);
+ die_nest_count--;
+ if (die_nest_count)
+ /* We still own the lock */
+ local_irq_restore(flags);
+ else
+ /* Nest count reaches zero, release the lock. */
+ spin_unlock_irqrestore(&die_lock, flags);
if (panic_on_oops)
panic("Oops");
}
@@ -464,6 +472,8 @@ void __kprobes die_nmi(char *str, struct pt_regs *regs)
panic("nmi watchdog");
printk("console shuts up ...\n");
oops_end(flags);
+ nmi_exit();
+ local_irq_enable();
do_exit(SIGSEGV);
}
diff --git a/block/elevator.c b/block/elevator.c
index 29825792cbd5..8768a367fdde 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -333,6 +333,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
{
struct list_head *pos;
unsigned ordseq;
+ int unplug_it = 1;
blk_add_trace_rq(q, rq, BLK_TA_INSERT);
@@ -399,6 +400,11 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
}
list_add_tail(&rq->queuelist, pos);
+ /*
+ * most requeues happen because of a busy condition, don't
+ * force unplug of the queue for that case.
+ */
+ unplug_it = 0;
break;
default:
@@ -407,7 +413,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
BUG();
}
- if (blk_queue_plugged(q)) {
+ if (unplug_it && blk_queue_plugged(q)) {
int nrq = q->rq.count[READ] + q->rq.count[WRITE]
- q->in_flight;
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e5041a02e21f..eac48bec1479 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1732,8 +1732,21 @@ void blk_run_queue(struct request_queue *q)
spin_lock_irqsave(q->queue_lock, flags);
blk_remove_plug(q);
- if (!elv_queue_empty(q))
- q->request_fn(q);
+
+ /*
+ * Only recurse once to avoid overrunning the stack, let the unplug
+ * handling reinvoke the handler shortly if we already got there.
+ */
+ if (!elv_queue_empty(q)) {
+ if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+ q->request_fn(q);
+ clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+ } else {
+ blk_plug_device(q);
+ kblockd_schedule_work(&q->unplug_work);
+ }
+ }
+
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 0e71dff327cd..b1ea4df85c7d 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -456,6 +456,35 @@ static void class_device_remove_attrs(struct class_device * cd)
}
}
+static int class_device_add_groups(struct class_device * cd)
+{
+ int i;
+ int error = 0;
+
+ if (cd->groups) {
+ for (i = 0; cd->groups[i]; i++) {
+ error = sysfs_create_group(&cd->kobj, cd->groups[i]);
+ if (error) {
+ while (--i >= 0)
+ sysfs_remove_group(&cd->kobj, cd->groups[i]);
+ goto out;
+ }
+ }
+ }
+out:
+ return error;
+}
+
+static void class_device_remove_groups(struct class_device * cd)
+{
+ int i;
+ if (cd->groups) {
+ for (i = 0; cd->groups[i]; i++) {
+ sysfs_remove_group(&cd->kobj, cd->groups[i]);
+ }
+ }
+}
+
static ssize_t show_dev(struct class_device *class_dev, char *buf)
{
return print_dev_t(buf, class_dev->devt);
@@ -559,6 +588,8 @@ int class_device_add(struct class_device *class_dev)
class_name);
}
+ class_device_add_groups(class_dev);
+
kobject_uevent(&class_dev->kobj, KOBJ_ADD);
/* notify any interfaces this device is now here */
@@ -672,6 +703,7 @@ void class_device_del(struct class_device *class_dev)
if (class_dev->devt_attr)
class_device_remove_file(class_dev, class_dev->devt_attr);
class_device_remove_attrs(class_dev);
+ class_device_remove_groups(class_dev);
kobject_uevent(&class_dev->kobj, KOBJ_REMOVE);
kobject_del(&class_dev->kobj);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 02114a0bd0d9..128b2632512d 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1981,10 +1981,6 @@ static int __init cmm_init(void)
if (!cmm_class)
return -1;
- rc = pcmcia_register_driver(&cm4000_driver);
- if (rc < 0)
- return rc;
-
major = register_chrdev(0, DEVICE_NAME, &cm4000_fops);
if (major < 0) {
printk(KERN_WARNING MODULE_NAME
@@ -1992,6 +1988,12 @@ static int __init cmm_init(void)
return -1;
}
+ rc = pcmcia_register_driver(&cm4000_driver);
+ if (rc < 0) {
+ unregister_chrdev(major, DEVICE_NAME);
+ return rc;
+ }
+
return 0;
}
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 29efa64580a8..47a8465bf95b 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -724,16 +724,19 @@ static int __init cm4040_init(void)
if (!cmx_class)
return -1;
- rc = pcmcia_register_driver(&reader_driver);
- if (rc < 0)
- return rc;
-
major = register_chrdev(0, DEVICE_NAME, &reader_fops);
if (major < 0) {
printk(KERN_WARNING MODULE_NAME
": could not get major number\n");
return -1;
}
+
+ rc = pcmcia_register_driver(&reader_driver);
+ if (rc < 0) {
+ unregister_chrdev(major, DEVICE_NAME);
+ return rc;
+ }
+
return 0;
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 956d121cb161..3e6ffcaa5af4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -74,6 +74,8 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
static DEFINE_MUTEX (dbs_mutex);
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+static struct workqueue_struct *dbs_workq;
+
struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
@@ -364,23 +366,29 @@ static void do_dbs_timer(void *data)
mutex_lock(&dbs_mutex);
for_each_online_cpu(i)
dbs_check_cpu(i);
- schedule_delayed_work(&dbs_work,
- usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ queue_delayed_work(dbs_workq, &dbs_work,
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
mutex_unlock(&dbs_mutex);
}
static inline void dbs_timer_init(void)
{
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
- schedule_delayed_work(&dbs_work,
- usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ if (!dbs_workq)
+ dbs_workq = create_singlethread_workqueue("ondemand");
+ if (!dbs_workq) {
+ printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
+ return;
+ }
+ queue_delayed_work(dbs_workq, &dbs_work,
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
return;
}
static inline void dbs_timer_exit(void)
{
- cancel_delayed_work(&dbs_work);
- return;
+ if (dbs_workq)
+ cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -489,8 +497,12 @@ static int __init cpufreq_gov_dbs_init(void)
static void __exit cpufreq_gov_dbs_exit(void)
{
- /* Make sure that the scheduled work is indeed not running */
- flush_scheduled_work();
+ /* Make sure that the scheduled work is indeed not running.
+ Assumes the timer has been cancelled first. */
+ if (dbs_workq) {
+ flush_workqueue(dbs_workq);
+ destroy_workqueue(dbs_workq);
+ }
cpufreq_unregister_governor(&cpufreq_gov_dbs);
}
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 15121cb5a1f6..21f9282c1b25 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
switch (width) {
case 4:
ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
- (offset % 4)) & 0xf);
+ (4 - (offset % 8))) & 0xf);
break;
case 8:
ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 312cf90731ea..205854e9c662 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
spin_lock(&dev->cq_table.lock);
cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-
if (cq)
- atomic_inc(&cq->refcount);
+ ++cq->refcount;
+
spin_unlock(&dev->cq_table.lock);
if (!cq) {
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
if (cq->ibcq.event_handler)
cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
- if (atomic_dec_and_test(&cq->refcount))
+ spin_lock(&dev->cq_table.lock);
+ if (!--cq->refcount)
wake_up(&cq->wait);
+ spin_unlock(&dev->cq_table.lock);
}
static inline int is_recv_cqe(struct mthca_cqe *cqe)
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe)
return !(cqe->is_send & 0x80);
}
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
struct mthca_srq *srq)
{
- struct mthca_cq *cq;
struct mthca_cqe *cqe;
u32 prod_index;
int nfreed = 0;
- spin_lock_irq(&dev->cq_table.lock);
- cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
- spin_unlock_irq(&dev->cq_table.lock);
-
- if (!cq)
- return;
-
spin_lock_irq(&cq->lock);
/*
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
if (0)
mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
- qpn, cqn, cq->cons_index, prod_index);
+ qpn, cq->cqn, cq->cons_index, prod_index);
/*
* Now sweep backwards through the CQ, removing CQ entries
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
}
spin_unlock_irq(&cq->lock);
- if (atomic_dec_and_test(&cq->refcount))
- wake_up(&cq->wait);
}
void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
}
spin_lock_init(&cq->lock);
- atomic_set(&cq->refcount, 1);
+ cq->refcount = 1;
init_waitqueue_head(&cq->wait);
memset(cq_context, 0, sizeof *cq_context);
@@ -896,6 +886,17 @@ err_out:
return err;
}
+static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
+{
+ int c;
+
+ spin_lock_irq(&dev->cq_table.lock);
+ c = cq->refcount;
+ spin_unlock_irq(&dev->cq_table.lock);
+
+ return c;
+}
+
void mthca_free_cq(struct mthca_dev *dev,
struct mthca_cq *cq)
{
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev,
spin_lock_irq(&dev->cq_table.lock);
mthca_array_clear(&dev->cq_table.cq,
cq->cqn & (dev->limits.num_cqs - 1));
+ --cq->refcount;
spin_unlock_irq(&dev->cq_table.lock);
if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev,
else
synchronize_irq(dev->pdev->irq);
- atomic_dec(&cq->refcount);
- wait_event(cq->wait, !atomic_read(&cq->refcount));
+ wait_event(cq->wait, !get_cq_refcount(dev, cq));
if (cq->is_kernel) {
mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 4c1dcb4c1822..f8160b8de090 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev,
void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
enum ib_event_type event_type);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
struct mthca_srq *srq);
void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 25e1c1db9a40..a486dec1707e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
int __devinit mthca_init_mr_table(struct mthca_dev *dev)
{
+ unsigned long addr;
int err, i;
err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
@@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
goto err_fmr_mpt;
}
+ addr = pci_resource_start(dev->pdev, 4) +
+ ((pci_resource_len(dev->pdev, 4) - 1) &
+ dev->mr_table.mpt_base);
+
dev->mr_table.tavor_fmr.mpt_base =
- ioremap(dev->mr_table.mpt_base,
- (1 << i) * sizeof (struct mthca_mpt_entry));
+ ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry));
if (!dev->mr_table.tavor_fmr.mpt_base) {
mthca_warn(dev, "MPT ioremap for FMR failed.\n");
@@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
goto err_fmr_mpt;
}
+ addr = pci_resource_start(dev->pdev, 4) +
+ ((pci_resource_len(dev->pdev, 4) - 1) &
+ dev->mr_table.mtt_base);
+
dev->mr_table.tavor_fmr.mtt_base =
- ioremap(dev->mr_table.mtt_base,
- (1 << i) * MTHCA_MTT_SEG_SIZE);
+ ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE);
if (!dev->mr_table.tavor_fmr.mtt_base) {
mthca_warn(dev, "MTT ioremap for FMR failed.\n");
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 6676a786d690..179a8f610d0f 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -139,11 +139,12 @@ struct mthca_ah {
* a qp may be locked, with the send cq locked first. No other
* nesting should be done.
*
- * Each struct mthca_cq/qp also has an atomic_t ref count. The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
+ * Each struct mthca_cq/qp also has an ref count, protected by the
+ * corresponding table lock. The pointer from the cq/qp_table to the
+ * struct counts as one reference. This reference also is good for
+ * access through the consumer API, so modifying the CQ/QP etc doesn't
+ * need to take another reference. Access to a QP because of a
+ * completion being polled does not need a reference either.
*
* Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
* destroy function to sleep on.
@@ -159,8 +160,9 @@ struct mthca_ah {
* - decrement ref count; if zero, wake up waiters
*
* To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
+ * - lock cq/qp_table
+ * - remove pointer and decrement ref count
+ * - unlock cq/qp_table lock
* - wait_event until ref count is zero
*
* It is the consumer's responsibilty to make sure that no QP
@@ -197,7 +199,7 @@ struct mthca_cq_resize {
struct mthca_cq {
struct ib_cq ibcq;
spinlock_t lock;
- atomic_t refcount;
+ int refcount;
int cqn;
u32 cons_index;
struct mthca_cq_buf buf;
@@ -217,7 +219,7 @@ struct mthca_cq {
struct mthca_srq {
struct ib_srq ibsrq;
spinlock_t lock;
- atomic_t refcount;
+ int refcount;
int srqn;
int max;
int max_gs;
@@ -254,7 +256,7 @@ struct mthca_wq {
struct mthca_qp {
struct ib_qp ibqp;
- atomic_t refcount;
+ int refcount;
u32 qpn;
int is_direct;
u8 port; /* for SQP and memfree use only */
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f37b0e367323..19765f6f8d58 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
spin_lock(&dev->qp_table.lock);
qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
if (qp)
- atomic_inc(&qp->refcount);
+ ++qp->refcount;
spin_unlock(&dev->qp_table.lock);
if (!qp) {
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
if (qp->ibqp.event_handler)
qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
- if (atomic_dec_and_test(&qp->refcount))
+ spin_lock(&dev->qp_table.lock);
+ if (!--qp->refcount)
wake_up(&qp->wait);
+ spin_unlock(&dev->qp_table.lock);
}
static int to_mthca_state(enum ib_qp_state ib_state)
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
* entries and reinitialize the QP.
*/
if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
- mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
- mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
mthca_wq_init(&qp->sq);
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
int ret;
int i;
- atomic_set(&qp->refcount, 1);
+ qp->refcount = 1;
init_waitqueue_head(&qp->wait);
qp->state = IB_QPS_RESET;
qp->atomic_rd_en = 0;
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
return err;
}
+static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
+{
+ int c;
+
+ spin_lock_irq(&dev->qp_table.lock);
+ c = qp->refcount;
+ spin_unlock_irq(&dev->qp_table.lock);
+
+ return c;
+}
+
void mthca_free_qp(struct mthca_dev *dev,
struct mthca_qp *qp)
{
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev,
spin_lock(&dev->qp_table.lock);
mthca_array_clear(&dev->qp_table.qp,
qp->qpn & (dev->limits.num_qps - 1));
+ --qp->refcount;
spin_unlock(&dev->qp_table.lock);
if (send_cq != recv_cq)
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
- atomic_dec(&qp->refcount);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
+ wait_event(qp->wait, !get_qp_refcount(dev, qp));
if (qp->state != IB_QPS_RESET)
mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev,
* unref the mem-free tables and free the QPN in our table.
*/
if (!qp->ibqp.uobject) {
- mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
- mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
mthca_free_memfree(dev, qp);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index adcaf85355ae..1ea433291fa7 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
goto err_out_mailbox;
spin_lock_init(&srq->lock);
- atomic_set(&srq->refcount, 1);
+ srq->refcount = 1;
init_waitqueue_head(&srq->wait);
if (mthca_is_memfree(dev))
@@ -308,6 +308,17 @@ err_out:
return err;
}
+static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+ int c;
+
+ spin_lock_irq(&dev->srq_table.lock);
+ c = srq->refcount;
+ spin_unlock_irq(&dev->srq_table.lock);
+
+ return c;
+}
+
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
struct mthca_mailbox *mailbox;
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
spin_lock_irq(&dev->srq_table.lock);
mthca_array_clear(&dev->srq_table.srq,
srq->srqn & (dev->limits.num_srqs - 1));
+ --srq->refcount;
spin_unlock_irq(&dev->srq_table.lock);
- atomic_dec(&srq->refcount);
- wait_event(srq->wait, !atomic_read(&srq->refcount));
+ wait_event(srq->wait, !get_srq_refcount(dev, srq));
if (!srq->ibsrq.uobject) {
mthca_free_srq_buf(dev, srq);
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
spin_lock(&dev->srq_table.lock);
srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
if (srq)
- atomic_inc(&srq->refcount);
+ ++srq->refcount;
spin_unlock(&dev->srq_table.lock);
if (!srq) {
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
out:
- if (atomic_dec_and_test(&srq->refcount))
+ spin_lock(&dev->srq_table.lock);
+ if (!--srq->refcount)
wake_up(&srq->wait);
+ spin_unlock(&dev->srq_table.lock);
}
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4ca175553f9f..f887780e8093 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -158,10 +158,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
if (priv->pkey == pkey) {
unregister_netdev(priv->dev);
ipoib_dev_cleanup(priv->dev);
-
list_del(&priv->list);
-
- kfree(priv);
+ free_netdev(priv->dev);
ret = 0;
break;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5bb55742ada6..c32ce4348e1b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -409,6 +409,34 @@ static int srp_connect_target(struct srp_target_port *target)
}
}
+static void srp_unmap_data(struct scsi_cmnd *scmnd,
+ struct srp_target_port *target,
+ struct srp_request *req)
+{
+ struct scatterlist *scat;
+ int nents;
+
+ if (!scmnd->request_buffer ||
+ (scmnd->sc_data_direction != DMA_TO_DEVICE &&
+ scmnd->sc_data_direction != DMA_FROM_DEVICE))
+ return;
+
+ /*
+ * This handling of non-SG commands can be killed when the
+ * SCSI midlayer no longer generates non-SG commands.
+ */
+ if (likely(scmnd->use_sg)) {
+ nents = scmnd->use_sg;
+ scat = scmnd->request_buffer;
+ } else {
+ nents = 1;
+ scat = &req->fake_sg;
+ }
+
+ dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
+ scmnd->sc_data_direction);
+}
+
static int srp_reconnect_target(struct srp_target_port *target)
{
struct ib_cm_id *new_cm_id;
@@ -455,16 +483,16 @@ static int srp_reconnect_target(struct srp_target_port *target)
list_for_each_entry(req, &target->req_queue, list) {
req->scmnd->result = DID_RESET << 16;
req->scmnd->scsi_done(req->scmnd);
+ srp_unmap_data(req->scmnd, target, req);
}
target->rx_head = 0;
target->tx_head = 0;
target->tx_tail = 0;
- target->req_head = 0;
- for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
- target->req_ring[i].next = i + 1;
- target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+ INIT_LIST_HEAD(&target->free_reqs);
INIT_LIST_HEAD(&target->req_queue);
+ for (i = 0; i < SRP_SQ_SIZE; ++i)
+ list_add_tail(&target->req_ring[i].list, &target->free_reqs);
ret = srp_connect_target(target);
if (ret)
@@ -589,40 +617,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
return len;
}
-static void srp_unmap_data(struct scsi_cmnd *scmnd,
- struct srp_target_port *target,
- struct srp_request *req)
-{
- struct scatterlist *scat;
- int nents;
-
- if (!scmnd->request_buffer ||
- (scmnd->sc_data_direction != DMA_TO_DEVICE &&
- scmnd->sc_data_direction != DMA_FROM_DEVICE))
- return;
-
- /*
- * This handling of non-SG commands can be killed when the
- * SCSI midlayer no longer generates non-SG commands.
- */
- if (likely(scmnd->use_sg)) {
- nents = scmnd->use_sg;
- scat = scmnd->request_buffer;
- } else {
- nents = 1;
- scat = &req->fake_sg;
- }
-
- dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
- scmnd->sc_data_direction);
-}
-
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req,
- int index)
+static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
- list_del(&req->list);
- req->next = target->req_head;
- target->req_head = index;
+ srp_unmap_data(req->scmnd, target, req);
+ list_move_tail(&req->list, &target->free_reqs);
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -647,7 +645,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
req->tsk_status = rsp->data[3];
complete(&req->done);
} else {
- scmnd = req->scmnd;
+ scmnd = req->scmnd;
if (!scmnd)
printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
(unsigned long long) rsp->tag);
@@ -665,14 +663,11 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
- srp_unmap_data(scmnd, target, req);
-
if (!req->tsk_mgmt) {
- req->scmnd = NULL;
scmnd->host_scribble = (void *) -1L;
scmnd->scsi_done(scmnd);
- srp_remove_req(target, req, rsp->tag & ~SRP_TAG_TSK_MGMT);
+ srp_remove_req(target, req);
} else
req->cmd_done = 1;
}
@@ -859,7 +854,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
struct srp_request *req;
struct srp_iu *iu;
struct srp_cmd *cmd;
- long req_index;
int len;
if (target->state == SRP_TARGET_CONNECTING)
@@ -879,22 +873,20 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
SRP_MAX_IU_LEN, DMA_TO_DEVICE);
- req_index = target->req_head;
+ req = list_entry(target->free_reqs.next, struct srp_request, list);
scmnd->scsi_done = done;
scmnd->result = 0;
- scmnd->host_scribble = (void *) req_index;
+ scmnd->host_scribble = (void *) (long) req->index;
cmd = iu->buf;
memset(cmd, 0, sizeof *cmd);
cmd->opcode = SRP_CMD;
cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
- cmd->tag = req_index;
+ cmd->tag = req->index;
memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
- req = &target->req_ring[req_index];
-
req->scmnd = scmnd;
req->cmd = iu;
req->cmd_done = 0;
@@ -919,8 +911,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
goto err_unmap;
}
- target->req_head = req->next;
- list_add_tail(&req->list, &target->req_queue);
+ list_move_tail(&req->list, &target->req_queue);
return 0;
@@ -1143,30 +1134,20 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}
-static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
+static int srp_send_tsk_mgmt(struct srp_target_port *target,
+ struct srp_request *req, u8 func)
{
- struct srp_target_port *target = host_to_target(scmnd->device->host);
- struct srp_request *req;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
- int req_index;
- int ret = FAILED;
spin_lock_irq(target->scsi_host->host_lock);
if (target->state == SRP_TARGET_DEAD ||
target->state == SRP_TARGET_REMOVED) {
- scmnd->result = DID_BAD_TARGET << 16;
+ req->scmnd->result = DID_BAD_TARGET << 16;
goto out;
}
- if (scmnd->host_scribble == (void *) -1L)
- goto out;
-
- req_index = (long) scmnd->host_scribble;
- printk(KERN_ERR "Abort for req_index %d\n", req_index);
-
- req = &target->req_ring[req_index];
init_completion(&req->done);
iu = __srp_get_tx_iu(target);
@@ -1177,10 +1158,10 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
- tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT;
+ tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
+ tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
tsk_mgmt->tsk_mgmt_func = func;
- tsk_mgmt->task_tag = req_index;
+ tsk_mgmt->task_tag = req->index;
if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
goto out;
@@ -1188,37 +1169,85 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
req->tsk_mgmt = iu;
spin_unlock_irq(target->scsi_host->host_lock);
+
if (!wait_for_completion_timeout(&req->done,
msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
- return FAILED;
- spin_lock_irq(target->scsi_host->host_lock);
+ return -1;
- if (req->cmd_done) {
- srp_remove_req(target, req, req_index);
- scmnd->scsi_done(scmnd);
- } else if (!req->tsk_status) {
- srp_remove_req(target, req, req_index);
- scmnd->result = DID_ABORT << 16;
- ret = SUCCESS;
- }
+ return 0;
out:
spin_unlock_irq(target->scsi_host->host_lock);
- return ret;
+ return -1;
+}
+
+static int srp_find_req(struct srp_target_port *target,
+ struct scsi_cmnd *scmnd,
+ struct srp_request **req)
+{
+ if (scmnd->host_scribble == (void *) -1L)
+ return -1;
+
+ *req = &target->req_ring[(long) scmnd->host_scribble];
+
+ return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_request *req;
+ int ret = SUCCESS;
+
printk(KERN_ERR "SRP abort called\n");
- return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
+ if (srp_find_req(target, scmnd, &req))
+ return FAILED;
+ if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
+ return FAILED;
+
+ spin_lock_irq(target->scsi_host->host_lock);
+
+ if (req->cmd_done) {
+ srp_remove_req(target, req);
+ scmnd->scsi_done(scmnd);
+ } else if (!req->tsk_status) {
+ srp_remove_req(target, req);
+ scmnd->result = DID_ABORT << 16;
+ } else
+ ret = FAILED;
+
+ spin_unlock_irq(target->scsi_host->host_lock);
+
+ return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_request *req, *tmp;
+
printk(KERN_ERR "SRP reset_device called\n");
- return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
+ if (srp_find_req(target, scmnd, &req))
+ return FAILED;
+ if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+ return FAILED;
+ if (req->tsk_status)
+ return FAILED;
+
+ spin_lock_irq(target->scsi_host->host_lock);
+
+ list_for_each_entry_safe(req, tmp, &target->req_queue, list)
+ if (req->scmnd->device == scmnd->device) {
+ req->scmnd->result = DID_RESET << 16;
+ scmnd->scsi_done(scmnd);
+ srp_remove_req(target, req);
+ }
+
+ spin_unlock_irq(target->scsi_host->host_lock);
+
+ return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
@@ -1518,10 +1547,12 @@ static ssize_t srp_create_target(struct class_device *class_dev,
INIT_WORK(&target->work, srp_reconnect_work, target);
- for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
- target->req_ring[i].next = i + 1;
- target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+ INIT_LIST_HEAD(&target->free_reqs);
INIT_LIST_HEAD(&target->req_queue);
+ for (i = 0; i < SRP_SQ_SIZE; ++i) {
+ target->req_ring[i].index = i;
+ list_add_tail(&target->req_ring[i].list, &target->free_reqs);
+ }
ret = srp_parse_options(buf, target);
if (ret)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index bd7f7c3115de..c5cd43aae860 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -101,7 +101,7 @@ struct srp_request {
*/
struct scatterlist fake_sg;
struct completion done;
- short next;
+ short index;
u8 cmd_done;
u8 tsk_status;
};
@@ -133,7 +133,7 @@ struct srp_target_port {
unsigned tx_tail;
struct srp_iu *tx_ring[SRP_SQ_SIZE + 1];
- int req_head;
+ struct list_head free_reqs;
struct list_head req_queue;
struct srp_request req_ring[SRP_SQ_SIZE];
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 266414ca2814..9080853fe283 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1189,7 +1189,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->diagPending = 0;
spin_lock_init(&ioc->diagLock);
spin_lock_init(&ioc->fc_rescan_work_lock);
- spin_lock_init(&ioc->fc_rport_lock);
spin_lock_init(&ioc->initializing_hba_lock);
/* Initialize the event logging.
@@ -5736,11 +5735,13 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
return rc;
}
+# define EVENT_DESCR_STR_SZ 100
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static void
EventDescriptionStr(u8 event, u32 evData0, char *evStr)
{
- char *ds;
+ char *ds = NULL;
switch(event) {
case MPI_EVENT_NONE:
@@ -5777,9 +5778,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
ds = "Loop State(LIP) Change";
else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
- ds = "Loop State(LPE) Change"; /* ??? */
+ ds = "Loop State(LPE) Change"; /* ??? */
else
- ds = "Loop State(LPB) Change"; /* ??? */
+ ds = "Loop State(LPB) Change"; /* ??? */
break;
case MPI_EVENT_LOGOUT:
ds = "Logout";
@@ -5841,27 +5842,32 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
break;
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
{
- char buf[50];
u8 id = (u8)(evData0);
u8 ReasonCode = (u8)(evData0 >> 16);
switch (ReasonCode) {
case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
- sprintf(buf,"SAS Device Status Change: Added: id=%d", id);
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Added: id=%d", id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
- sprintf(buf,"SAS Device Status Change: Deleted: id=%d", id);
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Deleted: id=%d", id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
- sprintf(buf,"SAS Device Status Change: SMART Data: id=%d", id);
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: SMART Data: id=%d",
+ id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
- sprintf(buf,"SAS Device Status Change: No Persistancy Added: id=%d", id);
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: No Persistancy "
+ "Added: id=%d", id);
break;
default:
- sprintf(buf,"SAS Device Status Change: Unknown: id=%d", id);
- break;
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Unknown: id=%d", id);
+ break;
}
- ds = buf;
break;
}
case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
@@ -5878,41 +5884,46 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
break;
case MPI_EVENT_SAS_PHY_LINK_STATUS:
{
- char buf[50];
u8 LinkRates = (u8)(evData0 >> 8);
u8 PhyNumber = (u8)(evData0);
LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >>
MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT;
switch (LinkRates) {
case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN:
- sprintf(buf,"SAS PHY Link Status: Phy=%d:"
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
" Rate Unknown",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED:
- sprintf(buf,"SAS PHY Link Status: Phy=%d:"
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
" Phy Disabled",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION:
- sprintf(buf,"SAS PHY Link Status: Phy=%d:"
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
" Failed Speed Nego",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE:
- sprintf(buf,"SAS PHY Link Status: Phy=%d:"
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
" Sata OOB Completed",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_1_5:
- sprintf(buf,"SAS PHY Link Status: Phy=%d:"
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
" Rate 1.5 Gbps",PhyNumber);
break;
case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
- sprintf(buf,"SAS PHY Link Status: Phy=%d:"
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
" Rate 3.0 Gpbs",PhyNumber);
break;
default:
- sprintf(buf,"SAS PHY Link Status: Phy=%d", PhyNumber);
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d", PhyNumber);
break;
}
- ds = buf;
break;
}
case MPI_EVENT_SAS_DISCOVERY_ERROR:
@@ -5921,9 +5932,8 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
case MPI_EVENT_IR_RESYNC_UPDATE:
{
u8 resync_complete = (u8)(evData0 >> 16);
- char buf[40];
- sprintf(buf,"IR Resync Update: Complete = %d:",resync_complete);
- ds = buf;
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR Resync Update: Complete = %d:",resync_complete);
break;
}
case MPI_EVENT_IR2:
@@ -5976,7 +5986,8 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
ds = "Unknown";
break;
}
- strcpy(evStr,ds);
+ if (ds)
+ strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5998,7 +6009,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
int ii;
int r = 0;
int handlers = 0;
- char evStr[100];
+ char evStr[EVENT_DESCR_STR_SZ];
u8 event;
/*
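
The point of the mptbase.c hunks above is that every event description, fixed or formatted, now ends up as a bounded write into the caller's EVENT_DESCR_STR_SZ buffer instead of an unbounded sprintf() into a small on-stack array. A minimal stand-alone sketch of the same pattern (the event values here are made up for illustration):

#include <stdio.h>
#include <string.h>

#define EVENT_DESCR_STR_SZ 100

/* Mirrors EventDescriptionStr(): fixed strings go through one bounded copy
 * at the end, variable strings are formatted directly into the destination. */
static void describe_event(unsigned reason, unsigned id, char *evStr)
{
	const char *ds = NULL;

	switch (reason) {
	case 1:				/* stand-in for "device added" */
		snprintf(evStr, EVENT_DESCR_STR_SZ,
			 "SAS Device Status Change: Added: id=%u", id);
		break;
	default:
		ds = "Unknown";
		break;
	}

	if (ds)
		strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
}

int main(void)
{
	char evStr[EVENT_DESCR_STR_SZ];

	describe_event(1, 3, evStr);
	printf("%s\n", evStr);
	return 0;
}
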
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index be7e8501b53c..f673cca507e1 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.03.08"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.08"
+#define MPT_LINUX_VERSION_COMMON "3.03.09"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.09"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -489,7 +489,6 @@ typedef struct _RaidCfgData {
#define MPT_RPORT_INFO_FLAGS_REGISTERED 0x01 /* rport registered */
#define MPT_RPORT_INFO_FLAGS_MISSING 0x02 /* missing from DevPage0 scan */
-#define MPT_RPORT_INFO_FLAGS_MAPPED_VDEV 0x04 /* target mapped in vdev */
/*
* data allocated for each fc rport device
@@ -501,7 +500,6 @@ struct mptfc_rport_info
struct scsi_target *starget;
FCDevicePage0_t pg0;
u8 flags;
- u8 remap_needed;
};
/*
@@ -628,11 +626,11 @@ typedef struct _MPT_ADAPTER
struct work_struct mptscsih_persistTask;
struct list_head fc_rports;
- spinlock_t fc_rport_lock; /* list and ri flags */
spinlock_t fc_rescan_work_lock;
int fc_rescan_work_count;
struct work_struct fc_rescan_work;
-
+ char fc_rescan_work_q_name[KOBJ_NAME_LEN];
+ struct workqueue_struct *fc_rescan_work_q;
} MPT_ADAPTER;
/*
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index b343f2a68b1c..856487741ef4 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -341,9 +341,6 @@ mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid)
rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low;
rid->port_id = pg0->PortIdentifier;
rid->roles = FC_RPORT_ROLE_UNKNOWN;
- rid->roles |= FC_RPORT_ROLE_FCP_TARGET;
- if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
- rid->roles |= FC_RPORT_ROLE_FCP_INITIATOR;
return 0;
}
@@ -355,15 +352,18 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
struct fc_rport *rport;
struct mptfc_rport_info *ri;
int new_ri = 1;
- u64 pn;
- unsigned long flags;
+ u64 pn, nn;
VirtTarget *vtarget;
+ u32 roles = FC_RPORT_ROLE_UNKNOWN;
if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0)
return;
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+
/* scan list looking for a match */
- spin_lock_irqsave(&ioc->fc_rport_lock, flags);
list_for_each_entry(ri, &ioc->fc_rports, list) {
pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
if (pn == rport_ids.port_name) { /* match */
@@ -373,11 +373,9 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
}
}
if (new_ri) { /* allocate one */
- spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL);
if (!ri)
return;
- spin_lock_irqsave(&ioc->fc_rport_lock, flags);
list_add_tail(&ri->list, &ioc->fc_rports);
}
@@ -387,14 +385,11 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
/* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */
if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) {
ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED;
- spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
rport = fc_remote_port_add(ioc->sh, channel, &rport_ids);
- spin_lock_irqsave(&ioc->fc_rport_lock, flags);
if (rport) {
ri->rport = rport;
if (new_ri) /* may have been reset by user */
rport->dev_loss_tmo = mptfc_dev_loss_tmo;
- *((struct mptfc_rport_info **)rport->dd_data) = ri;
/*
* if already mapped, remap here. If not mapped,
* target_alloc will allocate vtarget and map,
@@ -406,16 +401,21 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
vtarget->target_id = pg0->CurrentTargetID;
vtarget->bus_id = pg0->CurrentBus;
}
- ri->remap_needed = 0;
}
+ *((struct mptfc_rport_info **)rport->dd_data) = ri;
+ /* scan will be scheduled once rport becomes a target */
+ fc_remote_port_rolechg(rport,roles);
+
+ pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
+ nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
dfcprintk ((MYIOC_s_INFO_FMT
"mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, "
"rport tid %d, tmo %d\n",
ioc->name,
ioc->sh->host_no,
pg0->PortIdentifier,
- pg0->WWNN,
- pg0->WWPN,
+ (unsigned long long)nn,
+ (unsigned long long)pn,
pg0->CurrentTargetID,
ri->rport->scsi_target_id,
ri->rport->dev_loss_tmo));
@@ -425,8 +425,6 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
ri = NULL;
}
}
- spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
-
}
/*
@@ -476,7 +474,6 @@ mptfc_target_alloc(struct scsi_target *starget)
vtarget->target_id = ri->pg0.CurrentTargetID;
vtarget->bus_id = ri->pg0.CurrentBus;
ri->starget = starget;
- ri->remap_needed = 0;
rc = 0;
}
}
@@ -502,10 +499,10 @@ mptfc_slave_alloc(struct scsi_device *sdev)
VirtDevice *vdev;
struct scsi_target *starget;
struct fc_rport *rport;
- unsigned long flags;
- rport = starget_to_rport(scsi_target(sdev));
+ starget = scsi_target(sdev);
+ rport = starget_to_rport(starget);
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
@@ -519,10 +516,8 @@ mptfc_slave_alloc(struct scsi_device *sdev)
return -ENOMEM;
}
- spin_lock_irqsave(&hd->ioc->fc_rport_lock,flags);
sdev->hostdata = vdev;
- starget = scsi_target(sdev);
vtarget = starget->hostdata;
if (vtarget->num_luns == 0) {
@@ -535,14 +530,16 @@ mptfc_slave_alloc(struct scsi_device *sdev)
vdev->vtarget = vtarget;
vdev->lun = sdev->lun;
- spin_unlock_irqrestore(&hd->ioc->fc_rport_lock,flags);
-
vtarget->num_luns++;
+
#ifdef DMPT_DEBUG_FC
- {
+ {
+ u64 nn, pn;
struct mptfc_rport_info *ri;
ri = *((struct mptfc_rport_info **)rport->dd_data);
+ pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
+ nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
dfcprintk ((MYIOC_s_INFO_FMT
"mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, "
"CurrentTargetID %d, %x %llx %llx\n",
@@ -550,7 +547,9 @@ mptfc_slave_alloc(struct scsi_device *sdev)
sdev->host->host_no,
vtarget->num_luns,
sdev->id, ri->pg0.CurrentTargetID,
- ri->pg0.PortIdentifier, ri->pg0.WWPN, ri->pg0.WWNN));
+ ri->pg0.PortIdentifier,
+ (unsigned long long)pn,
+ (unsigned long long)nn));
}
#endif
@@ -570,11 +569,31 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
done(SCpnt);
return 0;
}
+
+ /* dd_data is null until finished adding target */
ri = *((struct mptfc_rport_info **)rport->dd_data);
- if (unlikely(ri->remap_needed))
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(!ri)) {
+ dfcprintk ((MYIOC_s_INFO_FMT
+ "mptfc_qcmd.%d: %d:%d, dd_data is null.\n",
+ ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->name,
+ ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no,
+ SCpnt->device->id,SCpnt->device->lun));
+ SCpnt->result = DID_IMM_RETRY << 16;
+ done(SCpnt);
+ return 0;
+ }
- return mptscsih_qcmd(SCpnt,done);
+ err = mptscsih_qcmd(SCpnt,done);
+#ifdef DMPT_DEBUG_FC
+ if (unlikely(err)) {
+ dfcprintk ((MYIOC_s_INFO_FMT
+ "mptfc_qcmd.%d: %d:%d, mptscsih_qcmd returns non-zero.\n",
+ ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->name,
+ ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no,
+ SCpnt->device->id,SCpnt->device->lun));
+ }
+#endif
+ return err;
}
static void
@@ -615,18 +634,17 @@ mptfc_rescan_devices(void *arg)
MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
int ii;
int work_to_do;
+ u64 pn;
unsigned long flags;
struct mptfc_rport_info *ri;
do {
/* start by tagging all ports as missing */
- spin_lock_irqsave(&ioc->fc_rport_lock,flags);
list_for_each_entry(ri, &ioc->fc_rports, list) {
if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
}
}
- spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
/*
* now rescan devices known to adapter,
@@ -639,33 +657,24 @@ mptfc_rescan_devices(void *arg)
}
/* delete devices still missing */
- spin_lock_irqsave(&ioc->fc_rport_lock, flags);
list_for_each_entry(ri, &ioc->fc_rports, list) {
/* if newly missing, delete it */
- if ((ri->flags & (MPT_RPORT_INFO_FLAGS_REGISTERED |
- MPT_RPORT_INFO_FLAGS_MISSING))
- == (MPT_RPORT_INFO_FLAGS_REGISTERED |
- MPT_RPORT_INFO_FLAGS_MISSING)) {
+ if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
MPT_RPORT_INFO_FLAGS_MISSING);
- ri->remap_needed = 1;
- fc_remote_port_delete(ri->rport);
- /*
- * remote port not really deleted 'cause
- * binding is by WWPN and driver only
- * registers FCP_TARGETs but cannot trust
- * data structures.
- */
+ fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
+
+ pn = (u64)ri->pg0.WWPN.High << 32 |
+ (u64)ri->pg0.WWPN.Low;
dfcprintk ((MYIOC_s_INFO_FMT
"mptfc_rescan.%d: %llx deleted\n",
ioc->name,
ioc->sh->host_no,
- ri->pg0.WWPN));
+ (unsigned long long)pn));
}
}
- spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
/*
* allow multiple passes as target state
@@ -870,10 +879,23 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_mptfc_probe;
}
- for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
- mptfc_init_host_attr(ioc,ii);
- mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev);
- }
+ /* initialize workqueue */
+
+ snprintf(ioc->fc_rescan_work_q_name, KOBJ_NAME_LEN, "mptfc_wq_%d",
+ sh->host_no);
+ ioc->fc_rescan_work_q =
+ create_singlethread_workqueue(ioc->fc_rescan_work_q_name);
+ if (!ioc->fc_rescan_work_q)
+ goto out_mptfc_probe;
+
+ /*
+ * scan for rports -
+ * by doing it via the workqueue, some locking is eliminated
+ */
+
+ ioc->fc_rescan_work_count = 1;
+ queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
+ flush_workqueue(ioc->fc_rescan_work_q);
return 0;
@@ -949,8 +971,18 @@ mptfc_init(void)
static void __devexit
mptfc_remove(struct pci_dev *pdev)
{
- MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
- struct mptfc_rport_info *p, *n;
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct mptfc_rport_info *p, *n;
+ struct workqueue_struct *work_q;
+ unsigned long flags;
+
+ /* destroy workqueue */
+ if ((work_q=ioc->fc_rescan_work_q)) {
+ spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
+ ioc->fc_rescan_work_q = NULL;
+ spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
+ destroy_workqueue(work_q);
+ }
fc_remove_host(ioc->sh);
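
The mptfc probe/remove changes above follow a common lifecycle for a per-adapter workqueue: create it in probe, queue and flush the initial scan, and on remove clear the pointer under the lock before destroying the queue so late events cannot queue new work. A sketch of that lifecycle using the same calls as the hunks above (everything named demo_* is illustrative, not part of the driver):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;
static spinlock_t demo_lock;

static void demo_scan(void *data)
{
	/* runs in process context on the dedicated thread; may sleep */
}

static int __init demo_init(void)
{
	spin_lock_init(&demo_lock);

	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	/* three-argument INIT_WORK(), as used throughout this patch */
	INIT_WORK(&demo_work, demo_scan, NULL);
	queue_work(demo_wq, &demo_work);
	flush_workqueue(demo_wq);	/* wait for the initial scan, as in probe */
	return 0;
}

static void __exit demo_exit(void)
{
	struct workqueue_struct *wq;
	unsigned long flags;

	/* publish "no more queueing" first, then destroy outside the lock */
	spin_lock_irqsave(&demo_lock, flags);
	wq = demo_wq;
	demo_wq = NULL;
	spin_unlock_irqrestore(&demo_lock, flags);

	if (wq)
		destroy_workqueue(wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
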
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index e9716b10acea..af6ec553ff7c 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -91,6 +91,7 @@ enum mptsas_hotplug_action {
MPTSAS_DEL_DEVICE,
MPTSAS_ADD_RAID,
MPTSAS_DEL_RAID,
+ MPTSAS_IGNORE_EVENT,
};
struct mptsas_hotplug_event {
@@ -298,6 +299,26 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
return rc;
}
+/*
+ * Returns true if there is a scsi end device
+ */
+static inline int
+mptsas_is_end_device(struct mptsas_devinfo * attached)
+{
+ if ((attached->handle) &&
+ (attached->device_info &
+ MPI_SAS_DEVICE_INFO_END_DEVICE) &&
+ ((attached->device_info &
+ MPI_SAS_DEVICE_INFO_SSP_TARGET) |
+ (attached->device_info &
+ MPI_SAS_DEVICE_INFO_STP_TARGET) |
+ (attached->device_info &
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
+ return 1;
+ else
+ return 0;
+}
+
static int
mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
u32 form, u32 form_specific)
@@ -872,7 +893,11 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
SasDevicePage0_t *buffer;
dma_addr_t dma_handle;
__le64 sas_address;
- int error;
+ int error=0;
+
+ if (ioc->sas_discovery_runtime &&
+ mptsas_is_end_device(device_info))
+ goto out;
hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
hdr.ExtPageLength = 0;
@@ -1009,7 +1034,11 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
CONFIGPARMS cfg;
SasExpanderPage1_t *buffer;
dma_addr_t dma_handle;
- int error;
+ int error=0;
+
+ if (ioc->sas_discovery_runtime &&
+ mptsas_is_end_device(&phy_info->attached))
+ goto out;
hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
hdr.ExtPageLength = 0;
@@ -1068,26 +1097,6 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
return error;
}
-/*
- * Returns true if there is a scsi end device
- */
-static inline int
-mptsas_is_end_device(struct mptsas_devinfo * attached)
-{
- if ((attached->handle) &&
- (attached->device_info &
- MPI_SAS_DEVICE_INFO_END_DEVICE) &&
- ((attached->device_info &
- MPI_SAS_DEVICE_INFO_SSP_TARGET) |
- (attached->device_info &
- MPI_SAS_DEVICE_INFO_STP_TARGET) |
- (attached->device_info &
- MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
- return 1;
- else
- return 0;
-}
-
static void
mptsas_parse_device_info(struct sas_identify *identify,
struct mptsas_devinfo *device_info)
@@ -1737,6 +1746,9 @@ mptsas_hotplug_work(void *arg)
break;
case MPTSAS_ADD_DEVICE:
+ if (ev->phys_disk_num_valid)
+ mpt_findImVolumes(ioc);
+
/*
* Refresh sas device pg0 data
*/
@@ -1868,6 +1880,9 @@ mptsas_hotplug_work(void *arg)
scsi_device_put(sdev);
mpt_findImVolumes(ioc);
break;
+ case MPTSAS_IGNORE_EVENT:
+ default:
+ break;
}
kfree(ev);
@@ -1940,7 +1955,8 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
EVENT_DATA_RAID *raid_event_data)
{
struct mptsas_hotplug_event *ev;
- RAID_VOL0_STATUS * volumeStatus;
+ int status = le32_to_cpu(raid_event_data->SettingsStatus);
+ int state = (status >> 8) & 0xff;
if (ioc->bus_type != SAS)
return;
@@ -1955,6 +1971,7 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
ev->ioc = ioc;
ev->id = raid_event_data->VolumeID;
+ ev->event_type = MPTSAS_IGNORE_EVENT;
switch (raid_event_data->ReasonCode) {
case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
@@ -1966,6 +1983,25 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
ev->phys_disk_num = raid_event_data->PhysDiskNum;
ev->event_type = MPTSAS_DEL_DEVICE;
break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
+ switch (state) {
+ case MPI_PD_STATE_ONLINE:
+ ioc->raid_data.isRaid = 1;
+ ev->phys_disk_num_valid = 1;
+ ev->phys_disk_num = raid_event_data->PhysDiskNum;
+ ev->event_type = MPTSAS_ADD_DEVICE;
+ break;
+ case MPI_PD_STATE_MISSING:
+ case MPI_PD_STATE_NOT_COMPATIBLE:
+ case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
+ case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
+ case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
+ ev->event_type = MPTSAS_DEL_DEVICE;
+ break;
+ default:
+ break;
+ }
+ break;
case MPI_EVENT_RAID_RC_VOLUME_DELETED:
ev->event_type = MPTSAS_DEL_RAID;
break;
@@ -1973,11 +2009,18 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
ev->event_type = MPTSAS_ADD_RAID;
break;
case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
- volumeStatus = (RAID_VOL0_STATUS *) &
- raid_event_data->SettingsStatus;
- ev->event_type = (volumeStatus->State ==
- MPI_RAIDVOL0_STATUS_STATE_FAILED) ?
- MPTSAS_DEL_RAID : MPTSAS_ADD_RAID;
+ switch (state) {
+ case MPI_RAIDVOL0_STATUS_STATE_FAILED:
+ case MPI_RAIDVOL0_STATUS_STATE_MISSING:
+ ev->event_type = MPTSAS_DEL_RAID;
+ break;
+ case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
+ case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
+ ev->event_type = MPTSAS_ADD_RAID;
+ break;
+ default:
+ break;
+ }
break;
default:
break;
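
The reworked RAID event handling above keys off the state byte of SettingsStatus rather than casting the word to RAID_VOL0_STATUS. A small stand-alone illustration of that decode and the add/delete/ignore decision (the state values below are placeholders, not the real MPI constants):

#include <stdio.h>
#include <stdint.h>

enum demo_action { DEMO_IGNORE, DEMO_ADD, DEMO_DEL };

/* SettingsStatus carries the volume/disk state in bits 15:8 once it has
 * been converted from little-endian (le32_to_cpu() in the driver). */
static enum demo_action classify(uint32_t settings_status)
{
	int state = (settings_status >> 8) & 0xff;

	switch (state) {
	case 0x01:			/* stand-in for an "online/optimal" state */
		return DEMO_ADD;
	case 0x02:			/* stand-in for a "missing/failed" state */
		return DEMO_DEL;
	default:
		return DEMO_IGNORE;	/* unknown states are dropped, as above */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(0x0100), classify(0x0200), classify(0x0300));
	return 0;
}
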
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 3729062db317..84fa271eb8f4 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -632,7 +632,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
/* Spoof to SCSI Selection Timeout! */
- sc->result = DID_NO_CONNECT << 16;
+ if (ioc->bus_type != FC)
+ sc->result = DID_NO_CONNECT << 16;
+ /* else fibre, just stall until rescan event */
+ else
+ sc->result = DID_REQUEUE << 16;
if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
hd->sel_timeout[pScsiReq->TargetID]++;
@@ -877,7 +881,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
struct scsi_cmnd *sc;
dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
- vdevice->target_id, vdevice->lun, max));
+ vdevice->vtarget->target_id, vdevice->lun, max));
for (ii=0; ii < max; ii++) {
if ((sc = hd->ScsiLookup[ii]) != NULL) {
@@ -1645,7 +1649,6 @@ int
mptscsih_abort(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
- MPT_ADAPTER *ioc;
MPT_FRAME_HDR *mf;
u32 ctx2abort;
int scpnt_idx;
@@ -1663,14 +1666,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
return FAILED;
}
- ioc = hd->ioc;
- if (hd->resetPending) {
- return FAILED;
- }
-
- if (hd->timeouts < -1)
- hd->timeouts++;
-
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
@@ -1684,6 +1679,13 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
return SUCCESS;
}
+ if (hd->resetPending) {
+ return FAILED;
+ }
+
+ if (hd->timeouts < -1)
+ hd->timeouts++;
+
printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
hd->ioc->name, SCpnt);
scsi_print_command(SCpnt);
@@ -1703,7 +1705,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
vdev = SCpnt->device->hostdata;
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun,
- ctx2abort, mptscsih_get_tm_timeout(ioc));
+ ctx2abort, mptscsih_get_tm_timeout(hd->ioc));
printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
hd->ioc->name,
@@ -2521,15 +2523,15 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
/* 7. FC: Rescan for blocked rports which might have returned.
*/
- else if (ioc->bus_type == FC) {
- int work_count;
- unsigned long flags;
-
+ if (ioc->bus_type == FC) {
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
- work_count = ++ioc->fc_rescan_work_count;
+ if (ioc->fc_rescan_work_q) {
+ if (ioc->fc_rescan_work_count++ == 0) {
+ queue_work(ioc->fc_rescan_work_q,
+ &ioc->fc_rescan_work);
+ }
+ }
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
- if (work_count == 1)
- schedule_work(&ioc->fc_rescan_work);
}
dtmprintk((MYIOC_s_WARN_FMT "Post-Reset complete.\n", ioc->name));
@@ -2544,7 +2546,6 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
MPT_SCSI_HOST *hd;
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
- int work_count;
unsigned long flags;
devtverboseprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
@@ -2569,10 +2570,13 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
case MPI_EVENT_RESCAN: /* 06 */
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
- work_count = ++ioc->fc_rescan_work_count;
+ if (ioc->fc_rescan_work_q) {
+ if (ioc->fc_rescan_work_count++ == 0) {
+ queue_work(ioc->fc_rescan_work_q,
+ &ioc->fc_rescan_work);
+ }
+ }
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
- if (work_count == 1)
- schedule_work(&ioc->fc_rescan_work);
break;
/*
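
Both the post-reset path and MPI_EVENT_RESCAN above now use the same guarded "queue only on the 0 to 1 transition" idiom: a rescan that is already pending is not queued a second time, and nothing is queued once remove has cleared the workqueue pointer. A compile-time sketch of that idiom with illustrative names (the comment about the handler reflects how fc_rescan_work_count is used elsewhere in this patch):

#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct rescan_ctx {
	spinlock_t lock;
	int count;			/* outstanding rescan requests */
	struct workqueue_struct *wq;	/* NULL once teardown has started */
	struct work_struct work;
};

static void request_rescan(struct rescan_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->wq) {
		/* only the first request queues; the handler is expected to
		 * loop until it has consumed count, picking up the rest */
		if (ctx->count++ == 0)
			queue_work(ctx->wq, &ctx->work);
	}
	spin_unlock_irqrestore(&ctx->lock, flags);
}
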
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 09c745b19cc8..f2a4d382ea19 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -783,6 +783,70 @@ static struct pci_device_id mptspi_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, mptspi_pci_table);
+
+/*
+ * renegotiate for a given target
+ */
+static void
+mptspi_dv_renegotiate_work(void *data)
+{
+ struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+ struct _MPT_SCSI_HOST *hd = wqw->hd;
+ struct scsi_device *sdev;
+
+ kfree(wqw);
+
+ shost_for_each_device(sdev, hd->ioc->sh)
+ mptspi_dv_device(hd, sdev);
+}
+
+static void
+mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
+{
+ struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC);
+
+ if (!wqw)
+ return;
+
+ INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
+ wqw->hd = hd;
+
+ schedule_work(&wqw->work);
+}
+
+/*
+ * spi module reset handler
+ */
+static int
+mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+ struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata;
+ int rc;
+
+ rc = mptscsih_ioc_reset(ioc, reset_phase);
+
+ if (reset_phase == MPT_IOC_POST_RESET)
+ mptspi_dv_renegotiate(hd);
+
+ return rc;
+}
+
+/*
+ * spi module resume handler
+ */
+static int
+mptspi_resume(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata;
+ int rc;
+
+ rc = mptscsih_resume(pdev);
+ mptspi_dv_renegotiate(hd);
+
+ return rc;
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
@@ -1032,7 +1096,7 @@ static struct pci_driver mptspi_driver = {
.shutdown = mptscsih_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
- .resume = mptscsih_resume,
+ .resume = mptspi_resume,
#endif
};
@@ -1061,7 +1125,7 @@ mptspi_init(void)
": Registered for IOC event notifications\n"));
}
- if (mpt_reset_register(mptspiDoneCtx, mptscsih_ioc_reset) == 0) {
+ if (mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset) == 0) {
dprintk((KERN_INFO MYNAM
": Registered for IOC reset notifications\n"));
}
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 1363083b4d83..14dbad14afb6 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -52,6 +52,7 @@
#include <linux/mii.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
+#include <linux/crc32.h>
#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/io.h>
@@ -2070,23 +2071,6 @@ static void au1000_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-
-static unsigned const ethernet_polynomial = 0x04c11db7U;
-static inline u32 ether_crc(int length, unsigned char *data)
-{
- int crc = -1;
-
- while(--length >= 0) {
- unsigned char current_octet = *data++;
- int bit;
- for (bit = 0; bit < 8; bit++, current_octet >>= 1)
- crc = (crc << 1) ^
- ((crc < 0) ^ (current_octet & 1) ?
- ethernet_polynomial : 0);
- }
- return crc;
-}
-
static void set_rx_mode(struct net_device *dev)
{
struct au1000_private *aup = (struct au1000_private *) dev->priv;
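
au1000_eth.c drops its private CRC routine in favour of ether_crc() from <linux/crc32.h>, which the new include provides. For reference, a stand-alone, behaviour-equivalent version of the routine that was removed (rewritten with an unsigned accumulator so the shifts are well defined; it should produce the same value as the kernel helper for the same input):

#include <stdio.h>

/* Bit-serial Ethernet CRC-32 (polynomial 0x04c11db7, MSB first, seeded with
 * all ones, no final inversion), matching the routine removed above. */
static unsigned int demo_ether_crc(int length, const unsigned char *data)
{
	const unsigned int poly = 0x04c11db7U;
	unsigned int crc = 0xffffffffU;

	while (--length >= 0) {
		unsigned char octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ octet) & 1) ? poly : 0);
	}
	return crc;
}

int main(void)
{
	const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* typical multicast-hash usage: the top bits select a filter bucket */
	printf("crc = 0x%08x, bucket = %u\n",
	       demo_ether_crc(6, mcast), demo_ether_crc(6, mcast) >> 26);
	return 0;
}
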
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 1f3627470c95..1ddefd281213 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq)
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
- np->tx_ring[entry].fraginfo & 0xffffffffffff,
+ np->tx_ring[entry].fraginfo & DMA_48BIT_MASK,
skb->len, PCI_DMA_TODEVICE);
if (irq)
dev_kfree_skb_irq (skb);
@@ -893,7 +893,7 @@ receive_packet (struct net_device *dev)
/* Small skbuffs for short packets */
if (pkt_len > copy_thresh) {
pci_unmap_single (np->pdev,
- desc->fraginfo & 0xffffffffffff,
+ desc->fraginfo & DMA_48BIT_MASK,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
@@ -901,7 +901,7 @@ receive_packet (struct net_device *dev)
} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
pci_dma_sync_single_for_cpu(np->pdev,
desc->fraginfo &
- 0xffffffffffff,
+ DMA_48BIT_MASK,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb->dev = dev;
@@ -913,7 +913,7 @@ receive_packet (struct net_device *dev)
skb_put (skb, pkt_len);
pci_dma_sync_single_for_device(np->pdev,
desc->fraginfo &
- 0xffffffffffff,
+ DMA_48BIT_MASK,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
}
@@ -1800,7 +1800,7 @@ rio_close (struct net_device *dev)
skb = np->rx_skbuff[i];
if (skb) {
pci_unmap_single(np->pdev,
- np->rx_ring[i].fraginfo & 0xffffffffffff,
+ np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
skb->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb (skb);
np->rx_skbuff[i] = NULL;
@@ -1810,7 +1810,7 @@ rio_close (struct net_device *dev)
skb = np->tx_skbuff[i];
if (skb) {
pci_unmap_single(np->pdev,
- np->tx_ring[i].fraginfo & 0xffffffffffff,
+ np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb (skb);
np->tx_skbuff[i] = NULL;
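
dl2k.c packs the DMA address into the low 48 bits of each descriptor's fraginfo word (the upper bits carry other per-fragment data in this driver), so the open-coded 0xffffffffffff literal is replaced with DMA_48BIT_MASK from <linux/dma-mapping.h>. A stand-alone check of the same mask, with the constant spelled out here rather than taken from the kernel headers:

#include <stdio.h>
#include <stdint.h>

#define DMA_48BIT_MASK 0x0000ffffffffffffULL	/* low 48 bits only */

int main(void)
{
	uint64_t fraginfo = 0xabcd00000badcafeULL;	/* top 16 bits are not address */
	uint64_t dma_addr = fraginfo & DMA_48BIT_MASK;

	printf("dma addr = %#llx\n", (unsigned long long)dma_addr);
	return 0;
}
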
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 27ab75f20799..c1ce2398efea 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -46,4 +46,4 @@ obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
# The SIR helper module
-sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o
+sir-dev-objs := sir_dev.o sir_dongle.o
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 96bdb73c2283..cd87593e4e8a 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1778,7 +1778,7 @@ static int irda_usb_probe(struct usb_interface *intf,
if (self->needspatch) {
ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0),
- 0x02, 0x40, 0, 0, 0, 0, msecs_to_jiffies(500));
+ 0x02, 0x40, 0, 0, NULL, 0, 500);
if (ret < 0) {
IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret);
goto err_out_3;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index f69fb4cec76f..9fa294a546d6 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -15,23 +15,14 @@
#define IRDA_SIR_H
#include <linux/netdevice.h>
+#include <linux/workqueue.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h> // iobuff_t
-/* FIXME: unify irda_request with sir_fsm! */
-
-struct irda_request {
- struct list_head lh_request;
- unsigned long pending;
- void (*func)(void *);
- void *data;
- struct timer_list timer;
-};
-
struct sir_fsm {
struct semaphore sem;
- struct irda_request rq;
+ struct work_struct work;
unsigned state, substate;
int param;
int result;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index ea7c9464d46a..3b5854d10c17 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -23,6 +23,298 @@
#include "sir-dev.h"
+
+static struct workqueue_struct *irda_sir_wq;
+
+/* STATE MACHINE */
+
+/* substate handler of the config-fsm to handle the cases where we want
+ * to wait for transmit completion before changing the port configuration
+ */
+
+static int sirdev_tx_complete_fsm(struct sir_dev *dev)
+{
+ struct sir_fsm *fsm = &dev->fsm;
+ unsigned next_state, delay;
+ unsigned bytes_left;
+
+ do {
+ next_state = fsm->substate; /* default: stay in current substate */
+ delay = 0;
+
+ switch(fsm->substate) {
+
+ case SIRDEV_STATE_WAIT_XMIT:
+ if (dev->drv->chars_in_buffer)
+ bytes_left = dev->drv->chars_in_buffer(dev);
+ else
+ bytes_left = 0;
+ if (!bytes_left) {
+ next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
+ break;
+ }
+
+ if (dev->speed > 115200)
+ delay = (bytes_left*8*10000) / (dev->speed/100);
+ else if (dev->speed > 0)
+ delay = (bytes_left*10*10000) / (dev->speed/100);
+ else
+ delay = 0;
+ /* expected delay (usec) until remaining bytes are sent */
+ if (delay < 100) {
+ udelay(delay);
+ delay = 0;
+ break;
+ }
+ /* sleep some longer delay (msec) */
+ delay = (delay+999) / 1000;
+ break;
+
+ case SIRDEV_STATE_WAIT_UNTIL_SENT:
+ /* block until underlaying hardware buffer are empty */
+ if (dev->drv->wait_until_sent)
+ dev->drv->wait_until_sent(dev);
+ next_state = SIRDEV_STATE_TX_DONE;
+ break;
+
+ case SIRDEV_STATE_TX_DONE:
+ return 0;
+
+ default:
+ IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ fsm->substate = next_state;
+ } while (delay == 0);
+ return delay;
+}
+
+/*
+ * Function sirdev_config_fsm
+ *
+ * State machine to handle the configuration of the device (and attached dongle, if any).
+ * This handler is scheduled for execution in kIrDAd context, so we can sleep.
+ * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
+ * long. Instead, for longer delays we start a timer to reschedule us later.
+ * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
+ * Both must be unlocked/restarted on completion - but only on final exit.
+ */
+
+static void sirdev_config_fsm(void *data)
+{
+ struct sir_dev *dev = data;
+ struct sir_fsm *fsm = &dev->fsm;
+ int next_state;
+ int ret = -1;
+ unsigned delay;
+
+ IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
+
+ do {
+ IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
+ __FUNCTION__, fsm->state, fsm->substate);
+
+ next_state = fsm->state;
+ delay = 0;
+
+ switch(fsm->state) {
+
+ case SIRDEV_STATE_DONGLE_OPEN:
+ if (dev->dongle_drv != NULL) {
+ ret = sirdev_put_dongle(dev);
+ if (ret) {
+ fsm->result = -EINVAL;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+
+ /* Initialize dongle */
+ ret = sirdev_get_dongle(dev, fsm->param);
+ if (ret) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+
+ /* Dongles are powered through the modem control lines which
+ * were just set during open. Before resetting, let's wait for
+ * the power to stabilize. This is what some dongle drivers did
+ * in open before, while others didn't - should be safe anyway.
+ */
+
+ delay = 50;
+ fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+ next_state = SIRDEV_STATE_DONGLE_RESET;
+
+ fsm->param = 9600;
+
+ break;
+
+ case SIRDEV_STATE_DONGLE_CLOSE:
+ /* shouldn't we just treat this as success=? */
+ if (dev->dongle_drv == NULL) {
+ fsm->result = -EINVAL;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+
+ ret = sirdev_put_dongle(dev);
+ if (ret) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_SET_DTR_RTS:
+ ret = sirdev_set_dtr_rts(dev,
+ (fsm->param&0x02) ? TRUE : FALSE,
+ (fsm->param&0x01) ? TRUE : FALSE);
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_SET_SPEED:
+ fsm->substate = SIRDEV_STATE_WAIT_XMIT;
+ next_state = SIRDEV_STATE_DONGLE_CHECK;
+ break;
+
+ case SIRDEV_STATE_DONGLE_CHECK:
+ ret = sirdev_tx_complete_fsm(dev);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ if ((delay=ret) != 0)
+ break;
+
+ if (dev->dongle_drv) {
+ fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+ next_state = SIRDEV_STATE_DONGLE_RESET;
+ }
+ else {
+ dev->speed = fsm->param;
+ next_state = SIRDEV_STATE_PORT_SPEED;
+ }
+ break;
+
+ case SIRDEV_STATE_DONGLE_RESET:
+ if (dev->dongle_drv->reset) {
+ ret = dev->dongle_drv->reset(dev);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+ else
+ ret = 0;
+ if ((delay=ret) == 0) {
+ /* set serial port according to dongle default speed */
+ if (dev->drv->set_speed)
+ dev->drv->set_speed(dev, dev->speed);
+ fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
+ next_state = SIRDEV_STATE_DONGLE_SPEED;
+ }
+ break;
+
+ case SIRDEV_STATE_DONGLE_SPEED:
+ if (dev->dongle_drv->reset) {
+ ret = dev->dongle_drv->set_speed(dev, fsm->param);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+ else
+ ret = 0;
+ if ((delay=ret) == 0)
+ next_state = SIRDEV_STATE_PORT_SPEED;
+ break;
+
+ case SIRDEV_STATE_PORT_SPEED:
+ /* Finally we are ready to change the serial port speed */
+ if (dev->drv->set_speed)
+ dev->drv->set_speed(dev, dev->speed);
+ dev->new_speed = 0;
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_DONE:
+ /* Signal network layer so it can send more frames */
+ netif_wake_queue(dev->netdev);
+ next_state = SIRDEV_STATE_COMPLETE;
+ break;
+
+ default:
+ IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+ fsm->result = -EINVAL;
+ /* fall thru */
+
+ case SIRDEV_STATE_ERROR:
+ IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
+
+#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
+ netif_stop_queue(dev->netdev);
+#else
+ netif_wake_queue(dev->netdev);
+#endif
+ /* fall thru */
+
+ case SIRDEV_STATE_COMPLETE:
+ /* config change finished, so we are not busy any longer */
+ sirdev_enable_rx(dev);
+ up(&fsm->sem);
+ return;
+ }
+ fsm->state = next_state;
+ } while(!delay);
+
+ queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
+}
+
+/* schedule some device configuration task for execution by kIrDAd
+ * on behalf of the above state machine.
+ * can be called from process or interrupt/tasklet context.
+ */
+
+int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
+{
+ struct sir_fsm *fsm = &dev->fsm;
+
+ IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
+
+ if (down_trylock(&fsm->sem)) {
+ if (in_interrupt() || in_atomic() || irqs_disabled()) {
+ IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
+ return -EWOULDBLOCK;
+ } else
+ down(&fsm->sem);
+ }
+
+ if (fsm->state == SIRDEV_STATE_DEAD) {
+ /* race with sirdev_close should never happen */
+ IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
+ up(&fsm->sem);
+ return -ESTALE; /* or better EPIPE? */
+ }
+
+ netif_stop_queue(dev->netdev);
+ atomic_set(&dev->enable_rx, 0);
+
+ fsm->state = initial_state;
+ fsm->param = param;
+ fsm->result = 0;
+
+ INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
+ queue_work(irda_sir_wq, &fsm->work);
+ return 0;
+}
+
+
/***************************************************************************/
void sirdev_enable_rx(struct sir_dev *dev)
@@ -619,10 +911,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
spin_lock_init(&dev->tx_lock);
init_MUTEX(&dev->fsm.sem);
- INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
- dev->fsm.rq.pending = 0;
- init_timer(&dev->fsm.rq.timer);
-
dev->drv = drv;
dev->netdev = ndev;
@@ -682,3 +970,22 @@ int sirdev_put_instance(struct sir_dev *dev)
}
EXPORT_SYMBOL(sirdev_put_instance);
+static int __init sir_wq_init(void)
+{
+ irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
+ if (!irda_sir_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit sir_wq_exit(void)
+{
+ destroy_workqueue(irda_sir_wq);
+}
+
+module_init(sir_wq_init);
+module_exit(sir_wq_exit);
+
+MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
+MODULE_DESCRIPTION("IrDA SIR core");
+MODULE_LICENSE("GPL");
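
Inside the new sirdev_tx_complete_fsm() above, the SIRDEV_STATE_WAIT_XMIT case estimates how long the UART still needs to drain: roughly ten bits on the wire per buffered byte at SIR speeds, expressed in microseconds. The same integer arithmetic as a quick stand-alone check:

#include <stdio.h>

int main(void)
{
	unsigned bytes_left = 16;
	unsigned speed = 9600;		/* bits per second */

	/* ten bits per byte (start + 8 data + stop), scaled to microseconds
	 * with the same integer expression the driver uses */
	unsigned delay_usec = (bytes_left * 10 * 10000) / (speed / 100);

	printf("%u bytes at %u baud ~ %u usec\n",
	       bytes_left, speed, delay_usec);
	return 0;
}
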
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
deleted file mode 100644
index e3904d6bfecd..000000000000
--- a/drivers/net/irda/sir_kthread.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*********************************************************************
- *
- * sir_kthread.c: dedicated thread to process scheduled
- * sir device setup requests
- *
- * Copyright (c) 2002 Martin Diehl
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- ********************************************************************/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <linux/init.h>
-#include <linux/smp_lock.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-
-#include <net/irda/irda.h>
-
-#include "sir-dev.h"
-
-/**************************************************************************
- *
- * kIrDAd kernel thread and config state machine
- *
- */
-
-struct irda_request_queue {
- struct list_head request_list;
- spinlock_t lock;
- task_t *thread;
- struct completion exit;
- wait_queue_head_t kick, done;
- atomic_t num_pending;
-};
-
-static struct irda_request_queue irda_rq_queue;
-
-static int irda_queue_request(struct irda_request *rq)
-{
- int ret = 0;
- unsigned long flags;
-
- if (!test_and_set_bit(0, &rq->pending)) {
- spin_lock_irqsave(&irda_rq_queue.lock, flags);
- list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
- wake_up(&irda_rq_queue.kick);
- atomic_inc(&irda_rq_queue.num_pending);
- spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
- ret = 1;
- }
- return ret;
-}
-
-static void irda_request_timer(unsigned long data)
-{
- struct irda_request *rq = (struct irda_request *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&irda_rq_queue.lock, flags);
- list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
- wake_up(&irda_rq_queue.kick);
- spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
-}
-
-static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
-{
- int ret = 0;
- struct timer_list *timer = &rq->timer;
-
- if (!test_and_set_bit(0, &rq->pending)) {
- timer->expires = jiffies + delay;
- timer->function = irda_request_timer;
- timer->data = (unsigned long)rq;
- atomic_inc(&irda_rq_queue.num_pending);
- add_timer(timer);
- ret = 1;
- }
- return ret;
-}
-
-static void run_irda_queue(void)
-{
- unsigned long flags;
- struct list_head *entry, *tmp;
- struct irda_request *rq;
-
- spin_lock_irqsave(&irda_rq_queue.lock, flags);
- list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
- rq = list_entry(entry, struct irda_request, lh_request);
- list_del_init(entry);
- spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
-
- clear_bit(0, &rq->pending);
- rq->func(rq->data);
-
- if (atomic_dec_and_test(&irda_rq_queue.num_pending))
- wake_up(&irda_rq_queue.done);
-
- spin_lock_irqsave(&irda_rq_queue.lock, flags);
- }
- spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
-}
-
-static int irda_thread(void *startup)
-{
- DECLARE_WAITQUEUE(wait, current);
-
- daemonize("kIrDAd");
-
- irda_rq_queue.thread = current;
-
- complete((struct completion *)startup);
-
- while (irda_rq_queue.thread != NULL) {
-
- /* We use TASK_INTERRUPTIBLE, rather than
- * TASK_UNINTERRUPTIBLE. Andrew Morton made this
- * change ; he told me that it is safe, because "signal
- * blocking is now handled in daemonize()", he added
- * that the problem is that "uninterruptible sleep
- * contributes to load average", making user worry.
- * Jean II */
- set_task_state(current, TASK_INTERRUPTIBLE);
- add_wait_queue(&irda_rq_queue.kick, &wait);
- if (list_empty(&irda_rq_queue.request_list))
- schedule();
- else
- __set_task_state(current, TASK_RUNNING);
- remove_wait_queue(&irda_rq_queue.kick, &wait);
-
- /* make swsusp happy with our thread */
- try_to_freeze();
-
- run_irda_queue();
- }
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
- reparent_to_init();
-#endif
- complete_and_exit(&irda_rq_queue.exit, 0);
- /* never reached */
- return 0;
-}
-
-
-static void flush_irda_queue(void)
-{
- if (atomic_read(&irda_rq_queue.num_pending)) {
-
- DECLARE_WAITQUEUE(wait, current);
-
- if (!list_empty(&irda_rq_queue.request_list))
- run_irda_queue();
-
- set_task_state(current, TASK_UNINTERRUPTIBLE);
- add_wait_queue(&irda_rq_queue.done, &wait);
- if (atomic_read(&irda_rq_queue.num_pending))
- schedule();
- else
- __set_task_state(current, TASK_RUNNING);
- remove_wait_queue(&irda_rq_queue.done, &wait);
- }
-}
-
-/* substate handler of the config-fsm to handle the cases where we want
- * to wait for transmit completion before changing the port configuration
- */
-
-static int irda_tx_complete_fsm(struct sir_dev *dev)
-{
- struct sir_fsm *fsm = &dev->fsm;
- unsigned next_state, delay;
- unsigned bytes_left;
-
- do {
- next_state = fsm->substate; /* default: stay in current substate */
- delay = 0;
-
- switch(fsm->substate) {
-
- case SIRDEV_STATE_WAIT_XMIT:
- if (dev->drv->chars_in_buffer)
- bytes_left = dev->drv->chars_in_buffer(dev);
- else
- bytes_left = 0;
- if (!bytes_left) {
- next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
- break;
- }
-
- if (dev->speed > 115200)
- delay = (bytes_left*8*10000) / (dev->speed/100);
- else if (dev->speed > 0)
- delay = (bytes_left*10*10000) / (dev->speed/100);
- else
- delay = 0;
- /* expected delay (usec) until remaining bytes are sent */
- if (delay < 100) {
- udelay(delay);
- delay = 0;
- break;
- }
- /* sleep some longer delay (msec) */
- delay = (delay+999) / 1000;
- break;
-
- case SIRDEV_STATE_WAIT_UNTIL_SENT:
- /* block until underlaying hardware buffer are empty */
- if (dev->drv->wait_until_sent)
- dev->drv->wait_until_sent(dev);
- next_state = SIRDEV_STATE_TX_DONE;
- break;
-
- case SIRDEV_STATE_TX_DONE:
- return 0;
-
- default:
- IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
- return -EINVAL;
- }
- fsm->substate = next_state;
- } while (delay == 0);
- return delay;
-}
-
-/*
- * Function irda_config_fsm
- *
- * State machine to handle the configuration of the device (and attached dongle, if any).
- * This handler is scheduled for execution in kIrDAd context, so we can sleep.
- * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
- * long. Instead, for longer delays we start a timer to reschedule us later.
- * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
- * Both must be unlocked/restarted on completion - but only on final exit.
- */
-
-static void irda_config_fsm(void *data)
-{
- struct sir_dev *dev = data;
- struct sir_fsm *fsm = &dev->fsm;
- int next_state;
- int ret = -1;
- unsigned delay;
-
- IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
-
- do {
- IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
- __FUNCTION__, fsm->state, fsm->substate);
-
- next_state = fsm->state;
- delay = 0;
-
- switch(fsm->state) {
-
- case SIRDEV_STATE_DONGLE_OPEN:
- if (dev->dongle_drv != NULL) {
- ret = sirdev_put_dongle(dev);
- if (ret) {
- fsm->result = -EINVAL;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
- }
-
- /* Initialize dongle */
- ret = sirdev_get_dongle(dev, fsm->param);
- if (ret) {
- fsm->result = ret;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
-
- /* Dongles are powered through the modem control lines which
- * were just set during open. Before resetting, let's wait for
- * the power to stabilize. This is what some dongle drivers did
- * in open before, while others didn't - should be safe anyway.
- */
-
- delay = 50;
- fsm->substate = SIRDEV_STATE_DONGLE_RESET;
- next_state = SIRDEV_STATE_DONGLE_RESET;
-
- fsm->param = 9600;
-
- break;
-
- case SIRDEV_STATE_DONGLE_CLOSE:
- /* shouldn't we just treat this as success=? */
- if (dev->dongle_drv == NULL) {
- fsm->result = -EINVAL;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
-
- ret = sirdev_put_dongle(dev);
- if (ret) {
- fsm->result = ret;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
- next_state = SIRDEV_STATE_DONE;
- break;
-
- case SIRDEV_STATE_SET_DTR_RTS:
- ret = sirdev_set_dtr_rts(dev,
- (fsm->param&0x02) ? TRUE : FALSE,
- (fsm->param&0x01) ? TRUE : FALSE);
- next_state = SIRDEV_STATE_DONE;
- break;
-
- case SIRDEV_STATE_SET_SPEED:
- fsm->substate = SIRDEV_STATE_WAIT_XMIT;
- next_state = SIRDEV_STATE_DONGLE_CHECK;
- break;
-
- case SIRDEV_STATE_DONGLE_CHECK:
- ret = irda_tx_complete_fsm(dev);
- if (ret < 0) {
- fsm->result = ret;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
- if ((delay=ret) != 0)
- break;
-
- if (dev->dongle_drv) {
- fsm->substate = SIRDEV_STATE_DONGLE_RESET;
- next_state = SIRDEV_STATE_DONGLE_RESET;
- }
- else {
- dev->speed = fsm->param;
- next_state = SIRDEV_STATE_PORT_SPEED;
- }
- break;
-
- case SIRDEV_STATE_DONGLE_RESET:
- if (dev->dongle_drv->reset) {
- ret = dev->dongle_drv->reset(dev);
- if (ret < 0) {
- fsm->result = ret;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
- }
- else
- ret = 0;
- if ((delay=ret) == 0) {
- /* set serial port according to dongle default speed */
- if (dev->drv->set_speed)
- dev->drv->set_speed(dev, dev->speed);
- fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
- next_state = SIRDEV_STATE_DONGLE_SPEED;
- }
- break;
-
- case SIRDEV_STATE_DONGLE_SPEED:
- if (dev->dongle_drv->reset) {
- ret = dev->dongle_drv->set_speed(dev, fsm->param);
- if (ret < 0) {
- fsm->result = ret;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
- }
- else
- ret = 0;
- if ((delay=ret) == 0)
- next_state = SIRDEV_STATE_PORT_SPEED;
- break;
-
- case SIRDEV_STATE_PORT_SPEED:
- /* Finally we are ready to change the serial port speed */
- if (dev->drv->set_speed)
- dev->drv->set_speed(dev, dev->speed);
- dev->new_speed = 0;
- next_state = SIRDEV_STATE_DONE;
- break;
-
- case SIRDEV_STATE_DONE:
- /* Signal network layer so it can send more frames */
- netif_wake_queue(dev->netdev);
- next_state = SIRDEV_STATE_COMPLETE;
- break;
-
- default:
- IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
- fsm->result = -EINVAL;
- /* fall thru */
-
- case SIRDEV_STATE_ERROR:
- IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
-
-#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
- netif_stop_queue(dev->netdev);
-#else
- netif_wake_queue(dev->netdev);
-#endif
- /* fall thru */
-
- case SIRDEV_STATE_COMPLETE:
- /* config change finished, so we are not busy any longer */
- sirdev_enable_rx(dev);
- up(&fsm->sem);
- return;
- }
- fsm->state = next_state;
- } while(!delay);
-
- irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
-}
-
-/* schedule some device configuration task for execution by kIrDAd
- * on behalf of the above state machine.
- * can be called from process or interrupt/tasklet context.
- */
-
-int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
-{
- struct sir_fsm *fsm = &dev->fsm;
- int xmit_was_down;
-
- IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
-
- if (down_trylock(&fsm->sem)) {
- if (in_interrupt() || in_atomic() || irqs_disabled()) {
- IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
- return -EWOULDBLOCK;
- } else
- down(&fsm->sem);
- }
-
- if (fsm->state == SIRDEV_STATE_DEAD) {
- /* race with sirdev_close should never happen */
- IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
- up(&fsm->sem);
- return -ESTALE; /* or better EPIPE? */
- }
-
- xmit_was_down = netif_queue_stopped(dev->netdev);
- netif_stop_queue(dev->netdev);
- atomic_set(&dev->enable_rx, 0);
-
- fsm->state = initial_state;
- fsm->param = param;
- fsm->result = 0;
-
- INIT_LIST_HEAD(&fsm->rq.lh_request);
- fsm->rq.pending = 0;
- fsm->rq.func = irda_config_fsm;
- fsm->rq.data = dev;
-
- if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */
- atomic_set(&dev->enable_rx, 1);
- if (!xmit_was_down)
- netif_wake_queue(dev->netdev);
- up(&fsm->sem);
- return -EAGAIN;
- }
- return 0;
-}
-
-static int __init irda_thread_create(void)
-{
- struct completion startup;
- int pid;
-
- spin_lock_init(&irda_rq_queue.lock);
- irda_rq_queue.thread = NULL;
- INIT_LIST_HEAD(&irda_rq_queue.request_list);
- init_waitqueue_head(&irda_rq_queue.kick);
- init_waitqueue_head(&irda_rq_queue.done);
- atomic_set(&irda_rq_queue.num_pending, 0);
-
- init_completion(&startup);
- pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
- if (pid <= 0)
- return -EAGAIN;
- else
- wait_for_completion(&startup);
-
- return 0;
-}
-
-static void __exit irda_thread_join(void)
-{
- if (irda_rq_queue.thread) {
- flush_irda_queue();
- init_completion(&irda_rq_queue.exit);
- irda_rq_queue.thread = NULL;
- wake_up(&irda_rq_queue.kick);
- wait_for_completion(&irda_rq_queue.exit);
- }
-}
-
-module_init(irda_thread_create);
-module_exit(irda_thread_join);
-
-MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
-MODULE_DESCRIPTION("IrDA SIR core");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 58f76cefbc83..a4674044bd6f 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -54,6 +54,7 @@
#include <linux/rtnetlink.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>
+#include <linux/pnp.h>
#include <linux/platform_device.h>
#include <asm/io.h>
@@ -358,6 +359,16 @@ static inline void register_bank(int iobase, int bank)
iobase + IRCC_MASTER);
}
+#ifdef CONFIG_PNP
+/* PNP hotplug support */
+static const struct pnp_device_id smsc_ircc_pnp_table[] = {
+ { .id = "SMCf010", .driver_data = 0 },
+ /* and presumably others */
+ { }
+};
+MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
+#endif
+
/*******************************************************************************
*
@@ -2072,7 +2083,8 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
/* PROBING
*
- *
+ * REVISIT we can be told about the device by PNP, and should use that info
+ * instead of probing hardware and creating a platform_device ...
*/
static int __init smsc_ircc_look_for_chips(void)
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 93c494bcd18d..b32765215f75 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -139,8 +139,9 @@ bad_clone_list[] __initdata = {
#if defined(CONFIG_PLAT_MAPPI)
# define DCR_VAL 0x4b
-#elif defined(CONFIG_PLAT_OAKS32R)
-# define DCR_VAL 0x48
+#elif defined(CONFIG_PLAT_OAKS32R) || \
+ defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
+# define DCR_VAL 0x48 /* 8-bit mode */
#else
# define DCR_VAL 0x49
#endif
@@ -396,10 +397,22 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
/* We must set the 8390 for word mode. */
outb_p(DCR_VAL, ioaddr + EN0_DCFG);
start_page = NESM_START_PG;
- stop_page = NESM_STOP_PG;
+
+ /*
+ * Realtek RTL8019AS datasheet says that the PSTOP register
+ * shouldn't exceed 0x60 in 8-bit mode.
+ * This chip can be identified by reading the signature from
+ * the remote byte count registers (otherwise write-only)...
+ */
+ if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */
+ inb(ioaddr + EN0_RCNTLO) == 0x50 &&
+ inb(ioaddr + EN0_RCNTHI) == 0x70)
+ stop_page = 0x60;
+ else
+ stop_page = NESM_STOP_PG;
} else {
start_page = NE1SM_START_PG;
- stop_page = NE1SM_STOP_PG;
+ stop_page = NE1SM_STOP_PG;
}
#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R)
@@ -509,15 +522,9 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
ei_status.name = name;
ei_status.tx_start_page = start_page;
ei_status.stop_page = stop_page;
-#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
- wordlength = 1;
-#endif
-#ifdef CONFIG_PLAT_OAKS32R
- ei_status.word16 = 0;
-#else
- ei_status.word16 = (wordlength == 2);
-#endif
+ /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */
+ ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01));
ei_status.rx_start_page = start_page + TX_PAGES;
#ifdef PACKETBUF_MEMSIZE
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 459443b572ce..1b236bdf6b92 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -60,8 +60,10 @@ int mdiobus_register(struct mii_bus *bus)
for (i = 0; i < PHY_MAX_ADDR; i++) {
struct phy_device *phydev;
- if (bus->phy_mask & (1 << i))
+ if (bus->phy_mask & (1 << i)) {
+ bus->phy_map[i] = NULL;
continue;
+ }
phydev = get_phy_device(bus, i);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index b82191d2bee1..f5a3bf4d959a 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -127,6 +127,7 @@ static const struct mii_chip_info {
} mii_chip_table[] = {
{ "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN },
{ "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN },
+ { "SiS 900 on Foxconn 661 7MI", 0x0143, 0xBC70, LAN },
{ "Altimata AC101LF PHY", 0x0022, 0x5520, LAN },
{ "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN },
{ "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 227df9876a2c..ffd267fab21d 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.2"
+#define DRV_VERSION "1.3"
#define PFX DRV_NAME " "
/*
@@ -79,6 +79,8 @@
#define NAPI_WEIGHT 64
#define PHY_RETRIES 1000
+#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
+
static const u32 default_msg =
NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
| NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
@@ -96,6 +98,10 @@ static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+static int idle_timeout = 100;
+module_param(idle_timeout, int, 0);
+MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)");
+
static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -122,6 +128,7 @@ MODULE_DEVICE_TABLE(pci, sky2_id_table);
/* Avoid conditionals by using array */
static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
+static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
/* This driver supports yukon2 chipset only */
static const char *yukon2_name[] = {
@@ -298,7 +305,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
- if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) {
+ if (sky2->autoneg == AUTONEG_ENABLE &&
+ (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -326,7 +334,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
if (sky2->autoneg == AUTONEG_ENABLE &&
- hw->chip_id == CHIP_ID_YUKON_XL) {
+ (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
ctrl &= ~PHY_M_PC_DSC_MSK;
ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
}
@@ -442,10 +450,11 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
/* set LED Function Control register */
- gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
- PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
- PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
- PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
+ PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
+ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
+ PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
/* set Polarity Control register */
gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
@@ -459,6 +468,25 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
/* restore page register */
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
break;
+ case CHIP_ID_YUKON_EC_U:
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+ /* select page 3 to access LED control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+ /* set LED Function Control register */
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
+ PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
+ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
+ PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
+
+ /* set Blink Rate in LED Timer Control Register */
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK,
+ ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
+ /* restore page register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+ break;
default:
/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
@@ -467,19 +495,21 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
}
- if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
/* apply fixes in PHY AFE */
- gm_phy_write(hw, port, 22, 255);
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
+
/* increase differential signal amplitude in 10BASE-T */
- gm_phy_write(hw, port, 24, 0xaa99);
- gm_phy_write(hw, port, 23, 0x2011);
+ gm_phy_write(hw, port, 0x18, 0xaa99);
+ gm_phy_write(hw, port, 0x17, 0x2011);
/* fix for IEEE A/B Symmetry failure in 1000BASE-T */
- gm_phy_write(hw, port, 24, 0xa204);
- gm_phy_write(hw, port, 23, 0x2002);
+ gm_phy_write(hw, port, 0x18, 0xa204);
+ gm_phy_write(hw, port, 0x17, 0x2002);
/* set page register to 0 */
- gm_phy_write(hw, port, 22, 0);
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
} else {
gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
@@ -553,6 +583,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
if (sky2->duplex == DUPLEX_FULL)
reg |= GM_GPCR_DUP_FULL;
+
+ /* turn off pause in 10/100mbps half duplex */
+ else if (sky2->speed != SPEED_1000 &&
+ hw->chip_id != CHIP_ID_YUKON_EC_U)
+ sky2->tx_pause = sky2->rx_pause = 0;
} else
reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
@@ -719,7 +754,7 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
{
struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
- sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE;
+ sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
return le;
}
@@ -735,7 +770,7 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
{
struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
- sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE;
+ sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
return le;
}
@@ -1050,7 +1085,7 @@ static int sky2_up(struct net_device *dev)
/* Enable interrupts from phy/mac for port */
imask = sky2_read32(hw, B0_IMSK);
- imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
+ imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
return 0;
@@ -1078,7 +1113,7 @@ err_out:
/* Modular subtraction in ring */
static inline int tx_dist(unsigned tail, unsigned head)
{
- return (head - tail) % TX_RING_SIZE;
+ return (head - tail) & (TX_RING_SIZE - 1);
}
/* Number of list elements available for next tx */
@@ -1255,7 +1290,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
le->opcode = OP_BUFFER | HW_OWNER;
fre = sky2->tx_ring
- + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE;
+ + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
pci_unmap_addr_set(fre, mapaddr, mapping);
}
@@ -1315,7 +1350,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct tx_ring_info *fre;
- fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE;
+ fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE);
pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
@@ -1401,7 +1436,7 @@ static int sky2_down(struct net_device *dev)
/* Disable port IRQ */
imask = sky2_read32(hw, B0_IMSK);
- imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
+ imask &= ~portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
/* turn off LED's */
@@ -1498,17 +1533,26 @@ static void sky2_link_up(struct sky2_port *sky2)
sky2_write8(hw, SK_REG(port, LNK_LED_REG),
LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
- if (hw->chip_id == CHIP_ID_YUKON_XL) {
+ if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) {
u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+ u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */
+
+ switch(sky2->speed) {
+ case SPEED_10:
+ led |= PHY_M_LEDC_INIT_CTRL(7);
+ break;
+
+ case SPEED_100:
+ led |= PHY_M_LEDC_STA1_CTRL(7);
+ break;
+
+ case SPEED_1000:
+ led |= PHY_M_LEDC_STA0_CTRL(7);
+ break;
+ }
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
- gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
- PHY_M_LEDC_INIT_CTRL(sky2->speed ==
- SPEED_10 ? 7 : 0) |
- PHY_M_LEDC_STA1_CTRL(sky2->speed ==
- SPEED_100 ? 7 : 0) |
- PHY_M_LEDC_STA0_CTRL(sky2->speed ==
- SPEED_1000 ? 7 : 0));
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, led);
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
}
@@ -1583,7 +1627,7 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
sky2->speed = sky2_phy_speed(hw, aux);
/* Pause bits are offset (9..8) */
- if (hw->chip_id == CHIP_ID_YUKON_XL)
+ if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
aux >>= 6;
sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
@@ -1859,35 +1903,28 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
static int sky2_status_intr(struct sky2_hw *hw, int to_do)
{
int work_done = 0;
+ u16 hwidx = sky2_read16(hw, STAT_PUT_IDX);
rmb();
- for(;;) {
+ while (hw->st_idx != hwidx) {
struct sky2_status_le *le = hw->st_le + hw->st_idx;
struct net_device *dev;
struct sky2_port *sky2;
struct sk_buff *skb;
u32 status;
u16 length;
- u8 link, opcode;
- opcode = le->opcode;
- if (!opcode)
- break;
- opcode &= ~HW_OWNER;
-
- hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
- le->opcode = 0;
+ hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
- link = le->link;
- BUG_ON(link >= 2);
- dev = hw->dev[link];
+ BUG_ON(le->link >= 2);
+ dev = hw->dev[le->link];
sky2 = netdev_priv(dev);
length = le->length;
status = le->status;
- switch (opcode) {
+ switch (le->opcode & ~HW_OWNER) {
case OP_RXSTAT:
skb = sky2_receive(sky2, length, status);
if (!skb)
@@ -1927,7 +1964,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
case OP_TXINDEXLE:
/* TX index reports status for both ports */
- sky2_tx_done(hw->dev[0], status & 0xffff);
+ BUILD_BUG_ON(TX_RING_SIZE > 0x1000);
+ sky2_tx_done(hw->dev[0], status & 0xfff);
if (hw->dev[1])
sky2_tx_done(hw->dev[1],
((status >> 24) & 0xff)
@@ -1937,8 +1975,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
default:
if (net_ratelimit())
printk(KERN_WARNING PFX
- "unknown status opcode 0x%x\n", opcode);
- break;
+ "unknown status opcode 0x%x\n", le->opcode);
+ goto exit_loop;
}
}
@@ -2089,12 +2127,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
*/
static void sky2_idle(unsigned long arg)
{
- struct net_device *dev = (struct net_device *) arg;
+ struct sky2_hw *hw = (struct sky2_hw *) arg;
+ struct net_device *dev = hw->dev[0];
- local_irq_disable();
if (__netif_rx_schedule_prep(dev))
__netif_rx_schedule(dev);
- local_irq_enable();
+
+ mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
}
@@ -2105,65 +2144,46 @@ static int sky2_poll(struct net_device *dev0, int *budget)
int work_done = 0;
u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
- restart_poll:
- if (unlikely(status & ~Y2_IS_STAT_BMU)) {
- if (status & Y2_IS_HW_ERR)
- sky2_hw_intr(hw);
-
- if (status & Y2_IS_IRQ_PHY1)
- sky2_phy_intr(hw, 0);
-
- if (status & Y2_IS_IRQ_PHY2)
- sky2_phy_intr(hw, 1);
+ if (status & Y2_IS_HW_ERR)
+ sky2_hw_intr(hw);
- if (status & Y2_IS_IRQ_MAC1)
- sky2_mac_intr(hw, 0);
+ if (status & Y2_IS_IRQ_PHY1)
+ sky2_phy_intr(hw, 0);
- if (status & Y2_IS_IRQ_MAC2)
- sky2_mac_intr(hw, 1);
+ if (status & Y2_IS_IRQ_PHY2)
+ sky2_phy_intr(hw, 1);
- if (status & Y2_IS_CHK_RX1)
- sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
+ if (status & Y2_IS_IRQ_MAC1)
+ sky2_mac_intr(hw, 0);
- if (status & Y2_IS_CHK_RX2)
- sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
+ if (status & Y2_IS_IRQ_MAC2)
+ sky2_mac_intr(hw, 1);
- if (status & Y2_IS_CHK_TXA1)
- sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
+ if (status & Y2_IS_CHK_RX1)
+ sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
- if (status & Y2_IS_CHK_TXA2)
- sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
- }
+ if (status & Y2_IS_CHK_RX2)
+ sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
- if (status & Y2_IS_STAT_BMU) {
- work_done += sky2_status_intr(hw, work_limit - work_done);
- *budget -= work_done;
- dev0->quota -= work_done;
+ if (status & Y2_IS_CHK_TXA1)
+ sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
- if (work_done >= work_limit)
- return 1;
+ if (status & Y2_IS_CHK_TXA2)
+ sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
+ if (status & Y2_IS_STAT_BMU)
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
- }
-
- mod_timer(&hw->idle_timer, jiffies + HZ);
- local_irq_disable();
- __netif_rx_complete(dev0);
+ work_done = sky2_status_intr(hw, work_limit);
+ *budget -= work_done;
+ dev0->quota -= work_done;
- status = sky2_read32(hw, B0_Y2_SP_LISR);
+ if (work_done >= work_limit)
+ return 1;
- if (unlikely(status)) {
- /* More work pending, try and keep going */
- if (__netif_rx_schedule_prep(dev0)) {
- __netif_rx_reschedule(dev0, work_done);
- status = sky2_read32(hw, B0_Y2_SP_EISR);
- local_irq_enable();
- goto restart_poll;
- }
- }
+ netif_rx_complete(dev0);
- local_irq_enable();
+ status = sky2_read32(hw, B0_Y2_SP_LISR);
return 0;
}
@@ -2244,13 +2264,6 @@ static int __devinit sky2_reset(struct sky2_hw *hw)
return -EOPNOTSUPP;
}
- /* This chip is new and not tested yet */
- if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
- pr_info(PFX "%s: is a version of Yukon 2 chipset that has not been tested yet.\n",
- pci_name(hw->pdev));
- pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n");
- }
-
/* disable ASF */
if (hw->chip_id <= CHIP_ID_YUKON_EC) {
sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
@@ -3302,7 +3315,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
- setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) dev);
+ setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
+ if (idle_timeout > 0)
+ mod_timer(&hw->idle_timer,
+ jiffies + msecs_to_jiffies(idle_timeout));
pci_set_drvdata(pdev, hw);
@@ -3342,6 +3358,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
del_timer_sync(&hw->idle_timer);
sky2_write32(hw, B0_IMSK, 0);
+ synchronize_irq(hw->pdev->irq);
+
dev0 = hw->dev[0];
dev1 = hw->dev[1];
if (dev1)
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index b026f5653f04..8012994c9b93 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -378,6 +378,9 @@ enum {
CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
+
+ CHIP_REV_YU_EC_U_A0 = 0,
+ CHIP_REV_YU_EC_U_A1 = 1,
};
/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 43f5e86fc559..394339d5e87c 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1652,6 +1652,8 @@ spider_net_enable_card(struct spider_net_card *card)
{ SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
{ SPIDER_NET_GMRWOLCTRL, 0 },
+ { SPIDER_NET_GTESTMD, 0x10000000 },
+ { SPIDER_NET_GTTQMSK, 0x00400040 },
{ SPIDER_NET_GTESTMD, 0 },
{ SPIDER_NET_GMACINTEN, 0 },
@@ -1792,15 +1794,7 @@ spider_net_setup_phy(struct spider_net_card *card)
if (phy->def->ops->setup_forced)
phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
- /* the following two writes could be moved to sungem_phy.c */
- /* enable fiber mode */
- spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x9020);
- /* LEDs active in both modes, autosense prio = fiber */
- spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
-
- /* switch off fibre autoneg */
- spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01);
- spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004);
+ phy->def->ops->enable_fiber(phy);
phy->def->ops->read_link(phy);
pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 5922b529a048..3b8d951cf73c 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -120,6 +120,8 @@ extern char spider_net_driver_name[];
#define SPIDER_NET_GMRUAFILnR 0x00000500
#define SPIDER_NET_GMRUA0FIL15R 0x00000578
+#define SPIDER_NET_GTTQMSK 0x00000934
+
/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
* 0x00000b.. for DMA controller B, etc. */
#define SPIDER_NET_GDADCHA 0x00000a00
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 046371ee5bbe..b2ddd5e79303 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -329,6 +329,30 @@ static int bcm5421_init(struct mii_phy* phy)
return 0;
}
+static int bcm5421_enable_fiber(struct mii_phy* phy)
+{
+ /* enable fiber mode */
+ phy_write(phy, MII_NCONFIG, 0x9020);
+ /* LEDs active in both modes, autosense prio = fiber */
+ phy_write(phy, MII_NCONFIG, 0x945f);
+
+ /* switch off fibre autoneg */
+ phy_write(phy, MII_NCONFIG, 0xfc01);
+ phy_write(phy, 0x0b, 0x0004);
+
+ return 0;
+}
+
+static int bcm5461_enable_fiber(struct mii_phy* phy)
+{
+ phy_write(phy, MII_NCONFIG, 0xfc0c);
+ phy_write(phy, MII_BMCR, 0x4140);
+ phy_write(phy, MII_NCONFIG, 0xfc0b);
+ phy_write(phy, MII_BMCR, 0x0140);
+
+ return 0;
+}
+
static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
{
u16 ctl, adv;
@@ -762,6 +786,7 @@ static struct mii_phy_ops bcm5421_phy_ops = {
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
+ .enable_fiber = bcm5421_enable_fiber,
};
static struct mii_phy_def bcm5421_phy_def = {
@@ -792,6 +817,25 @@ static struct mii_phy_def bcm5421k2_phy_def = {
.ops = &bcm5421k2_phy_ops
};
+static struct mii_phy_ops bcm5461_phy_ops = {
+ .init = bcm5421_init,
+ .suspend = generic_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+ .setup_forced = bcm54xx_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = bcm54xx_read_link,
+ .enable_fiber = bcm5461_enable_fiber,
+};
+
+static struct mii_phy_def bcm5461_phy_def = {
+ .phy_id = 0x002060c0,
+ .phy_id_mask = 0xfffffff0,
+ .name = "BCM5461",
+ .features = MII_GBIT_FEATURES,
+ .magic_aneg = 1,
+ .ops = &bcm5461_phy_ops
+};
+
/* Broadcom BCM 5462 built-in Vesta */
static struct mii_phy_ops bcm5462V_phy_ops = {
.init = bcm5421_init,
@@ -857,6 +901,7 @@ static struct mii_phy_def* mii_phy_table[] = {
&bcm5411_phy_def,
&bcm5421_phy_def,
&bcm5421k2_phy_def,
+ &bcm5461_phy_def,
&bcm5462V_phy_def,
&marvell_phy_def,
&genmii_phy_def,
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h
index 430544496c52..69e125197fcf 100644
--- a/drivers/net/sungem_phy.h
+++ b/drivers/net/sungem_phy.h
@@ -12,6 +12,7 @@ struct mii_phy_ops
int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
int (*poll_link)(struct mii_phy *phy);
int (*read_link)(struct mii_phy *phy);
+ int (*enable_fiber)(struct mii_phy *phy);
};
/* Structure used to statically define an mii/gii based PHY */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index beeb612be98f..2bd9592b75cd 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -8454,6 +8454,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
tx_len = 1514;
skb = dev_alloc_skb(tx_len);
+ if (!skb)
+ return -ENOMEM;
+
tx_data = skb_put(skb, tx_len);
memcpy(tx_data, tp->dev->dev_addr, 6);
memset(tx_data + 6, 0x0, 8);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 9a06e61df0a2..e2982a83ae42 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -939,9 +939,9 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
return 0;
}
-static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
+static int bcm43xx_geo_init(struct bcm43xx_private *bcm)
{
- struct ieee80211_geo geo;
+ struct ieee80211_geo *geo;
struct ieee80211_channel *chan;
int have_a = 0, have_bg = 0;
int i;
@@ -949,7 +949,10 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
struct bcm43xx_phyinfo *phy;
const char *iso_country;
- memset(&geo, 0, sizeof(geo));
+ geo = kzalloc(sizeof(*geo), GFP_KERNEL);
+ if (!geo)
+ return -ENOMEM;
+
for (i = 0; i < bcm->nr_80211_available; i++) {
phy = &(bcm->core_80211_ext[i].phy);
switch (phy->type) {
@@ -967,31 +970,36 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
iso_country = bcm43xx_locale_iso(bcm->sprom.locale);
if (have_a) {
- for (i = 0, channel = 0; channel < 201; channel++) {
- chan = &geo.a[i++];
+ for (i = 0, channel = IEEE80211_52GHZ_MIN_CHANNEL;
+ channel <= IEEE80211_52GHZ_MAX_CHANNEL; channel++) {
+ chan = &geo->a[i++];
chan->freq = bcm43xx_channel_to_freq_a(channel);
chan->channel = channel;
}
- geo.a_channels = i;
+ geo->a_channels = i;
}
if (have_bg) {
- for (i = 0, channel = 1; channel < 15; channel++) {
- chan = &geo.bg[i++];
+ for (i = 0, channel = IEEE80211_24GHZ_MIN_CHANNEL;
+ channel <= IEEE80211_24GHZ_MAX_CHANNEL; channel++) {
+ chan = &geo->bg[i++];
chan->freq = bcm43xx_channel_to_freq_bg(channel);
chan->channel = channel;
}
- geo.bg_channels = i;
+ geo->bg_channels = i;
}
- memcpy(geo.name, iso_country, 2);
+ memcpy(geo->name, iso_country, 2);
if (0 /*TODO: Outdoor use only */)
- geo.name[2] = 'O';
+ geo->name[2] = 'O';
else if (0 /*TODO: Indoor use only */)
- geo.name[2] = 'I';
+ geo->name[2] = 'I';
else
- geo.name[2] = ' ';
- geo.name[3] = '\0';
+ geo->name[2] = ' ';
+ geo->name[3] = '\0';
+
+ ieee80211_set_geo(bcm->ieee, geo);
+ kfree(geo);
- ieee80211_set_geo(bcm->ieee, &geo);
+ return 0;
}
/* DummyTransmission function, as documented on
@@ -3479,16 +3487,17 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
goto err_80211_unwind;
bcm43xx_wireless_core_disable(bcm);
}
+ err = bcm43xx_geo_init(bcm);
+ if (err)
+ goto err_80211_unwind;
bcm43xx_pctl_set_crystal(bcm, 0);
/* Set the MAC address in the networking subsystem */
- if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A)
+ if (is_valid_ether_addr(bcm->sprom.et1macaddr))
memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6);
else
memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6);
- bcm43xx_geo_init(bcm);
-
snprintf(bcm->nick, IW_ESSID_MAX_SIZE,
"Broadcom %04X", bcm->chip_id);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index eca79a38594a..30a202b258b5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -118,12 +118,14 @@ int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm,
static inline
int bcm43xx_is_valid_channel_a(u8 channel)
{
- return (channel <= 200);
+ return (channel >= IEEE80211_52GHZ_MIN_CHANNEL
+ && channel <= IEEE80211_52GHZ_MAX_CHANNEL);
}
static inline
int bcm43xx_is_valid_channel_bg(u8 channel)
{
- return (channel >= 1 && channel <= 14);
+ return (channel >= IEEE80211_24GHZ_MIN_CHANNEL
+ && channel <= IEEE80211_24GHZ_MAX_CHANNEL);
}
static inline
int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index 33137165727f..b0abac515530 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -1287,7 +1287,7 @@ static void bcm43xx_phy_initg(struct bcm43xx_private *bcm)
if (radio->revision == 8)
bcm43xx_phy_write(bcm, 0x0805, 0x3230);
bcm43xx_phy_init_pctl(bcm);
- if (bcm->chip_id == 0x4306 && bcm->chip_package != 2) {
+ if (bcm->chip_id == 0x4306 && bcm->chip_package == 2) {
bcm43xx_phy_write(bcm, 0x0429,
bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF);
bcm43xx_phy_write(bcm, 0x04C3,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index 3edbb481a0a0..b45063974ae9 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -182,8 +182,11 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev,
mode = BCM43xx_INITIAL_IWMODE;
bcm43xx_lock_mmio(bcm, flags);
- if (bcm->ieee->iw_mode != mode)
- bcm43xx_set_iwmode(bcm, mode);
+ if (bcm->initialized) {
+ if (bcm->ieee->iw_mode != mode)
+ bcm43xx_set_iwmode(bcm, mode);
+ } else
+ bcm->ieee->iw_mode = mode;
bcm43xx_unlock_mmio(bcm, flags);
return 0;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index cb30d9c1153d..0c9c2f400bf6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -219,6 +219,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahc->flags |= AHC_39BIT_ADDRESSING;
} else {
if (dma_set_mask(dev, DMA_32BIT_MASK)) {
+ ahc_free(ahc);
printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
return (-ENODEV);
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 5f586140e057..3adecef21783 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -2036,12 +2036,12 @@ ahc_pci_resume(struct ahc_softc *ahc)
* that the OS doesn't know about and rely on our chip
* reset handler to handle the rest.
*/
- ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4,
- ahc->bus_softc.pci_softc.devconfig);
- ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1,
- ahc->bus_softc.pci_softc.command);
- ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1,
- ahc->bus_softc.pci_softc.csize_lattime);
+ ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
+ ahc->bus_softc.pci_softc.devconfig, /*bytes*/4);
+ ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
+ ahc->bus_softc.pci_softc.command, /*bytes*/1);
+ ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
+ ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1);
if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) {
struct seeprom_descriptor sd;
u_int sxfrctl1;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 0a8ad37ae899..2e9be83a697f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -739,7 +739,8 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
struct viosrp_adapter_info *req;
struct srp_event_struct *evt_struct;
-
+ dma_addr_t addr;
+
evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct) {
printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
@@ -757,10 +758,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
req->common.length = sizeof(hostdata->madapter_info);
- req->buffer = dma_map_single(hostdata->dev,
- &hostdata->madapter_info,
- sizeof(hostdata->madapter_info),
- DMA_BIDIRECTIONAL);
+ req->buffer = addr = dma_map_single(hostdata->dev,
+ &hostdata->madapter_info,
+ sizeof(hostdata->madapter_info),
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(req->buffer)) {
printk(KERN_ERR
@@ -770,8 +771,13 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
return;
}
- if (ibmvscsi_send_srp_event(evt_struct, hostdata))
+ if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
+ dma_unmap_single(hostdata->dev,
+ addr,
+ sizeof(hostdata->madapter_info),
+ DMA_BIDIRECTIONAL);
+ }
};
/**
@@ -1259,6 +1265,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
{
struct viosrp_host_config *host_config;
struct srp_event_struct *evt_struct;
+ dma_addr_t addr;
int rc;
evt_struct = get_event_struct(&hostdata->pool);
@@ -1279,8 +1286,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
memset(host_config, 0x00, sizeof(*host_config));
host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
host_config->common.length = length;
- host_config->buffer = dma_map_single(hostdata->dev, buffer, length,
- DMA_BIDIRECTIONAL);
+ host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
+ length,
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(host_config->buffer)) {
printk(KERN_ERR
@@ -1291,11 +1299,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
init_completion(&evt_struct->comp);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
- if (rc == 0) {
+ if (rc == 0)
wait_for_completion(&evt_struct->comp);
- dma_unmap_single(hostdata->dev, host_config->buffer,
- length, DMA_BIDIRECTIONAL);
- }
+ dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
return rc;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fad607b2e6f4..ee22173fce43 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -27,7 +27,6 @@ void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
uint32_t);
void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 8932b1be2b60..41cf5d3ea6ce 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -113,6 +113,7 @@ struct lpfc_nodelist {
#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
NPR list */
#define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */
+#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
/* Defines for list searchs */
#define NLP_SEARCH_MAPPED 0x1 /* search mapped */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4813beaaca8f..283b7d824c34 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -302,10 +302,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
goto fail_free_mbox;
- /*
- * set_slim mailbox command needs to execute first,
- * queue this command to be processed later.
- */
mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
mbox->context2 = ndlp;
@@ -781,25 +777,26 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (disc && phba->num_disc_nodes) {
/* Check to see if there are more PLOGIs to be sent */
lpfc_more_plogi(phba);
- }
- if (phba->num_disc_nodes == 0) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ if (phba->num_disc_nodes == 0) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(phba->host->host_lock);
- lpfc_can_disctmo(phba);
- if (phba->fc_flag & FC_RSCN_MODE) {
- /* Check to see if more RSCNs came in while we were
- * processing this one.
- */
- if ((phba->fc_rscn_id_cnt == 0) &&
- (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(phba->host->host_lock);
- } else {
- lpfc_els_handle_rscn(phba);
+ lpfc_can_disctmo(phba);
+ if (phba->fc_flag & FC_RSCN_MODE) {
+ /*
+ * Check to see if more RSCNs came in while
+ * we were processing this one.
+ */
+ if ((phba->fc_rscn_id_cnt == 0) &&
+ (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(phba->host->host_lock);
+ } else {
+ lpfc_els_handle_rscn(phba);
+ }
}
}
}
@@ -1263,7 +1260,7 @@ lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
- cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name));
+ cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name);
elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_LOGO);
if (!elsiocb)
@@ -1451,22 +1448,23 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
* PLOGIs to be sent
*/
lpfc_more_plogi(phba);
- }
- if (phba->num_disc_nodes == 0) {
- phba->fc_flag &= ~FC_NDISC_ACTIVE;
- lpfc_can_disctmo(phba);
- if (phba->fc_flag & FC_RSCN_MODE) {
- /* Check to see if more RSCNs
- * came in while we were
- * processing this one.
- */
- if((phba->fc_rscn_id_cnt==0) &&
- (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
- phba->fc_flag &= ~FC_RSCN_MODE;
- }
- else {
- lpfc_els_handle_rscn(phba);
+ if (phba->num_disc_nodes == 0) {
+ phba->fc_flag &= ~FC_NDISC_ACTIVE;
+ lpfc_can_disctmo(phba);
+ if (phba->fc_flag & FC_RSCN_MODE) {
+ /*
+ * Check to see if more RSCNs
+ * came in while we were
+ * processing this one.
+ */
+ if((phba->fc_rscn_id_cnt==0) &&
+ !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
+ phba->fc_flag &= ~FC_RSCN_MODE;
+ }
+ else {
+ lpfc_els_handle_rscn(phba);
+ }
}
}
}
@@ -1872,9 +1870,6 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (mbox) {
if ((rspiocb->iocb.ulpStatus == 0)
&& (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
- /* set_slim mailbox command needs to execute first,
- * queue this command to be processed later.
- */
lpfc_unreg_rpi(phba, ndlp);
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
mbox->context2 = ndlp;
@@ -1920,6 +1915,7 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ ELS_PKT *els_pkt_ptr;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
@@ -1958,6 +1954,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
pcmd += sizeof (uint32_t);
memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
break;
+ case ELS_CMD_PRLO:
+ cmdsize = sizeof (uint32_t) + sizeof (PRLO);
+ elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+ memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
+ sizeof (uint32_t) + sizeof (PRLO));
+ *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
+ els_pkt_ptr = (ELS_PKT *) pcmd;
+ els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
+ break;
default:
return 1;
}
@@ -2498,7 +2511,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
/* If we are about to begin discovery, just ACC the RSCN.
* Discovery processing will satisfy it.
*/
- if (phba->hba_state < LPFC_NS_QRY) {
+ if (phba->hba_state <= LPFC_NS_QRY) {
lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
newnode);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 6721e679df62..adb086009ae0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -311,8 +311,8 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
evtp->evt_arg2 = arg2;
evtp->evt = evt;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
spin_lock_irq(phba->host->host_lock);
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
wake_up(phba->work_wait);
spin_unlock_irq(phba->host->host_lock);
@@ -1071,10 +1071,6 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
/* initialize static port data */
rport->maxframe_size = ndlp->nlp_maxframe;
rport->supported_classes = ndlp->nlp_class_sup;
- if ((rport->scsi_target_id != -1) &&
- (rport->scsi_target_id < MAX_FCP_TARGET)) {
- ndlp->nlp_sid = rport->scsi_target_id;
- }
rdata = rport->dd_data;
rdata->pnode = ndlp;
@@ -1087,6 +1083,10 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
+ if ((rport->scsi_target_id != -1) &&
+ (rport->scsi_target_id < MAX_FCP_TARGET)) {
+ ndlp->nlp_sid = rport->scsi_target_id;
+ }
return;
}
@@ -1238,6 +1238,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
evt_listp);
}
+ nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
nlp->nlp_type |= NLP_FC_NODE;
break;
case NLP_MAPPED_LIST:
@@ -1258,6 +1259,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
evt_listp);
}
+ nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
break;
case NLP_NPR_LIST:
nlp->nlp_flag |= list;
@@ -1402,6 +1404,8 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
return 1;
case CMD_ELS_REQUEST64_CR:
+ if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
+ return 1;
case CMD_XMIT_ELS_RSP64_CX:
if (iocb->context1 == (uint8_t *) ndlp)
return 1;
@@ -1901,10 +1905,8 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
*/
if (ndlp->nlp_flag & NLP_DELAY_TMO)
lpfc_cancel_retry_delay_tmo(phba, ndlp);
- } else {
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ } else
ndlp = NULL;
- }
} else {
flg = ndlp->nlp_flag & NLP_LIST_MASK;
if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 54d04188f7cc..eedf98801366 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -449,6 +449,7 @@ struct serv_parm { /* Structure is in Big Endian format */
#define ELS_CMD_RRQ 0x12000000
#define ELS_CMD_PRLI 0x20100014
#define ELS_CMD_PRLO 0x21100014
+#define ELS_CMD_PRLO_ACC 0x02100014
#define ELS_CMD_PDISC 0x50000000
#define ELS_CMD_FDISC 0x51000000
#define ELS_CMD_ADISC 0x52000000
@@ -484,6 +485,7 @@ struct serv_parm { /* Structure is in Big Endian format */
#define ELS_CMD_RRQ 0x12
#define ELS_CMD_PRLI 0x14001020
#define ELS_CMD_PRLO 0x14001021
+#define ELS_CMD_PRLO_ACC 0x14001002
#define ELS_CMD_PDISC 0x50
#define ELS_CMD_FDISC 0x51
#define ELS_CMD_ADISC 0x52
@@ -1539,6 +1541,7 @@ typedef struct {
#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */
+#define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */
uint32_t link_speed;
#define LINK_SPEED_AUTO 0 /* Auto selection */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 66d5d003555d..908d0f27706f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -294,15 +294,6 @@ lpfc_config_port_post(struct lpfc_hba * phba)
}
}
- /* This should turn on DELAYED ABTS for ELS timeouts */
- lpfc_set_slim(phba, pmb, 0x052198, 0x1);
- if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- phba->hba_state = LPFC_HBA_ERROR;
- mempool_free( pmb, phba->mbox_mem_pool);
- return -EIO;
- }
-
-
lpfc_read_config(phba, pmb);
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
lpfc_printf_log(phba,
@@ -804,7 +795,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
int max_speed;
char * ports;
char * bus;
- } m;
+ } m = {"<Unknown>", 0, "", ""};
pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
ports = (hdrtype == 0x80) ? "2-port " : "";
@@ -1627,7 +1618,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
error = lpfc_alloc_sysfs_attr(phba);
if (error)
- goto out_kthread_stop;
+ goto out_remove_host;
error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
LPFC_DRIVER_NAME, phba);
@@ -1644,8 +1635,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
error = lpfc_sli_hba_setup(phba);
- if (error)
+ if (error) {
+ error = -ENODEV;
goto out_free_irq;
+ }
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
spin_lock_irq(phba->host->host_lock);
@@ -1700,6 +1693,9 @@ out_free_irq:
free_irq(phba->pcidev->irq, phba);
out_free_sysfs_attr:
lpfc_free_sysfs_attr(phba);
+out_remove_host:
+ fc_remove_host(phba->host);
+ scsi_remove_host(phba->host);
out_kthread_stop:
kthread_stop(phba->worker_thread);
out_free_iocbq:
@@ -1721,12 +1717,14 @@ out_iounmap_slim:
out_idr_remove:
idr_remove(&lpfc_hba_index, phba->brd_no);
out_put_host:
+ phba->host = NULL;
scsi_host_put(host);
out_release_regions:
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);
out:
+ pci_set_drvdata(pdev, NULL);
return error;
}
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index c585e2b2e589..e42f22aaf71b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -200,6 +200,9 @@ lpfc_init_link(struct lpfc_hba * phba,
break;
}
+ /* Enable asynchronous ABTS responses from firmware */
+ mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
+
/* NEW_FEATURE
* Setting up the link speed
*/
@@ -292,36 +295,6 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
return;
}
-/***********************************************/
-
-/* command to write slim */
-/***********************************************/
-void
-lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr,
- uint32_t value)
-{
- MAILBOX_t *mb;
-
- mb = &pmb->mb;
- memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-
- /* addr = 0x090597 is AUTO ABTS disable for ELS commands */
- /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
-
- /*
- * Always turn on DELAYED ABTS for ELS timeouts
- */
- if ((addr == 0x052198) && (value == 0))
- value = 1;
-
- mb->un.varWords[0] = addr;
- mb->un.varWords[1] = value;
-
- mb->mbxCommand = MBX_SET_SLIM;
- mb->mbxOwner = OWN_HOST;
- return;
-}
-
/**********************************************/
/* lpfc_read_nv Issue a READ CONFIG */
/* mailbox command */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 3d77bd999b70..27d60ad897cd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -465,14 +465,18 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
static int
lpfc_rcv_logo(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp,
- struct lpfc_iocbq *cmdiocb)
+ struct lpfc_iocbq *cmdiocb,
+ uint32_t els_cmd)
{
/* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
* PLOGIs during LOGO storms from a device.
*/
ndlp->nlp_flag |= NLP_LOGO_ACC;
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ if (els_cmd == ELS_CMD_PRLO)
+ lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
+ else
+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
if (!(ndlp->nlp_type & NLP_FABRIC) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
@@ -681,7 +685,7 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp, 1);
- lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -788,10 +792,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
if (lpfc_reg_login
(phba, irsp->un.elsreq64.remoteID,
(uint8_t *) sp, mbox, 0) == 0) {
- /* set_slim mailbox command needs to
- * execute first, queue this command to
- * be processed later.
- */
switch (ndlp->nlp_DID) {
case NameServer_DID:
mbox->mbox_cmpl =
@@ -832,11 +832,17 @@ static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
- /* software abort outstanding PLOGI */
- lpfc_els_abort(phba, ndlp, 1);
+ if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ return ndlp->nlp_state;
+ }
+ else {
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp, 1);
- lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
- return NLP_STE_FREED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return NLP_STE_FREED_NODE;
+ }
}
static uint32_t
@@ -851,7 +857,7 @@ lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
return ndlp->nlp_state;
@@ -905,7 +911,7 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp, 0);
- lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -932,7 +938,7 @@ lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
/* Treat like rcv logo */
- lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
return ndlp->nlp_state;
}
@@ -987,11 +993,17 @@ lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg,
uint32_t evt)
{
- /* software abort outstanding ADISC */
- lpfc_els_abort(phba, ndlp, 1);
+ if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ return ndlp->nlp_state;
+ }
+ else {
+ /* software abort outstanding ADISC */
+ lpfc_els_abort(phba, ndlp, 1);
- lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
- return NLP_STE_FREED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return NLP_STE_FREED_NODE;
+ }
}
static uint32_t
@@ -1006,7 +1018,7 @@ lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_flag |= NLP_NPR_ADISC;
spin_unlock_irq(phba->host->host_lock);
@@ -1048,7 +1060,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -1073,7 +1085,7 @@ lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
@@ -1133,8 +1145,14 @@ lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg,
uint32_t evt)
{
- lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
- return NLP_STE_FREED_NODE;
+ if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ return ndlp->nlp_state;
+ }
+ else {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return NLP_STE_FREED_NODE;
+ }
}
static uint32_t
@@ -1146,7 +1164,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
return ndlp->nlp_state;
}
@@ -1186,7 +1204,7 @@ lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
/* Software abort outstanding PRLI before sending acc */
lpfc_els_abort(phba, ndlp, 1);
- lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -1214,7 +1232,7 @@ lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
@@ -1278,11 +1296,17 @@ static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
- /* software abort outstanding PRLI */
- lpfc_els_abort(phba, ndlp, 1);
+ if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ return ndlp->nlp_state;
+ }
+ else {
+ /* software abort outstanding PRLI */
+ lpfc_els_abort(phba, ndlp, 1);
- lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
- return NLP_STE_FREED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return NLP_STE_FREED_NODE;
+ }
}
@@ -1313,7 +1337,7 @@ lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
return ndlp->nlp_state;
}
@@ -1351,7 +1375,7 @@ lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -1375,7 +1399,7 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
@@ -1386,7 +1410,7 @@ lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
lpfc_disc_set_adisc(phba, ndlp);
return ndlp->nlp_state;
@@ -1424,7 +1448,7 @@ lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -1456,7 +1480,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
spin_unlock_irq(phba->host->host_lock);
/* Treat like rcv logo */
- lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
return ndlp->nlp_state;
}
@@ -1469,7 +1493,7 @@ lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
ndlp->nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
lpfc_disc_set_adisc(phba, ndlp);
return ndlp->nlp_state;
@@ -1551,7 +1575,7 @@ lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_logo(phba, ndlp, cmdiocb);
+ lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -1617,9 +1641,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
+ IOCB_t *irsp;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
+
+ irsp = &rspiocb->iocb;
+ if (irsp->ulpStatus) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return NLP_STE_FREED_NODE;
+ }
return ndlp->nlp_state;
}
@@ -1628,9 +1659,16 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
+ IOCB_t *irsp;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
+
+ irsp = &rspiocb->iocb;
+ if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return NLP_STE_FREED_NODE;
+ }
return ndlp->nlp_state;
}
@@ -1649,9 +1687,16 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
+ IOCB_t *irsp;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
+
+ irsp = &rspiocb->iocb;
+ if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return NLP_STE_FREED_NODE;
+ }
return ndlp->nlp_state;
}
@@ -1668,7 +1713,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
if (!mb->mbxStatus)
ndlp->nlp_rpi = mb->un.varWords[0];
-
+ else {
+ if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ return NLP_STE_FREED_NODE;
+ }
+ }
return ndlp->nlp_state;
}
@@ -1677,6 +1727,10 @@ lpfc_device_rm_npr_node(struct lpfc_hba * phba,
struct lpfc_nodelist * ndlp, void *arg,
uint32_t evt)
{
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ return ndlp->nlp_state;
+ }
lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
return NLP_STE_FREED_NODE;
}
@@ -1687,7 +1741,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
uint32_t evt)
{
spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(phba->host->host_lock);
if (ndlp->nlp_flag & NLP_DELAY_TMO) {
lpfc_cancel_retry_delay_tmo(phba, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f93799873721..7dc4c2e6bed2 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -629,8 +629,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq;
IOCB_t *piocb;
struct fcp_cmnd *fcp_cmnd;
- struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
- struct lpfc_rport_data *rdata = scsi_dev->hostdata;
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
@@ -665,56 +664,18 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
piocb->ulpTimeout = lpfc_cmd->timeout;
}
- lpfc_cmd->rdata = rdata;
-
- switch (task_mgmt_cmd) {
- case FCP_LUN_RESET:
- /* Issue LUN Reset to TGT <num> LUN <num> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_FCP,
- "%d:0703 Issue LUN Reset to TGT %d LUN %d "
- "Data: x%x x%x\n",
- phba->brd_no,
- scsi_dev->id, scsi_dev->lun,
- ndlp->nlp_rpi, ndlp->nlp_flag);
-
- break;
- case FCP_ABORT_TASK_SET:
- /* Issue Abort Task Set to TGT <num> LUN <num> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_FCP,
- "%d:0701 Issue Abort Task Set to TGT %d LUN %d "
- "Data: x%x x%x\n",
- phba->brd_no,
- scsi_dev->id, scsi_dev->lun,
- ndlp->nlp_rpi, ndlp->nlp_flag);
-
- break;
- case FCP_TARGET_RESET:
- /* Issue Target Reset to TGT <num> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_FCP,
- "%d:0702 Issue Target Reset to TGT %d "
- "Data: x%x x%x\n",
- phba->brd_no,
- scsi_dev->id, ndlp->nlp_rpi,
- ndlp->nlp_flag);
- break;
- }
-
return (1);
}
static int
-lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
+lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
+ unsigned tgt_id, struct lpfc_rport_data *rdata)
{
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
int ret;
+ lpfc_cmd->rdata = rdata;
ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
if (!ret)
return FAILED;
@@ -726,6 +687,13 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
if (!iocbqrsp)
return FAILED;
+ /* Issue Target Reset to TGT <num> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+ "%d:0702 Issue Target Reset to TGT %d "
+ "Data: x%x x%x\n",
+ phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
+ rdata->pnode->nlp_flag);
+
ret = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -1021,6 +989,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->timeout = 60;
lpfc_cmd->scsi_hba = phba;
+ lpfc_cmd->rdata = rdata;
ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
if (!ret)
@@ -1033,6 +1002,11 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
if (iocbqrsp == NULL)
goto out_free_scsi_buf;
+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+ "%d:0703 Issue LUN Reset to TGT %d LUN %d "
+ "Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
+ cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
+
ret = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -1104,7 +1078,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
int match;
int ret = FAILED, i, err_count = 0;
int cnt, loopcnt;
- unsigned int midlayer_id = 0;
struct lpfc_scsi_buf * lpfc_cmd;
lpfc_block_requests(phba);
@@ -1124,7 +1097,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
* targets known to the driver. Should any target reset
* fail, this routine returns failure to the midlayer.
*/
- midlayer_id = cmnd->device->id;
for (i = 0; i < MAX_FCP_TARGET; i++) {
/* Search the mapped list for this target ID */
match = 0;
@@ -1137,9 +1109,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
if (!match)
continue;
- lpfc_cmd->pCmd->device->id = i;
- lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
- ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
+ ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba,
+ i, ndlp->rport->dd_data);
if (ret != SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0713 Bus Reset on target %d failed\n",
@@ -1158,7 +1129,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
* the targets. Unfortunately, some targets do not abide by
* this forcing the driver to double check.
*/
- cmnd->device->id = midlayer_id;
cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
0, 0, LPFC_CTX_HOST);
if (cnt)
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4cf1366108b7..6b737568b831 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.1.4"
+#define LPFC_DRIVER_VERSION "8.1.6"
#define LPFC_DRIVER_NAME "lpfc"
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 80b68a2481b3..de35ffe2f79d 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4471,7 +4471,6 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
{
Scsi_Cmnd *scmd;
struct scsi_device *sdev;
- unsigned long flags = 0;
scb_t *scb;
int rval;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index c11e5ce6865e..bec1424eda85 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_mbox.c
- * Version : v2.20.4.7 (Nov 14 2005)
+ * Version : v2.20.4.8 (Apr 11 2006)
*
* Authors:
* Atul Mukker <Atul.Mukker@lsil.com>
@@ -2278,6 +2278,7 @@ megaraid_mbox_dpc(unsigned long devp)
unsigned long flags;
uint8_t c;
int status;
+ uioc_t *kioc;
if (!adapter) return;
@@ -2320,6 +2321,9 @@ megaraid_mbox_dpc(unsigned long devp)
// remove from local clist
list_del_init(&scb->list);
+ kioc = (uioc_t *)scb->gp;
+ kioc->status = 0;
+
megaraid_mbox_mm_done(adapter, scb);
continue;
@@ -2636,6 +2640,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
int recovery_window;
int recovering;
int i;
+ uioc_t *kioc;
adapter = SCP2ADAPTER(scp);
raid_dev = ADAP2RAIDDEV(adapter);
@@ -2655,32 +2660,51 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
// Also, reset all the commands currently owned by the driver
spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
-
list_del_init(&scb->list); // from pending list
- con_log(CL_ANN, (KERN_WARNING
- "megaraid: %ld:%d[%d:%d], reset from pending list\n",
- scp->serial_number, scb->sno,
- scb->dev_channel, scb->dev_target));
+ if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: IOCTL packet with %d[%d:%d] being reset\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
- scp->result = (DID_RESET << 16);
- scp->scsi_done(scp);
+ scb->status = -1;
- megaraid_dealloc_scb(adapter, scb);
+ kioc = (uioc_t *)scb->gp;
+ kioc->status = -EFAULT;
+
+ megaraid_mbox_mm_done(adapter, scb);
+ } else {
+ if (scb->scp == scp) { // Found command
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: %ld:%d[%d:%d], reset from pending list\n",
+ scp->serial_number, scb->sno,
+ scb->dev_channel, scb->dev_target));
+ } else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: IO packet with %d[%d:%d] being reset\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
+ }
+
+ scb->scp->result = (DID_RESET << 16);
+ scb->scp->scsi_done(scb->scp);
+
+ megaraid_dealloc_scb(adapter, scb);
+ }
}
spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
if (adapter->outstanding_cmds) {
con_log(CL_ANN, (KERN_NOTICE
"megaraid: %d outstanding commands. Max wait %d sec\n",
- adapter->outstanding_cmds, MBOX_RESET_WAIT));
+ adapter->outstanding_cmds,
+ (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
}
recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
recovering = adapter->outstanding_cmds;
- for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) {
+ for (i = 0; i < recovery_window; i++) {
megaraid_ack_sequence(adapter);
@@ -2689,12 +2713,11 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
con_log(CL_ANN, (
"megaraid mbox: Wait for %d commands to complete:%d\n",
adapter->outstanding_cmds,
- MBOX_RESET_WAIT - i));
+ (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
}
// bailout if no recovery happended in reset time
- if ((i == MBOX_RESET_WAIT) &&
- (recovering == adapter->outstanding_cmds)) {
+ if (adapter->outstanding_cmds == 0) {
break;
}
@@ -2918,12 +2941,13 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
wmb();
WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
- for (i = 0; i < 0xFFFFF; i++) {
+ for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
if (mbox->numstatus != 0xFF) break;
rmb();
+ udelay(MBOX_SYNC_DELAY_200);
}
- if (i == 0xFFFFF) {
+ if (i == MBOX_SYNC_WAIT_CNT) {
// We may need to re-calibrate the counter
con_log(CL_ANN, (KERN_CRIT
"megaraid: fast sync command timed out\n"));
@@ -3475,7 +3499,7 @@ megaraid_cmm_register(adapter_t *adapter)
adp.drvr_data = (unsigned long)adapter;
adp.pdev = adapter->pdev;
adp.issue_uioc = megaraid_mbox_mm_handler;
- adp.timeout = 300;
+ adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
adp.max_kioc = MBOX_MAX_USER_CMDS;
if ((rval = mraid_mm_register_adp(&adp)) != 0) {
@@ -3702,7 +3726,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
unsigned long flags;
kioc = (uioc_t *)scb->gp;
- kioc->status = 0;
mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
mbox64->mbox32.status = scb->status;
raw_mbox = (uint8_t *)&mbox64->mbox32;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 882fb1a0b575..868fb0ec93e7 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
#include "megaraid_ioctl.h"
-#define MEGARAID_VERSION "2.20.4.7"
-#define MEGARAID_EXT_VERSION "(Release Date: Mon Nov 14 12:27:22 EST 2005)"
+#define MEGARAID_VERSION "2.20.4.8"
+#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)"
/*
@@ -100,6 +100,9 @@
#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox
#define MBOX_RESET_WAIT 180 // wait these many seconds in reset
#define MBOX_RESET_EXT_WAIT 120 // extended wait reset
+#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode
+
+#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds
/*
* maximum transfer that can happen through the firmware commands issued
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 8f3ce0432295..e8f534fb336b 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -898,10 +898,8 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
- if (!adapter) {
- rval = -ENOMEM;
- goto memalloc_error;
- }
+ if (!adapter)
+ return -ENOMEM;
memset(adapter, 0, sizeof(mraid_mmadp_t));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 017729c59a49..584fe5d8e507 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -599,6 +599,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
* Either SUCCESS or FAILED.
*
* Note:
+* Only return FAILED if command not returned by firmware.
**************************************************************************/
int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
@@ -609,11 +610,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned int id, lun;
unsigned long serial;
unsigned long flags;
+ int wait = 0;
if (!CMD_SP(cmd))
- return FAILED;
+ return SUCCESS;
- ret = FAILED;
+ ret = SUCCESS;
id = cmd->device->id;
lun = cmd->device->lun;
@@ -642,7 +644,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
} else {
DEBUG3(printk("%s(%ld): abort_command "
"mbx success.\n", __func__, ha->host_no));
- ret = SUCCESS;
+ wait = 1;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -651,17 +653,18 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for the command to be returned. */
- if (ret == SUCCESS) {
+ if (wait) {
if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
"scsi(%ld:%d:%d): Abort handler timed out -- %lx "
"%x.\n", ha->host_no, id, lun, serial, ret);
+ ret = FAILED;
}
}
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no,
- id, lun, serial, ret);
+ "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
+ ha->host_no, id, lun, wait, serial, ret);
return ret;
}
@@ -1700,8 +1703,8 @@ qla2x00_free_device(scsi_qla_host_t *ha)
ha->flags.online = 0;
/* Detach interrupts */
- if (ha->pdev->irq)
- free_irq(ha->pdev->irq, ha);
+ if (ha->host->irq)
+ free_irq(ha->host->irq, ha);
/* release io space registers */
if (ha->iobase)
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c750d3399a97..941c1e15c899 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -56,6 +56,8 @@ static struct {
{"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */
{"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */
{"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */
+ {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */
+ {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */
{"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */
{"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */
{"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7b0f9a3810d2..764a8b375ead 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1067,16 +1067,29 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
break;
case NOT_READY:
/*
- * If the device is in the process of becoming ready,
- * retry.
+ * If the device is in the process of becoming
+ * ready, or has a temporary blockage, retry.
*/
- if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
- scsi_requeue_command(q, cmd);
- return;
+ if (sshdr.asc == 0x04) {
+ switch (sshdr.ascq) {
+ case 0x01: /* becoming ready */
+ case 0x04: /* format in progress */
+ case 0x05: /* rebuild in progress */
+ case 0x06: /* recalculation in progress */
+ case 0x07: /* operation in progress */
+ case 0x08: /* Long write in progress */
+ case 0x09: /* self test in progress */
+ scsi_requeue_command(q, cmd);
+ return;
+ default:
+ break;
+ }
}
- if (!(req->flags & REQ_QUIET))
+ if (!(req->flags & REQ_QUIET)) {
scmd_printk(KERN_INFO, cmd,
- "Device not ready.\n");
+ "Device not ready: ");
+ scsi_print_sense_hdr("", &sshdr);
+ }
scsi_end_request(cmd, 0, this_count, 1);
return;
case VOLUME_OVERFLOW:
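The NOT_READY hunk above widens the retry test from the single "becoming ready" case (ASC 0x04 / ASCQ 0x01) to a small set of "operation in progress" qualifiers. A standalone sketch of that decision with the ASC/ASCQ values copied from the hunk; the helper name is made up for illustration and is not a kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns true when a NOT_READY sense (ASC 0x04) describes a condition
     * that is expected to clear on its own, so the command is worth requeueing. */
    static bool not_ready_is_retryable(unsigned char asc, unsigned char ascq)
    {
            if (asc != 0x04)
                    return false;

            switch (ascq) {
            case 0x01:      /* becoming ready */
            case 0x04:      /* format in progress */
            case 0x05:      /* rebuild in progress */
            case 0x06:      /* recalculation in progress */
            case 0x07:      /* operation in progress */
            case 0x08:      /* long write in progress */
            case 0x09:      /* self test in progress */
                    return true;
            default:
                    return false;
            }
    }

    int main(void)
    {
            printf("%d %d\n", not_ready_is_retryable(0x04, 0x05),
                   not_ready_is_retryable(0x04, 0x02));
            return 0;
    }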
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 3274ab76c8d3..255886a9ac55 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -75,7 +75,7 @@ param_setup(char *str)
else if(!strncmp(pos, "id:", 3)) {
if(slot == -1) {
printk(KERN_WARNING "sim710: Must specify slot for id parameter\n");
- } else if(slot > MAX_SLOTS) {
+ } else if(slot >= MAX_SLOTS) {
printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val);
} else {
id_array[slot] = val;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 674b15c78f68..bbf78aaf9e01 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -362,6 +362,40 @@ serial_out(struct uart_8250_port *up, int offset, int value)
#define serial_inp(up, offset) serial_in(up, offset)
#define serial_outp(up, offset, value) serial_out(up, offset, value)
+/* Uart divisor latch read */
+static inline int _serial_dl_read(struct uart_8250_port *up)
+{
+ return serial_inp(up, UART_DLL) | serial_inp(up, UART_DLM) << 8;
+}
+
+/* Uart divisor latch write */
+static inline void _serial_dl_write(struct uart_8250_port *up, int value)
+{
+ serial_outp(up, UART_DLL, value & 0xff);
+ serial_outp(up, UART_DLM, value >> 8 & 0xff);
+}
+
+#ifdef CONFIG_SERIAL_8250_AU1X00
+/* Au1x00 doesn't have a standard divisor latch */
+static int serial_dl_read(struct uart_8250_port *up)
+{
+ if (up->port.iotype == UPIO_AU)
+ return __raw_readl(up->port.membase + 0x28);
+ else
+ return _serial_dl_read(up);
+}
+
+static void serial_dl_write(struct uart_8250_port *up, int value)
+{
+ if (up->port.iotype == UPIO_AU)
+ __raw_writel(value, up->port.membase + 0x28);
+ else
+ _serial_dl_write(up, value);
+}
+#else
+#define serial_dl_read(up) _serial_dl_read(up)
+#define serial_dl_write(up, value) _serial_dl_write(up, value)
+#endif
/*
* For the 16C950
@@ -494,7 +528,8 @@ static void disable_rsa(struct uart_8250_port *up)
*/
static int size_fifo(struct uart_8250_port *up)
{
- unsigned char old_fcr, old_mcr, old_dll, old_dlm, old_lcr;
+ unsigned char old_fcr, old_mcr, old_lcr;
+ unsigned short old_dl;
int count;
old_lcr = serial_inp(up, UART_LCR);
@@ -505,10 +540,8 @@ static int size_fifo(struct uart_8250_port *up)
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_outp(up, UART_MCR, UART_MCR_LOOP);
serial_outp(up, UART_LCR, UART_LCR_DLAB);
- old_dll = serial_inp(up, UART_DLL);
- old_dlm = serial_inp(up, UART_DLM);
- serial_outp(up, UART_DLL, 0x01);
- serial_outp(up, UART_DLM, 0x00);
+ old_dl = serial_dl_read(up);
+ serial_dl_write(up, 0x0001);
serial_outp(up, UART_LCR, 0x03);
for (count = 0; count < 256; count++)
serial_outp(up, UART_TX, count);
@@ -519,8 +552,7 @@ static int size_fifo(struct uart_8250_port *up)
serial_outp(up, UART_FCR, old_fcr);
serial_outp(up, UART_MCR, old_mcr);
serial_outp(up, UART_LCR, UART_LCR_DLAB);
- serial_outp(up, UART_DLL, old_dll);
- serial_outp(up, UART_DLM, old_dlm);
+ serial_dl_write(up, old_dl);
serial_outp(up, UART_LCR, old_lcr);
return count;
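The size_fifo() hunk above now goes through the new serial_dl_read()/serial_dl_write() helpers, which fold the two 8-bit latch registers into one 16-bit divisor and back. A quick standalone check of that packing arithmetic, with made-up register contents instead of real I/O, purely for illustration:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned char dll = 0x34, dlm = 0x12;   /* pretend DLL/DLM contents */
            int dl = dll | dlm << 8;                /* the _serial_dl_read() combine */

            assert(dl == 0x1234);

            /* the _serial_dl_write() split */
            assert((dl & 0xff) == dll);
            assert((dl >> 8 & 0xff) == dlm);

            printf("divisor = 0x%04x\n", dl);
            return 0;
    }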
@@ -750,8 +782,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
serial_outp(up, UART_LCR, 0xE0);
- quot = serial_inp(up, UART_DLM) << 8;
- quot += serial_inp(up, UART_DLL);
+ quot = serial_dl_read(up);
quot <<= 3;
status1 = serial_in(up, 0x04); /* EXCR1 */
@@ -759,8 +790,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */
serial_outp(up, 0x04, status1);
- serial_outp(up, UART_DLL, quot & 0xff);
- serial_outp(up, UART_DLM, quot >> 8);
+ serial_dl_write(up, quot);
serial_outp(up, UART_LCR, 0);
@@ -1862,8 +1892,7 @@ serial8250_set_termios(struct uart_port *port, struct termios *termios,
serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
}
- serial_outp(up, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_outp(up, UART_DLM, quot >> 8); /* MS of divisor */
+ serial_dl_write(up, quot);
/*
* LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
@@ -1906,6 +1935,9 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
int ret = 0;
switch (up->port.iotype) {
+ case UPIO_AU:
+ size = 0x100000;
+ /* fall thru */
case UPIO_MEM:
if (!up->port.mapbase)
break;
@@ -1938,6 +1970,9 @@ static void serial8250_release_std_resource(struct uart_8250_port *up)
unsigned int size = 8 << up->port.regshift;
switch (up->port.iotype) {
+ case UPIO_AU:
+ size = 0x100000;
+ /* fall thru */
case UPIO_MEM:
if (!up->port.mapbase)
break;
@@ -2200,10 +2235,17 @@ static void
serial8250_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_8250_port *up = &serial8250_ports[co->index];
+ unsigned long flags;
unsigned int ier;
+ int locked = 1;
touch_nmi_watchdog();
+ if (oops_in_progress) {
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ } else
+ spin_lock_irqsave(&up->port.lock, flags);
+
/*
* First save the IER then disable the interrupts
*/
@@ -2221,8 +2263,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
* and restore the IER
*/
wait_for_xmitr(up, BOTH_EMPTY);
- up->ier |= UART_IER_THRI;
- serial_out(up, UART_IER, ier | UART_IER_THRI);
+ serial_out(up, UART_IER, ier);
+
+ if (locked)
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int serial8250_console_setup(struct console *co, char *options)
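The console-write hunk above takes the port lock unconditionally except during an oops, where it only try-locks so a crash in the current lock holder cannot deadlock console output. A rough userspace sketch of the same pattern, with a pthread mutex standing in for the port spinlock and a plain variable simulating oops_in_progress (both assumptions, not the kernel primitives):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
    static int oops_in_progress;            /* stand-in for the kernel global */

    static void console_write(const char *s)
    {
            int locked = 1;

            if (oops_in_progress)
                    locked = (pthread_mutex_trylock(&port_lock) == 0);
            else
                    pthread_mutex_lock(&port_lock);

            printf("%s", s);                /* the actual character output */

            if (locked)
                    pthread_mutex_unlock(&port_lock);
    }

    int main(void)
    {
            console_write("normal path\n");
            oops_in_progress = 1;
            console_write("oops path\n");
            return 0;
    }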
diff --git a/drivers/serial/8250_au1x00.c b/drivers/serial/8250_au1x00.c
index 3d1bfd07208d..58015fd14be9 100644
--- a/drivers/serial/8250_au1x00.c
+++ b/drivers/serial/8250_au1x00.c
@@ -30,13 +30,12 @@
{ \
.iobase = _base, \
.membase = (void __iomem *)_base,\
- .mapbase = _base, \
+ .mapbase = CPHYSADDR(_base), \
.irq = _irq, \
.uartclk = 0, /* filled */ \
.regshift = 2, \
.iotype = UPIO_AU, \
- .flags = UPF_SKIP_TEST | \
- UPF_IOREMAP, \
+ .flags = UPF_SKIP_TEST \
}
static struct plat_serial8250_port au1x00_data[] = {
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index fcd7744c4253..aeb8153ccf24 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -1500,20 +1500,18 @@ uart_block_til_ready(struct file *filp, struct uart_state *state)
static struct uart_state *uart_get(struct uart_driver *drv, int line)
{
struct uart_state *state;
+ int ret = 0;
- mutex_lock(&port_mutex);
state = drv->state + line;
if (mutex_lock_interruptible(&state->mutex)) {
- state = ERR_PTR(-ERESTARTSYS);
- goto out;
+ ret = -ERESTARTSYS;
+ goto err;
}
state->count++;
- if (!state->port) {
- state->count--;
- mutex_unlock(&state->mutex);
- state = ERR_PTR(-ENXIO);
- goto out;
+ if (!state->port || state->port->flags & UPF_DEAD) {
+ ret = -ENXIO;
+ goto err_unlock;
}
if (!state->info) {
@@ -1531,15 +1529,17 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line)
tasklet_init(&state->info->tlet, uart_tasklet_action,
(unsigned long)state);
} else {
- state->count--;
- mutex_unlock(&state->mutex);
- state = ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_unlock;
}
}
-
- out:
- mutex_unlock(&port_mutex);
return state;
+
+ err_unlock:
+ state->count--;
+ mutex_unlock(&state->mutex);
+ err:
+ return ERR_PTR(ret);
}
/*
@@ -2085,45 +2085,6 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
}
}
-/*
- * This reverses the effects of uart_configure_port, hanging up the
- * port before removal.
- */
-static void
-uart_unconfigure_port(struct uart_driver *drv, struct uart_state *state)
-{
- struct uart_port *port = state->port;
- struct uart_info *info = state->info;
-
- if (info && info->tty)
- tty_vhangup(info->tty);
-
- mutex_lock(&state->mutex);
-
- state->info = NULL;
-
- /*
- * Free the port IO and memory resources, if any.
- */
- if (port->type != PORT_UNKNOWN)
- port->ops->release_port(port);
-
- /*
- * Indicate that there isn't a port here anymore.
- */
- port->type = PORT_UNKNOWN;
-
- /*
- * Kill the tasklet, and free resources.
- */
- if (info) {
- tasklet_kill(&info->tlet);
- kfree(info);
- }
-
- mutex_unlock(&state->mutex);
-}
-
static struct tty_operations uart_ops = {
.open = uart_open,
.close = uart_close,
@@ -2270,6 +2231,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
state = drv->state + port->line;
mutex_lock(&port_mutex);
+ mutex_lock(&state->mutex);
if (state->port) {
ret = -EINVAL;
goto out;
@@ -2304,7 +2266,13 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
port->cons && !(port->cons->flags & CON_ENABLED))
register_console(port->cons);
+ /*
+ * Ensure UPF_DEAD is not set.
+ */
+ port->flags &= ~UPF_DEAD;
+
out:
+ mutex_unlock(&state->mutex);
mutex_unlock(&port_mutex);
return ret;
@@ -2322,6 +2290,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
{
struct uart_state *state = drv->state + port->line;
+ struct uart_info *info;
BUG_ON(in_interrupt());
@@ -2332,11 +2301,48 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
mutex_lock(&port_mutex);
/*
+ * Mark the port "dead" - this prevents any opens from
+ * succeeding while we shut down the port.
+ */
+ mutex_lock(&state->mutex);
+ port->flags |= UPF_DEAD;
+ mutex_unlock(&state->mutex);
+
+ /*
* Remove the devices from devfs
*/
tty_unregister_device(drv->tty_driver, port->line);
- uart_unconfigure_port(drv, state);
+ info = state->info;
+ if (info && info->tty)
+ tty_vhangup(info->tty);
+
+ /*
+ * All users of this port should now be disconnected from
+ * this driver, and the port shut down. We should be the
+ * only thread fiddling with this port from now on.
+ */
+ state->info = NULL;
+
+ /*
+ * Free the port IO and memory resources, if any.
+ */
+ if (port->type != PORT_UNKNOWN)
+ port->ops->release_port(port);
+
+ /*
+ * Indicate that there isn't a port here anymore.
+ */
+ port->type = PORT_UNKNOWN;
+
+ /*
+ * Kill the tasklet, and free resources.
+ */
+ if (info) {
+ tasklet_kill(&info->tlet);
+ kfree(info);
+ }
+
state->port = NULL;
mutex_unlock(&port_mutex);
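The serial_core hunks above close a race between open and port removal: removal first marks the port UPF_DEAD under the per-port mutex, uart_get() now refuses such a port, and the teardown that used to live in uart_unconfigure_port() runs inline afterwards. A hedged userspace sketch of that mark-dead-then-tear-down shape, with a mutex and a flag standing in for state->mutex and the port flags (struct and function names invented for illustration):

    #include <pthread.h>
    #include <stdio.h>

    #define UPF_DEAD (1u << 30)     /* same bit the patch adds to serial_core.h */

    struct port {
            pthread_mutex_t mutex;
            unsigned int flags;
    };

    /* the uart_get() side: fail if the port is being torn down */
    static int port_get(struct port *p)
    {
            int ret = 0;

            pthread_mutex_lock(&p->mutex);
            if (p->flags & UPF_DEAD)
                    ret = -1;               /* -ENXIO in the driver */
            pthread_mutex_unlock(&p->mutex);
            return ret;
    }

    /* the uart_remove_one_port() side: mark dead first, then tear down */
    static void port_remove(struct port *p)
    {
            pthread_mutex_lock(&p->mutex);
            p->flags |= UPF_DEAD;
            pthread_mutex_unlock(&p->mutex);
            /* ... hang up users, release resources, free info ... */
    }

    int main(void)
    {
            struct port p = { PTHREAD_MUTEX_INITIALIZER, 0 };

            printf("open before removal: %d\n", port_get(&p));
            port_remove(&p);
            printf("open after removal:  %d\n", port_get(&p));
            return 0;
    }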
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index 4ef5cd19609d..b985dfad6c63 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -34,7 +34,7 @@ extra-y += $(call logo-cfiles,_clut224,ppm)
extra-y += $(call logo-cfiles,_gray256,pgm)
# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
-quiet_cmd_logo = LOGO $@
+quiet_cmd_logo = LOGO $@
cmd_logo = scripts/pnmtologo \
-t $(patsubst $*_%,%,$(notdir $(basename $<))) \
-n $(notdir $(basename $<)) -o $@ $<
diff --git a/fs/locks.c b/fs/locks.c
index efad798824dc..6f99c0a6f836 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -446,15 +446,14 @@ static struct lock_manager_operations lease_manager_ops = {
*/
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
+ if (assign_type(fl, type) != 0)
+ return -EINVAL;
+
fl->fl_owner = current->files;
fl->fl_pid = current->tgid;
fl->fl_file = filp;
fl->fl_flags = FL_LEASE;
- if (assign_type(fl, type) != 0) {
- locks_free_lock(fl);
- return -EINVAL;
- }
fl->fl_start = 0;
fl->fl_end = OFFSET_MAX;
fl->fl_ops = NULL;
@@ -466,16 +465,19 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
{
struct file_lock *fl = locks_alloc_lock();
- int error;
+ int error = -ENOMEM;
if (fl == NULL)
- return -ENOMEM;
+ goto out;
error = lease_init(filp, type, fl);
- if (error)
- return error;
+ if (error) {
+ locks_free_lock(fl);
+ fl = NULL;
+ }
+out:
*flp = fl;
- return 0;
+ return error;
}
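The fs/locks.c hunks above move the cleanup responsibility: lease_init() no longer frees the lock it was handed on failure, so lease_alloc() both frees it and reports the error, and *flp is only ever left pointing at a valid lock or NULL. A minimal sketch of that alloc/init/free-on-error shape, using generic names and error codes rather than the kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    struct lock { int type; };

    /* init: validate only, never free what the caller allocated */
    static int lock_init(struct lock *l, int type)
    {
            if (type < 0)
                    return -1;      /* think -EINVAL */
            l->type = type;
            return 0;
    }

    static int lock_alloc(int type, struct lock **out)
    {
            struct lock *l = malloc(sizeof(*l));
            int err = -2;           /* think -ENOMEM */

            if (!l)
                    goto out;
            err = lock_init(l, type);
            if (err) {
                    free(l);        /* the allocating function owns the failure path */
                    l = NULL;
            }
    out:
            *out = l;
            return err;
    }

    int main(void)
    {
            struct lock *l;

            printf("good: %d\n", lock_alloc(1, &l));
            free(l);
            printf("bad:  %d\n", lock_alloc(-1, &l));   /* l comes back NULL */
            return 0;
    }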
/* Check if two locks overlap each other.
@@ -1372,6 +1374,7 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
goto out;
if (my_before != NULL) {
+ *flp = *my_before;
error = lease->fl_lmops->fl_change(my_before, arg);
goto out;
}
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 64ee07db0d5e..8558226281c4 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1942,8 +1942,10 @@ xfs_alloc_fix_freelist(
/*
* Allocate as many blocks as possible at once.
*/
- if ((error = xfs_alloc_ag_vextent(&targs)))
+ if ((error = xfs_alloc_ag_vextent(&targs))) {
+ xfs_trans_brelse(tp, agflbp);
return error;
+ }
/*
* Stop if we run out. Won't happen if callers are obeying
* the restrictions correctly. Can happen for free calls
@@ -1960,6 +1962,7 @@ xfs_alloc_fix_freelist(
return error;
}
}
+ xfs_trans_brelse(tp, agflbp);
args->agbp = agbp;
return 0;
}
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 81a05cfd77d2..1f148762eb28 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -316,6 +316,18 @@ xfs_rename(
}
}
+ /*
+ * If we are using project inheritance, we only allow renames
+ * into our tree when the project IDs are the same; else the
+ * tree quota mechanism would be circumvented.
+ */
+ if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
+ (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) {
+ error = XFS_ERROR(EXDEV);
+ xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
+ goto rele_return;
+ }
+
new_parent = (src_dp != target_dp);
src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index f0e09ca14139..36ea1b2094f2 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -669,31 +669,22 @@ xfs_mntupdate(
xfs_mount_t *mp = XFS_BHVTOM(bdp);
int error;
- if (args->flags & XFSMNT_BARRIER)
- mp->m_flags |= XFS_MOUNT_BARRIER;
- else
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
-
- if ((vfsp->vfs_flag & VFS_RDONLY) &&
- !(*flags & MS_RDONLY)) {
- vfsp->vfs_flag &= ~VFS_RDONLY;
-
- if (args->flags & XFSMNT_BARRIER)
+ if (!(*flags & MS_RDONLY)) { /* rw/ro -> rw */
+ if (vfsp->vfs_flag & VFS_RDONLY)
+ vfsp->vfs_flag &= ~VFS_RDONLY;
+ if (args->flags & XFSMNT_BARRIER) {
+ mp->m_flags |= XFS_MOUNT_BARRIER;
xfs_mountfs_check_barriers(mp);
- }
-
- if (!(vfsp->vfs_flag & VFS_RDONLY) &&
- (*flags & MS_RDONLY)) {
+ } else {
+ mp->m_flags &= ~XFS_MOUNT_BARRIER;
+ }
+ } else if (!(vfsp->vfs_flag & VFS_RDONLY)) { /* rw -> ro */
VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error);
-
xfs_quiesce_fs(mp);
-
- /* Ok now write out an unmount record */
xfs_log_unmount_write(mp);
xfs_unmountfs_writesb(mp);
vfsp->vfs_flag |= VFS_RDONLY;
}
-
return 0;
}
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index fa71b305ba5c..7027ae68ee38 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2663,7 +2663,7 @@ xfs_link(
*/
if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
(tdp->i_d.di_projid != sip->i_d.di_projid))) {
- error = XFS_ERROR(EPERM);
+ error = XFS_ERROR(EXDEV);
goto error_return;
}
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 51c4e5fe6062..d92e253f7f6f 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -200,6 +200,7 @@ extern int io_apic_get_unique_id (int ioapic, int apic_id);
extern int io_apic_get_version (int ioapic);
extern int io_apic_get_redir_entries (int ioapic);
extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low);
+extern int timer_uses_ioapic_pin_0;
#endif /* CONFIG_ACPI */
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 90921e162793..6cc517e212a9 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -11,7 +11,6 @@
#include <linux/compiler.h>
#include <linux/types.h>
-#include <asm/bitops.h>
#include <asm/intrinsics.h>
/**
diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h
index a70ba2ee552d..0fb68a0b0181 100644
--- a/include/asm-ppc/page.h
+++ b/include/asm-ppc/page.h
@@ -20,6 +20,7 @@
/* This must match what is in arch/ppc/Makefile */
#define PAGE_OFFSET CONFIG_KERNEL_START
#define KERNELBASE PAGE_OFFSET
+#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
#ifndef __ASSEMBLY__
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index 93b51df51687..670a3388e70a 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -59,6 +59,8 @@ extern void __init parse_memopt(char *p, char **end);
extern void __init parse_memmapopt(char *p, char **end);
extern struct e820map e820;
+
+extern unsigned ebda_addr, ebda_size;
#endif/*!__ASSEMBLY__*/
#endif/*__E820_HEADER*/
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
index ee1bc69aec9c..52484e82c641 100644
--- a/include/asm-x86_64/io_apic.h
+++ b/include/asm-x86_64/io_apic.h
@@ -205,6 +205,7 @@ extern int skip_ioapic_setup;
extern int io_apic_get_version (int ioapic);
extern int io_apic_get_redir_entries (int ioapic);
extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
+extern int timer_uses_ioapic_pin_0;
#endif
extern int sis_apic_bug; /* dummy */
diff --git a/include/linux/device.h b/include/linux/device.h
index f6e72a65a3f2..e8e53b9accc6 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -200,6 +200,7 @@ extern int class_device_create_file(struct class_device *,
* @node: for internal use by the driver core only.
* @kobj: for internal use by the driver core only.
* @devt_attr: for internal use by the driver core only.
+ * @groups: optional additional groups to be created
* @dev: if set, a symlink to the struct device is created in the sysfs
* directory for this struct class device.
* @class_data: pointer to whatever you want to store here for this struct
@@ -228,6 +229,7 @@ struct class_device {
struct device * dev; /* not necessary, but nice to have */
void * class_data; /* class-specific data */
struct class_device *parent; /* parent of this child device, if there is one */
+ struct attribute_group ** groups; /* optional groups */
void (*release)(struct class_device *dev);
int (*uevent)(struct class_device *dev, char **envp,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ff61817082fa..635690cf3e3d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -14,6 +14,7 @@ enum dma_data_direction {
};
#define DMA_64BIT_MASK 0xffffffffffffffffULL
+#define DMA_48BIT_MASK 0x0000ffffffffffffULL
#define DMA_40BIT_MASK 0x000000ffffffffffULL
#define DMA_39BIT_MASK 0x0000007fffffffffULL
#define DMA_32BIT_MASK 0x00000000ffffffffULL
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 01db7b88a2b1..f4169bbb60eb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -433,8 +433,7 @@ struct net_device
/* register/unregister state machine */
enum { NETREG_UNINITIALIZED=0,
- NETREG_REGISTERING, /* called register_netdevice */
- NETREG_REGISTERED, /* completed register todo */
+ NETREG_REGISTERED, /* completed register_netdevice */
NETREG_UNREGISTERING, /* called unregister_netdevice */
NETREG_UNREGISTERED, /* completed unregister todo */
NETREG_RELEASED, /* called free_netdev */
@@ -506,6 +505,8 @@ struct net_device
/* class/net/name entry */
struct class_device class_dev;
+ /* space for optional statistics and wireless sysfs groups */
+ struct attribute_group *sysfs_groups[3];
};
#define NETDEV_ALIGN 32
@@ -829,21 +830,19 @@ static inline void netif_rx_schedule(struct net_device *dev)
__netif_rx_schedule(dev);
}
-
-static inline void __netif_rx_reschedule(struct net_device *dev, int undo)
-{
- dev->quota += undo;
- list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-}
-
-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
+/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
+ * Do not inline this?
+ */
static inline int netif_rx_reschedule(struct net_device *dev, int undo)
{
if (netif_rx_schedule_prep(dev)) {
unsigned long flags;
+
+ dev->quota += undo;
+
local_irq_save(flags);
- __netif_rx_reschedule(dev, undo);
+ list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
local_irq_restore(flags);
return 1;
}
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index c32e60e79dea..bd14858121ea 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -254,6 +254,7 @@ struct uart_port {
#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
+#define UPF_DEAD ((__force upf_t) (1 << 30))
#define UPF_IOREMAP ((__force upf_t) (1 << 31))
#define UPF_CHANGE_MASK ((__force upf_t) (0x17fff))
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index 4725ff861c57..d5926bfb1fc9 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -955,11 +955,13 @@ enum ieee80211_state {
#define IEEE80211_24GHZ_MIN_CHANNEL 1
#define IEEE80211_24GHZ_MAX_CHANNEL 14
-#define IEEE80211_24GHZ_CHANNELS 14
+#define IEEE80211_24GHZ_CHANNELS (IEEE80211_24GHZ_MAX_CHANNEL - \
+ IEEE80211_24GHZ_MIN_CHANNEL + 1)
#define IEEE80211_52GHZ_MIN_CHANNEL 34
#define IEEE80211_52GHZ_MAX_CHANNEL 165
-#define IEEE80211_52GHZ_CHANNELS 131
+#define IEEE80211_52GHZ_CHANNELS (IEEE80211_52GHZ_MAX_CHANNEL - \
+ IEEE80211_52GHZ_MIN_CHANNEL + 1)
enum {
IEEE80211_CH_PASSIVE_ONLY = (1 << 0),
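Worth noting about the channel-count hunk above: deriving the counts from the min/max channel numbers gives 14 for 2.4 GHz, unchanged, but 165 - 34 + 1 = 132 for 5 GHz, one more than the old literal 131. A tiny standalone check of that arithmetic, duplicating the macros purely for illustration:

    #include <stdio.h>

    #define IEEE80211_24GHZ_MIN_CHANNEL 1
    #define IEEE80211_24GHZ_MAX_CHANNEL 14
    #define IEEE80211_24GHZ_CHANNELS (IEEE80211_24GHZ_MAX_CHANNEL - \
                                      IEEE80211_24GHZ_MIN_CHANNEL + 1)

    #define IEEE80211_52GHZ_MIN_CHANNEL 34
    #define IEEE80211_52GHZ_MAX_CHANNEL 165
    #define IEEE80211_52GHZ_CHANNELS (IEEE80211_52GHZ_MAX_CHANNEL - \
                                      IEEE80211_52GHZ_MIN_CHANNEL + 1)

    int main(void)
    {
            /* prints 14 for 2.4 GHz and 132 (not 131) for 5 GHz */
            printf("2.4 GHz: %d, 5 GHz: %d\n",
                   IEEE80211_24GHZ_CHANNELS, IEEE80211_52GHZ_CHANNELS);
            return 0;
    }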
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h
index b1ebfbae397f..052ed596a4e4 100644
--- a/include/net/ieee80211softmac.h
+++ b/include/net/ieee80211softmac.h
@@ -204,7 +204,8 @@ struct ieee80211softmac_device {
/* couple of flags */
u8 scanning:1, /* protects scanning from being done multiple times at once */
- associated:1;
+ associated:1,
+ running:1;
struct ieee80211softmac_scaninfo *scaninfo;
struct ieee80211softmac_assoc_info associnfo;
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 6c2681dc5b46..637f77eccf0c 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -95,14 +95,15 @@ struct srp_direct_buf {
/*
* We need the packed attribute because the SRP spec puts the list of
- * descriptors at an offset of 20, which is not aligned to the size
- * of struct srp_direct_buf.
+ * descriptors at an offset of 20, which is not aligned to the size of
+ * struct srp_direct_buf. The whole structure must be packed to avoid
+ * having the 20-byte structure padded to 24 bytes on 64-bit architectures.
*/
struct srp_indirect_buf {
struct srp_direct_buf table_desc;
__be32 len;
- struct srp_direct_buf desc_list[0] __attribute__((packed));
-};
+ struct srp_direct_buf desc_list[0];
+} __attribute__((packed));
enum {
SRP_MULTICHAN_SINGLE = 0,
@@ -122,6 +123,11 @@ struct srp_login_req {
u8 target_port_id[16];
};
+/*
+ * The SRP spec defines the size of the LOGIN_RSP structure to be 52
+ * bytes, so it needs to be packed to avoid having it padded to 56
+ * bytes on 64-bit architectures.
+ */
struct srp_login_rsp {
u8 opcode;
u8 reserved1[3];
@@ -132,7 +138,7 @@ struct srp_login_rsp {
__be16 buf_fmt;
u8 rsp_flags;
u8 reserved2[25];
-};
+} __attribute__((packed));
struct srp_login_rej {
u8 opcode;
@@ -207,6 +213,11 @@ enum {
SRP_RSP_FLAG_DIUNDER = 1 << 5
};
+/*
+ * The SRP spec defines the size of the RSP structure to be 36 bytes,
+ * so it needs to be packed to avoid having it padded to 40 bytes on
+ * 64-bit architectures.
+ */
struct srp_rsp {
u8 opcode;
u8 sol_not;
@@ -221,6 +232,6 @@ struct srp_rsp {
__be32 sense_data_len;
__be32 resp_data_len;
u8 data[0];
-};
+} __attribute__((packed));
#endif /* SCSI_SRP_H */
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b0f8da80d7d4..921c22ad16e4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -155,8 +155,26 @@ int ptrace_attach(struct task_struct *task)
if (task->tgid == current->tgid)
goto out;
- write_lock_irq(&tasklist_lock);
+repeat:
+ /*
+ * Nasty, nasty.
+ *
+ * We want to hold both the task-lock and the
+ * tasklist_lock for writing at the same time.
+ * But that's against the rules (tasklist_lock
+ * is taken for reading by interrupts on other
+ * cpu's that may have task_lock).
+ */
task_lock(task);
+ local_irq_disable();
+ if (!write_trylock(&tasklist_lock)) {
+ local_irq_enable();
+ task_unlock(task);
+ do {
+ cpu_relax();
+ } while (!write_can_lock(&tasklist_lock));
+ goto repeat;
+ }
/* the same process cannot be attached many times */
if (task->ptrace & PT_PTRACED)
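The ptrace_attach() hunk above encodes a lock-ordering workaround: task_lock() must be held together with a write-locked tasklist_lock, but tasklist_lock is read-taken from interrupts on other CPUs, so the write lock is only try-acquired and everything is dropped and retried on failure. A hedged userspace sketch of that acquire-or-back-off loop, with two pthread locks standing in for the kernel locks and sched_yield() standing in for the cpu_relax() wait:

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;       /* task_lock(task) */
    static pthread_rwlock_t tasklist_lock = PTHREAD_RWLOCK_INITIALIZER; /* tasklist_lock */

    static void attach_locks(void)
    {
            for (;;) {
                    pthread_mutex_lock(&task_lock);
                    if (pthread_rwlock_trywrlock(&tasklist_lock) == 0)
                            return;         /* got both, in the required order */
                    /* Back off: drop the first lock, give the other side a
                     * chance to release the second, then start over. */
                    pthread_mutex_unlock(&task_lock);
                    sched_yield();
            }
    }

    int main(void)
    {
            attach_locks();
            printf("both locks held\n");
            pthread_rwlock_unlock(&tasklist_lock);
            pthread_mutex_unlock(&task_lock);
            return 0;
    }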
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 59eef42d4a42..ad1c7af65ec8 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -308,26 +308,19 @@ int br_add_bridge(const char *name)
if (ret)
goto err2;
- /* network device kobject is not setup until
- * after rtnl_unlock does it's hotplug magic.
- * so hold reference to avoid race.
- */
- dev_hold(dev);
- rtnl_unlock();
-
ret = br_sysfs_addbr(dev);
- dev_put(dev);
-
- if (ret)
- unregister_netdev(dev);
- out:
- return ret;
+ if (ret)
+ goto err3;
+ rtnl_unlock();
+ return 0;
+ err3:
+ unregister_netdev(dev);
err2:
free_netdev(dev);
err1:
rtnl_unlock();
- goto out;
+ return ret;
}
int br_del_bridge(const char *name)
diff --git a/net/core/dev.c b/net/core/dev.c
index 3bad1afc89fa..2dce673a039b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -193,7 +193,7 @@ static inline struct hlist_head *dev_index_hash(int ifindex)
* Our notifier list
*/
-static BLOCKING_NOTIFIER_HEAD(netdev_chain);
+static RAW_NOTIFIER_HEAD(netdev_chain);
/*
* Device drivers call our routines to queue packets here. We empty the
@@ -736,7 +736,7 @@ int dev_change_name(struct net_device *dev, char *newname)
if (!err) {
hlist_del(&dev->name_hlist);
hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
- blocking_notifier_call_chain(&netdev_chain,
+ raw_notifier_call_chain(&netdev_chain,
NETDEV_CHANGENAME, dev);
}
@@ -751,7 +751,7 @@ int dev_change_name(struct net_device *dev, char *newname)
*/
void netdev_features_change(struct net_device *dev)
{
- blocking_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
+ raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
@@ -766,7 +766,7 @@ EXPORT_SYMBOL(netdev_features_change);
void netdev_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
- blocking_notifier_call_chain(&netdev_chain,
+ raw_notifier_call_chain(&netdev_chain,
NETDEV_CHANGE, dev);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
}
@@ -864,7 +864,7 @@ int dev_open(struct net_device *dev)
/*
* ... and announce new interface.
*/
- blocking_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
}
return ret;
}
@@ -887,7 +887,7 @@ int dev_close(struct net_device *dev)
* Tell people we are going down, so that they can
* prepare to death, when device is still operating.
*/
- blocking_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
+ raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
dev_deactivate(dev);
@@ -924,7 +924,7 @@ int dev_close(struct net_device *dev)
/*
* Tell people we are down
*/
- blocking_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+ raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
return 0;
}
@@ -955,7 +955,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
int err;
rtnl_lock();
- err = blocking_notifier_chain_register(&netdev_chain, nb);
+ err = raw_notifier_chain_register(&netdev_chain, nb);
if (!err) {
for (dev = dev_base; dev; dev = dev->next) {
nb->notifier_call(nb, NETDEV_REGISTER, dev);
@@ -983,7 +983,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
int err;
rtnl_lock();
- err = blocking_notifier_chain_unregister(&netdev_chain, nb);
+ err = raw_notifier_chain_unregister(&netdev_chain, nb);
rtnl_unlock();
return err;
}
@@ -994,12 +994,12 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
* @v: pointer passed unmodified to notifier function
*
* Call all network notifier blocks. Parameters and return value
- * are as for blocking_notifier_call_chain().
+ * are as for raw_notifier_call_chain().
*/
int call_netdevice_notifiers(unsigned long val, void *v)
{
- return blocking_notifier_call_chain(&netdev_chain, val, v);
+ return raw_notifier_call_chain(&netdev_chain, val, v);
}
/* When > 0 there are consumers of rx skb time stamps */
@@ -2308,7 +2308,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
if (dev->flags & IFF_UP &&
((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
IFF_VOLATILE)))
- blocking_notifier_call_chain(&netdev_chain,
+ raw_notifier_call_chain(&netdev_chain,
NETDEV_CHANGE, dev);
if ((flags ^ dev->gflags) & IFF_PROMISC) {
@@ -2353,7 +2353,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
else
dev->mtu = new_mtu;
if (!err && dev->flags & IFF_UP)
- blocking_notifier_call_chain(&netdev_chain,
+ raw_notifier_call_chain(&netdev_chain,
NETDEV_CHANGEMTU, dev);
return err;
}
@@ -2370,7 +2370,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
return -ENODEV;
err = dev->set_mac_address(dev, sa);
if (!err)
- blocking_notifier_call_chain(&netdev_chain,
+ raw_notifier_call_chain(&netdev_chain,
NETDEV_CHANGEADDR, dev);
return err;
}
@@ -2427,7 +2427,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
return -EINVAL;
memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
- blocking_notifier_call_chain(&netdev_chain,
+ raw_notifier_call_chain(&netdev_chain,
NETDEV_CHANGEADDR, dev);
return 0;
@@ -2777,6 +2777,8 @@ int register_netdevice(struct net_device *dev)
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
+ might_sleep();
+
/* When net_device's are persistent, this will be fatal. */
BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
@@ -2863,6 +2865,11 @@ int register_netdevice(struct net_device *dev)
if (!dev->rebuild_header)
dev->rebuild_header = default_rebuild_header;
+ ret = netdev_register_sysfs(dev);
+ if (ret)
+ goto out_err;
+ dev->reg_state = NETREG_REGISTERED;
+
/*
* Default initial state at registry is that the
* device is present.
@@ -2878,14 +2885,11 @@ int register_netdevice(struct net_device *dev)
hlist_add_head(&dev->name_hlist, head);
hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
dev_hold(dev);
- dev->reg_state = NETREG_REGISTERING;
write_unlock_bh(&dev_base_lock);
/* Notify protocols, that a new device appeared. */
- blocking_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
+ raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
- /* Finish registration after unlock */
- net_set_todo(dev);
ret = 0;
out:
@@ -2961,7 +2965,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
rtnl_lock();
/* Rebroadcast unregister notification */
- blocking_notifier_call_chain(&netdev_chain,
+ raw_notifier_call_chain(&netdev_chain,
NETDEV_UNREGISTER, dev);
if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
@@ -3008,7 +3012,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
*
* We are invoked by rtnl_unlock() after it drops the semaphore.
* This allows us to deal with problems:
- * 1) We can create/delete sysfs objects which invoke hotplug
+ * 1) We can delete sysfs objects which invoke hotplug
* without deadlocking with linkwatch via keventd.
* 2) Since we run with the RTNL semaphore not held, we can sleep
* safely in order to wait for the netdev refcnt to drop to zero.
@@ -3017,8 +3021,6 @@ static DEFINE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
struct list_head list = LIST_HEAD_INIT(list);
- int err;
-
/* Need to guard against multiple cpu's getting out of order. */
mutex_lock(&net_todo_run_mutex);
@@ -3041,40 +3043,29 @@ void netdev_run_todo(void)
= list_entry(list.next, struct net_device, todo_list);
list_del(&dev->todo_list);
- switch(dev->reg_state) {
- case NETREG_REGISTERING:
- dev->reg_state = NETREG_REGISTERED;
- err = netdev_register_sysfs(dev);
- if (err)
- printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
- dev->name, err);
- break;
-
- case NETREG_UNREGISTERING:
- netdev_unregister_sysfs(dev);
- dev->reg_state = NETREG_UNREGISTERED;
-
- netdev_wait_allrefs(dev);
+ if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
+ printk(KERN_ERR "network todo '%s' but state %d\n",
+ dev->name, dev->reg_state);
+ dump_stack();
+ continue;
+ }
- /* paranoia */
- BUG_ON(atomic_read(&dev->refcnt));
- BUG_TRAP(!dev->ip_ptr);
- BUG_TRAP(!dev->ip6_ptr);
- BUG_TRAP(!dev->dn_ptr);
+ netdev_unregister_sysfs(dev);
+ dev->reg_state = NETREG_UNREGISTERED;
+ netdev_wait_allrefs(dev);
- /* It must be the very last action,
- * after this 'dev' may point to freed up memory.
- */
- if (dev->destructor)
- dev->destructor(dev);
- break;
+ /* paranoia */
+ BUG_ON(atomic_read(&dev->refcnt));
+ BUG_TRAP(!dev->ip_ptr);
+ BUG_TRAP(!dev->ip6_ptr);
+ BUG_TRAP(!dev->dn_ptr);
- default:
- printk(KERN_ERR "network todo '%s' but state %d\n",
- dev->name, dev->reg_state);
- break;
- }
+ /* It must be the very last action,
+ * after this 'dev' may point to freed up memory.
+ */
+ if (dev->destructor)
+ dev->destructor(dev);
}
out:
@@ -3216,7 +3207,7 @@ int unregister_netdevice(struct net_device *dev)
/* Notify protocols, that we are about to destroy
this device. They should clean all the things.
*/
- blocking_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
+ raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
/*
* Flush the multicast chain
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 341de44c7ed1..646937cc2d84 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -170,13 +170,13 @@ void linkwatch_fire_event(struct net_device *dev)
spin_unlock_irqrestore(&lweventlist_lock, flags);
if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) {
- unsigned long thisevent = jiffies;
+ unsigned long delay = linkwatch_nextevent - jiffies;
- if (thisevent >= linkwatch_nextevent) {
+ /* If we wrap around we'll delay it by at most HZ. */
+ if (!delay || delay > HZ)
schedule_work(&linkwatch_work);
- } else {
- schedule_delayed_work(&linkwatch_work, linkwatch_nextevent - thisevent);
- }
+ else
+ schedule_delayed_work(&linkwatch_work, delay);
}
}
}
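The linkwatch hunk above switches from comparing absolute jiffies to computing a relative delay and then clamping it: the subtraction is unsigned, so a nextevent that has already passed wraps to a huge value, the "delay > HZ" test catches it, and the work is scheduled immediately. A small sketch of that wrap behaviour; HZ is assumed to be 1000 here purely for illustration:

    #include <stdio.h>

    #define HZ 1000UL       /* assumed tick rate, for illustration only */

    /* decide whether to run the link-watch work now or after 'delay' ticks */
    static void fire_event(unsigned long jiffies, unsigned long nextevent)
    {
            unsigned long delay = nextevent - jiffies;      /* unsigned: may wrap */

            if (!delay || delay > HZ)
                    printf("run now      (delay=%lu)\n", delay);
            else
                    printf("run in %lu ticks\n", delay);
    }

    int main(void)
    {
            fire_event(5000, 5300);         /* future event: delayed work */
            fire_event(5000, 4900);         /* event already due: wraps, runs now */
            return 0;
    }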
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c12990c9c603..47a6fceb6771 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -29,7 +29,7 @@ static const char fmt_ulong[] = "%lu\n";
static inline int dev_isalive(const struct net_device *dev)
{
- return dev->reg_state == NETREG_REGISTERED;
+ return dev->reg_state <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
@@ -445,58 +445,33 @@ static struct class net_class = {
void netdev_unregister_sysfs(struct net_device * net)
{
- struct class_device * class_dev = &(net->class_dev);
-
- if (net->get_stats)
- sysfs_remove_group(&class_dev->kobj, &netstat_group);
-
-#ifdef WIRELESS_EXT
- if (net->get_wireless_stats || (net->wireless_handlers &&
- net->wireless_handlers->get_wireless_stats))
- sysfs_remove_group(&class_dev->kobj, &wireless_group);
-#endif
- class_device_del(class_dev);
-
+ class_device_del(&(net->class_dev));
}
/* Create sysfs entries for network device. */
int netdev_register_sysfs(struct net_device *net)
{
struct class_device *class_dev = &(net->class_dev);
- int ret;
+ struct attribute_group **groups = net->sysfs_groups;
+ class_device_initialize(class_dev);
class_dev->class = &net_class;
class_dev->class_data = net;
+ class_dev->groups = groups;
+ BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ);
strlcpy(class_dev->class_id, net->name, BUS_ID_SIZE);
- if ((ret = class_device_register(class_dev)))
- goto out;
- if (net->get_stats &&
- (ret = sysfs_create_group(&class_dev->kobj, &netstat_group)))
- goto out_unreg;
+ if (net->get_stats)
+ *groups++ = &netstat_group;
#ifdef WIRELESS_EXT
- if (net->get_wireless_stats || (net->wireless_handlers &&
- net->wireless_handlers->get_wireless_stats)) {
- ret = sysfs_create_group(&class_dev->kobj, &wireless_group);
- if (ret)
- goto out_cleanup;
- }
- return 0;
-out_cleanup:
- if (net->get_stats)
- sysfs_remove_group(&class_dev->kobj, &netstat_group);
-#else
- return 0;
+ if (net->get_wireless_stats
+ || (net->wireless_handlers && net->wireless_handlers->get_wireless_stats))
+ *groups++ = &wireless_group;
#endif
-out_unreg:
- printk(KERN_WARNING "%s: sysfs attribute registration failed %d\n",
- net->name, ret);
- class_device_unregister(class_dev);
-out:
- return ret;
+ return class_device_add(class_dev);
}
int netdev_sysfs_init(void)
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index fb79ce7d6439..57ea9f6f465c 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -51,11 +51,12 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
spin_lock_irqsave(&mac->lock, flags);
mac->associnfo.associating = 1;
mac->associated = 0; /* just to make sure */
- spin_unlock_irqrestore(&mac->lock, flags);
/* Set a timer for timeout */
/* FIXME: make timeout configurable */
- schedule_delayed_work(&mac->associnfo.timeout, 5 * HZ);
+ if (likely(mac->running))
+ schedule_delayed_work(&mac->associnfo.timeout, 5 * HZ);
+ spin_unlock_irqrestore(&mac->lock, flags);
}
void
@@ -319,6 +320,9 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
u16 status = le16_to_cpup(&resp->status);
struct ieee80211softmac_network *network = NULL;
unsigned long flags;
+
+ if (unlikely(!mac->running))
+ return -ENODEV;
spin_lock_irqsave(&mac->lock, flags);
@@ -377,10 +381,16 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
{
struct ieee80211softmac_device *mac = ieee80211_priv(dev);
unsigned long flags;
+
+ if (unlikely(!mac->running))
+ return -ENODEV;
+
if (memcmp(disassoc->header.addr2, mac->associnfo.bssid, ETH_ALEN))
return 0;
+
if (memcmp(disassoc->header.addr1, mac->dev->dev_addr, ETH_ALEN))
return 0;
+
dprintk(KERN_INFO PFX "got disassoc frame\n");
netif_carrier_off(dev);
spin_lock_irqsave(&mac->lock, flags);
@@ -400,6 +410,9 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
struct ieee80211softmac_device *mac = ieee80211_priv(dev);
struct ieee80211softmac_network *network;
+ if (unlikely(!mac->running))
+ return -ENODEV;
+
network = ieee80211softmac_get_network_by_bssid(mac, resp->header.addr3);
if (!network) {
dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 9a0eac6c61eb..06e332624665 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -86,6 +86,11 @@ ieee80211softmac_auth_queue(void *data)
/* Lock and set flags */
spin_lock_irqsave(&mac->lock, flags);
+ if (unlikely(!mac->running)) {
+ /* Prevent reschedule on workqueue flush */
+ spin_unlock_irqrestore(&mac->lock, flags);
+ return;
+ }
net->authenticated = 0;
net->authenticating = 1;
/* add a timeout call so we eventually give up waiting for an auth reply */
@@ -124,6 +129,9 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
unsigned long flags;
u8 * data;
+ if (unlikely(!mac->running))
+ return -ENODEV;
+
/* Find correct auth queue item */
spin_lock_irqsave(&mac->lock, flags);
list_for_each(list_ptr, &mac->auth_queue) {
@@ -298,8 +306,6 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
/* can't transmit data right now... */
netif_carrier_off(mac->dev);
- /* let's try to re-associate */
- schedule_work(&mac->associnfo.work);
spin_unlock_irqrestore(&mac->lock, flags);
}
@@ -338,6 +344,9 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
struct ieee80211softmac_network *net = NULL;
struct ieee80211softmac_device *mac = ieee80211_priv(dev);
+ if (unlikely(!mac->running))
+ return -ENODEV;
+
if (!deauth) {
dprintk("deauth without deauth packet. eek!\n");
return 0;
@@ -360,5 +369,8 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
}
ieee80211softmac_deauth_from_net(mac, net);
+
+ /* let's try to re-associate */
+ schedule_work(&mac->associnfo.work);
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index be83bdc1644a..6252be2c0db9 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -89,6 +89,8 @@ ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm)
ieee80211softmac_wait_for_scan(sm);
spin_lock_irqsave(&sm->lock, flags);
+ sm->running = 0;
+
/* Free all pending assoc work items */
cancel_delayed_work(&sm->associnfo.work);
@@ -204,6 +206,8 @@ void ieee80211softmac_start(struct net_device *dev)
assert(0);
if (mac->txrates_change)
mac->txrates_change(dev, change, &oldrates);
+
+ mac->running = 1;
}
EXPORT_SYMBOL_GPL(ieee80211softmac_start);
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 2b9e7edfa3ce..d31cf77498c4 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -115,7 +115,15 @@ void ieee80211softmac_scan(void *d)
// TODO: is this if correct, or should we do this only if scanning from assoc request?
if (sm->associnfo.req_essid.len)
ieee80211softmac_send_mgt_frame(sm, &sm->associnfo.req_essid, IEEE80211_STYPE_PROBE_REQ, 0);
+
+ spin_lock_irqsave(&sm->lock, flags);
+ if (unlikely(!sm->running)) {
+ /* Prevent reschedule on workqueue flush */
+ spin_unlock_irqrestore(&sm->lock, flags);
+ break;
+ }
schedule_delayed_work(&si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY);
+ spin_unlock_irqrestore(&sm->lock, flags);
return;
} else {
dprintk(PFX "Not probing Channel %d (not allowed here)\n", si->channels[current_channel_idx].channel);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 18d7fad474d7..c9026dbf4c93 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -337,7 +337,7 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
* Initialise the virtual path cache for the packet. It describes
* how the packet travels inside Linux networking.
*/
- if (likely(skb->dst == NULL)) {
+ if (skb->dst == NULL) {
int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
skb->dev);
if (unlikely(err)) {
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 9bebad07bf2e..cbcae6544622 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -209,7 +209,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
void ip_options_fragment(struct sk_buff * skb)
{
- unsigned char * optptr = skb->nh.raw;
+ unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr);
struct ip_options * opt = &(IPCB(skb)->opt);
int l = opt->optlen;
int optlen;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index f8f3a37a1494..eb2865d5ae28 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -173,6 +173,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
if (err) {
sk->sk_err_soft = -err;
+ kfree_skb(skb);
return err;
}
@@ -181,6 +182,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
sk->sk_route_caps = 0;
+ kfree_skb(skb);
return err;
}
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index c6d169fbdceb..82e665c79991 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -257,7 +257,6 @@ struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name)
/* Unsafe (locking), attrib might change */
return attrib;
}
-EXPORT_SYMBOL(irias_find_attrib);
/*
* Function irias_add_attribute (obj, attrib)
@@ -484,7 +483,6 @@ struct ias_value *irias_new_string_value(char *string)
return value;
}
-EXPORT_SYMBOL(irias_new_string_value);
/*
* Function irias_new_octseq_value (octets, len)
@@ -519,7 +517,6 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
memcpy(value->t.oct_seq, octseq , len);
return value;
}
-EXPORT_SYMBOL(irias_new_octseq_value);
struct ias_value *irias_new_missing_value(void)
{
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 91132f6871d7..f1c7bd29f2cd 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -974,10 +974,10 @@ hfsc_adjust_levels(struct hfsc_class *cl)
do {
level = 0;
list_for_each_entry(p, &cl->children, siblings) {
- if (p->level > level)
- level = p->level;
+ if (p->level >= level)
+ level = p->level + 1;
}
- cl->level = level + 1;
+ cl->level = level;
} while ((cl = cl->cl_parent) != NULL);
}
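The hfsc_adjust_levels() hunk above changes where the +1 happens: the old code took the maximum child level and then added one, so a class with no children ended up at level 1, while the new code adds one per child while scanning, so a childless class correctly stays at level 0. A standalone sketch comparing the two computations over a small array of child levels (the array stands in for the children list; function names are made up):

    #include <stdio.h>

    static int old_level(const int *child, int n)
    {
            int i, level = 0;

            for (i = 0; i < n; i++)
                    if (child[i] > level)
                            level = child[i];
            return level + 1;
    }

    static int new_level(const int *child, int n)
    {
            int i, level = 0;

            for (i = 0; i < n; i++)
                    if (child[i] >= level)
                            level = child[i] + 1;
            return level;
    }

    int main(void)
    {
            int kids[] = { 0, 2, 1 };

            /* with children both versions agree ... */
            printf("children:    old=%d new=%d\n",
                   old_level(kids, 3), new_level(kids, 3));
            /* ... but a leaf class drops from level 1 to level 0 */
            printf("no children: old=%d new=%d\n",
                   old_level(kids, 0), new_level(kids, 0));
            return 0;
    }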
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 56b3bed1108f..331c079f029b 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -200,7 +200,11 @@ input_file() {
print_mtime "$1" >> ${output}
cat "$1" >> ${output}
else
- grep ^file "$1" | cut -d ' ' -f 3
+ cat "$1" | while read type dir file perm ; do
+ if [ "$type" == "file" ]; then
+ echo "$file \\";
+ fi
+ done
fi
elif [ -d "$1" ]; then
dir_filelist "$1"
diff --git a/scripts/mkmakefile b/scripts/mkmakefile
index a22cbedd3b3e..7f9d544f9b6c 100644
--- a/scripts/mkmakefile
+++ b/scripts/mkmakefile
@@ -10,7 +10,10 @@
# $4 - patchlevel
-cat << EOF
+test ! -r $2/Makefile -o -O $2/Makefile || exit 0
+echo " GEN $2/Makefile"
+
+cat << EOF > $2/Makefile
# Automatically generated by $0: don't edit
VERSION = $3
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index cd00e9f07589..6d04504b2fc1 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -487,14 +487,14 @@ static int strrcmp(const char *s, const char *sub)
* atsym =__param*
*
* Pattern 2:
- * Many drivers utilise a *_driver container with references to
+ * Many drivers utilise a *driver container with references to
* add, remove, probe functions etc.
* These functions may often be marked __init and we do not want to
* warn here.
* the pattern is identified by:
* tosec = .init.text | .exit.text | .init.data
* fromsec = .data
- * atsym = *_driver, *_template, *_sht, *_ops, *_probe, *probe_one
+ * atsym = *driver, *_template, *_sht, *_ops, *_probe, *probe_one
**/
static int secref_whitelist(const char *tosec, const char *fromsec,
const char *atsym)
@@ -502,7 +502,7 @@ static int secref_whitelist(const char *tosec, const char *fromsec,
int f1 = 1, f2 = 1;
const char **s;
const char *pat2sym[] = {
- "_driver",
+ "driver",
"_template", /* scsi uses *_template a lot */
"_sht", /* scsi also used *_sht to some extent */
"_ops",