Diffstat:
-rw-r--r--  Documentation/feature-removal-schedule.txt | 9
-rw-r--r--  Documentation/watchdog/watchdog-api.txt | 3
-rw-r--r--  arch/ia64/configs/sn2_defconfig | 4
-rw-r--r--  arch/ia64/kernel/iosapic.c | 4
-rw-r--r--  arch/ia64/kernel/irq.c | 1
-rw-r--r--  drivers/char/watchdog/i8xx_tco.c | 16
-rw-r--r--  drivers/char/watchdog/s3c2410_wdt.c | 6
-rw-r--r--  drivers/char/watchdog/sc1200wdt.c | 2
-rw-r--r--  drivers/ieee1394/ohci1394.c | 2
-rw-r--r--  drivers/ieee1394/sbp2.c | 209
-rw-r--r--  drivers/ieee1394/sbp2.h | 18
-rw-r--r--  drivers/net/ixp2000/enp2611.c | 13
-rw-r--r--  drivers/net/ixp2000/pm3386.c | 30
-rw-r--r--  drivers/net/ixp2000/pm3386.h | 1
-rw-r--r--  drivers/net/sky2.c | 13
-rw-r--r--  fs/Makefile | 2
-rw-r--r--  fs/configfs/dir.c | 137
-rw-r--r--  fs/ocfs2/aops.c | 46
-rw-r--r--  fs/ocfs2/aops.h | 4
-rw-r--r--  fs/ocfs2/extent_map.c | 6
-rw-r--r--  fs/ocfs2/file.c | 86
-rw-r--r--  fs/ocfs2/journal.c | 8
-rw-r--r--  fs/ocfs2/uptodate.c | 4
-rw-r--r--  fs/ocfs2/vote.c | 6
-rw-r--r--  net/802/tr.c | 1
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_gre.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6t_eui64.c | 2
-rw-r--r--  net/ipx/af_ipx.c | 4
-rw-r--r--  net/ipx/ipx_route.c | 2
-rw-r--r--  net/netfilter/nfnetlink_log.c | 4
-rw-r--r--  net/sched/sch_generic.c | 6
38 files changed, 466 insertions(+), 209 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 421bcfff6ad2..43ab119963d5 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -57,6 +57,15 @@ Who: Jody McIntyre <scjody@steamballoon.com>
---------------------------
+What: sbp2: module parameter "force_inquiry_hack"
+When: July 2006
+Why: Superseded by parameter "workarounds". Both parameters are meant to be
+ used ad-hoc and for single devices only, i.e. not in modprobe.conf,
+ therefore the impact of this feature replacement should be low.
+Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+---------------------------
+
What: Video4Linux API 1 ioctls and video_decoder.h from Video devices.
When: July 2006
Why: V4L1 AP1 was replaced by V4L2 API. during migration from 2.4 to 2.6
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt
index c5beb548cfc4..21ed51173662 100644
--- a/Documentation/watchdog/watchdog-api.txt
+++ b/Documentation/watchdog/watchdog-api.txt
@@ -36,6 +36,9 @@ timeout or margin. The simplest way to ping the watchdog is to write
some data to the device. So a very simple watchdog daemon would look
like this:
+#include <stdlib.h>
+#include <fcntl.h>
+
int main(int argc, const char *argv[]) {
int fd=open("/dev/watchdog",O_WRONLY);
if (fd==-1) {
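For reference, the snippet being patched here is the documentation's minimal watchdog daemon: open /dev/watchdog once, then write to it periodically so the hardware timer never expires. A self-contained version of that sketch might look as follows; the <stdio.h> and <unistd.h> includes (beyond the two the hunk adds) and the 10 second ping interval are our own choices so the example builds and runs standalone.

    #include <stdio.h>      /* perror() */
    #include <stdlib.h>     /* exit() */
    #include <fcntl.h>      /* open(), O_WRONLY */
    #include <unistd.h>     /* write(), sleep() */

    int main(int argc, const char *argv[])
    {
            /* Opening the device arms the watchdog on most drivers. */
            int fd = open("/dev/watchdog", O_WRONLY);

            if (fd == -1) {
                    perror("watchdog");
                    exit(1);
            }
            /* Ping well inside the board's timeout so it never fires. */
            while (1) {
                    write(fd, "\0", 1);
                    sleep(10);
            }
    }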
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index f6a8853cd1b4..9ea35398e10d 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -134,7 +134,7 @@ CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_NUMA=y
-CONFIG_NODES_SHIFT=8
+CONFIG_NODES_SHIFT=10
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
@@ -1159,7 +1159,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7956eb9058fc..d58c1c5c903a 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -416,7 +416,7 @@ iosapic_end_level_irq (unsigned int irq)
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte;
- move_irq(irq);
+ move_native_irq(irq);
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
iosapic_eoi(rte->addr, vec);
}
@@ -458,7 +458,7 @@ iosapic_ack_edge_irq (unsigned int irq)
{
irq_desc_t *idesc = irq_descp(irq);
- move_irq(irq);
+ move_native_irq(irq);
/*
* Once we have recorded IRQ_PENDING already, we can mask the
* interrupt for real. This prevents IRQ storms from unhandled
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 5ce908ef9c95..9c72ea3f6432 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -101,7 +101,6 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
if (irq < NR_IRQS) {
irq_affinity[irq] = mask;
- set_irq_info(irq, mask);
irq_redir[irq] = (char) (redir & 0xff);
}
}
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c
index a13395e2c372..fa2ba9ebe42a 100644
--- a/drivers/char/watchdog/i8xx_tco.c
+++ b/drivers/char/watchdog/i8xx_tco.c
@@ -33,11 +33,6 @@
* 82801E (C-ICH) : document number 273599-001, 273645-002,
* 82801EB (ICH5) : document number 252516-001, 252517-003,
* 82801ER (ICH5R) : document number 252516-001, 252517-003,
- * 82801FB (ICH6) : document number 301473-002, 301474-007,
- * 82801FR (ICH6R) : document number 301473-002, 301474-007,
- * 82801FBM (ICH6-M) : document number 301473-002, 301474-007,
- * 82801FW (ICH6W) : document number 301473-001, 301474-007,
- * 82801FRW (ICH6RW) : document number 301473-001, 301474-007
*
* 20000710 Nils Faerber
* Initial Version 0.01
@@ -66,6 +61,10 @@
* 20050807 Wim Van Sebroeck <wim@iguana.be>
* 0.08 Make sure that the watchdog is only "armed" when started.
* (Kernel Bug 4251)
+ * 20060416 Wim Van Sebroeck <wim@iguana.be>
+ * 0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W and ICH6RW and
+ * ICH7 chipsets. (See Kernel Bug 6031 - other code will support these
+ * chipsets)
*/
/*
@@ -90,7 +89,7 @@
#include "i8xx_tco.h"
/* Module and version information */
-#define TCO_VERSION "0.08"
+#define TCO_VERSION "0.09"
#define TCO_MODULE_NAME "i8xx TCO timer"
#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
#define PFX TCO_MODULE_NAME ": "
@@ -391,11 +390,6 @@ static struct pci_device_id i8xx_tco_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }, /* End of list */
};
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 9dc54736e4eb..1ea04e9b2b0b 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -423,6 +423,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
if (tmr_atboot && started == 0) {
printk(KERN_INFO PFX "Starting Watchdog Timer\n");
s3c2410wdt_start();
+ } else if (!tmr_atboot) {
+ /* if we're not enabling the watchdog, then ensure it is
+ * disabled if it has been left running from the bootloader
+ * or other source */
+
+ s3c2410wdt_stop();
}
return 0;
diff --git a/drivers/char/watchdog/sc1200wdt.c b/drivers/char/watchdog/sc1200wdt.c
index 515ce7572049..20b88f9b7be2 100644
--- a/drivers/char/watchdog/sc1200wdt.c
+++ b/drivers/char/watchdog/sc1200wdt.c
@@ -377,7 +377,7 @@ static int __init sc1200wdt_init(void)
{
int ret;
- printk(banner);
+ printk("%s\n", banner);
spin_lock_init(&sc1200wdt_lock);
sema_init(&open_sem, 1);
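The sc1200wdt change above is the standard format-string fix: a variable string must be passed to printk() as an argument, never as the format itself, since any '%' inside it would be parsed as a conversion specification. A small user-space analogue with a made-up banner string shows the difference; the nf_log_packet() prefix changes in the netfilter hunks further down apply the same rule.

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical banner; the '%' is what makes it dangerous. */
            const char *banner = "watchdog driver 100% ready";

            /* printf(banner); would interpret "% r" as a conversion and is
             * undefined behaviour; with attacker-influenced strings this is
             * the classic format-string vulnerability. */
            printf("%s\n", banner); /* safe: banner is printed as data */
            return 0;
    }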
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 19222878aae9..11f13778f139 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -553,7 +553,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
* register content.
* To actually enable physical responses is the job of our interrupt
* handler which programs the physical request filter. */
- reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
+ reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);
DBGMSG("physUpperBoundOffset=%08x",
reg_read(ohci, OHCI1394_PhyUpperBound));
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f4206604db03..8a23fb54c693 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -42,6 +42,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
+#include <linux/stringify.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
@@ -117,7 +118,8 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default
*/
static int max_sectors = SBP2_MAX_SECTORS;
module_param(max_sectors, int, 0444);
-MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
+MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
+ __stringify(SBP2_MAX_SECTORS) ")");
/*
* Exclusive login to sbp2 device? In most cases, the sbp2 driver should
@@ -135,18 +137,45 @@ module_param(exclusive_login, int, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
/*
- * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
- * if your sbp2 device is not properly handling the SCSI inquiry command.
- * This hack makes the inquiry look more like a typical MS Windows inquiry
- * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
+ * If any of the following workarounds is required for your device to work,
+ * please submit the kernel messages logged by sbp2 to the linux1394-devel
+ * mailing list.
*
- * If force_inquiry_hack=1 is required for your device to work,
- * please submit the logged sbp2_firmware_revision value of this device to
- * the linux1394-devel mailing list.
+ * - 128kB max transfer
+ * Limit transfer size. Necessary for some old bridges.
+ *
+ * - 36 byte inquiry
+ * When scsi_mod probes the device, let the inquiry command look like that
+ * from MS Windows.
+ *
+ * - skip mode page 8
+ * Suppress sending of mode_sense for mode page 8 if the device pretends to
+ * support the SCSI Primary Block commands instead of Reduced Block Commands.
+ *
+ * - fix capacity
+ * Tell sd_mod to correct the last sector number reported by read_capacity.
+ * Avoids access beyond actual disk limits on devices with an off-by-one bug.
+ * Don't use this with devices which don't have this bug.
+ *
+ * - override internal blacklist
+ * Instead of adding to the built-in blacklist, use only the workarounds
+ * specified in the module load parameter.
+ * Useful if a blacklist entry interfered with a non-broken device.
*/
+static int sbp2_default_workarounds;
+module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
+MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
+ ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
+ ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
+ ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
+ ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+ ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
+ ", or a combination)");
+
+/* legacy parameter */
static int force_inquiry_hack;
module_param(force_inquiry_hack, int, 0644);
-MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
+MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'");
/*
* Export information about protocols/devices supported by this driver.
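The pattern being introduced above is a bitmask module parameter whose MODULE_PARM_DESC() text embeds the flag values through __stringify(), so the help shown by modinfo cannot drift away from the #defines. A minimal, hypothetical module using the same idiom (all names here are ours, not sbp2's) could look like this; loading it with something like 'modprobe example workarounds=0x3' would set both flags, which is also why the old boolean force_inquiry_hack can simply be folded into the new parameter.

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>
    #include <linux/stringify.h>

    #define EX_WORKAROUND_A 0x1
    #define EX_WORKAROUND_B 0x2

    static int ex_workarounds;      /* 0 = no workarounds */
    module_param_named(workarounds, ex_workarounds, int, 0644);
    MODULE_PARM_DESC(workarounds, "Work around bugs (A = "
            __stringify(EX_WORKAROUND_A) ", B = "
            __stringify(EX_WORKAROUND_B) ", or a combination)");

    static int __init ex_init(void)
    {
            if (ex_workarounds & EX_WORKAROUND_A)
                    printk(KERN_INFO "example: workaround A enabled\n");
            return 0;
    }

    static void __exit ex_exit(void)
    {
    }

    module_init(ex_init);
    module_exit(ex_exit);
    MODULE_LICENSE("GPL");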
@@ -266,14 +295,55 @@ static struct hpsb_protocol_driver sbp2_driver = {
};
/*
- * List of device firmwares that require the inquiry hack.
- * Yields a few false positives but did not break other devices so far.
+ * List of devices with known bugs.
+ *
+ * The firmware_revision field, masked with 0xffff00, is the best indicator
+ * for the type of bridge chip of a device. It yields a few false positives
+ * but this did not break correctly behaving devices so far.
*/
-static u32 sbp2_broken_inquiry_list[] = {
- 0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */
- /* DViCO Momobay CX-1 */
- 0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */
- /* QPS Fire DVDBurner */
+static const struct {
+ u32 firmware_revision;
+ u32 model_id;
+ unsigned workarounds;
+} sbp2_workarounds_table[] = {
+ /* TSB42AA9 */ {
+ .firmware_revision = 0x002800,
+ .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
+ SBP2_WORKAROUND_MODE_SENSE_8,
+ },
+ /* Initio bridges, actually only needed for some older ones */ {
+ .firmware_revision = 0x000200,
+ .workarounds = SBP2_WORKAROUND_INQUIRY_36,
+ },
+ /* Symbios bridge */ {
+ .firmware_revision = 0xa0b800,
+ .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
+ },
+ /*
+ * Note about the following Apple iPod blacklist entries:
+ *
+ * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our
+ * matching logic treats 0 as a wildcard, we cannot match this ID
+ * without rewriting the matching routine. Fortunately these iPods
+ * do not feature the read_capacity bug according to one report.
+ * Read_capacity behaviour as well as model_id could change due to
+ * Apple-supplied firmware updates though.
+ */
+ /* iPod 4th generation */ {
+ .firmware_revision = 0x0a2700,
+ .model_id = 0x000021,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ },
+ /* iPod mini */ {
+ .firmware_revision = 0x0a2700,
+ .model_id = 0x000023,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ },
+ /* iPod Photo */ {
+ .firmware_revision = 0x0a2700,
+ .model_id = 0x00007e,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ }
};
/**************************************
@@ -765,11 +835,16 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
/* Register the status FIFO address range. We could use the same FIFO
* for targets at different nodes. However we need different FIFOs per
- * target in order to support multi-unit devices. */
+ * target in order to support multi-unit devices.
+ * The FIFO is located out of the local host controller's physical range
+ * but, if possible, within the posted write area. Status writes will
+ * then be performed as unified transactions. This slightly reduces
+ * bandwidth usage, and some Prolific based devices seem to require it.
+ */
scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
&sbp2_highlevel, ud->ne->host, &sbp2_ops,
sizeof(struct sbp2_status_block), sizeof(quadlet_t),
- ~0ULL, ~0ULL);
+ 0x010000000000ULL, CSR1212_ALL_SPACE_END);
if (!scsi_id->status_fifo_addr) {
SBP2_ERR("failed to allocate status FIFO address range");
goto failed_alloc;
@@ -1450,7 +1525,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
struct csr1212_dentry *dentry;
u64 management_agent_addr;
u32 command_set_spec_id, command_set, unit_characteristics,
- firmware_revision, workarounds;
+ firmware_revision;
+ unsigned workarounds;
int i;
SBP2_DEBUG_ENTER();
@@ -1506,12 +1582,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
case SBP2_FIRMWARE_REVISION_KEY:
/* Firmware revision */
firmware_revision = kv->value.immediate;
- if (force_inquiry_hack)
- SBP2_INFO("sbp2_firmware_revision = %x",
- (unsigned int)firmware_revision);
- else
- SBP2_DEBUG("sbp2_firmware_revision = %x",
- (unsigned int)firmware_revision);
+ SBP2_DEBUG("sbp2_firmware_revision = %x",
+ (unsigned int)firmware_revision);
break;
default:
@@ -1519,41 +1591,44 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
}
}
- /* This is the start of our broken device checking. We try to hack
- * around oddities and known defects. */
- workarounds = 0x0;
+ workarounds = sbp2_default_workarounds;
+ if (force_inquiry_hack) {
+ SBP2_WARN("force_inquiry_hack is deprecated. "
+ "Use parameter 'workarounds' instead.");
+ workarounds |= SBP2_WORKAROUND_INQUIRY_36;
+ }
- /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
- * bridge with 128KB max transfer size limitation. For sanity, we
- * only voice this when the current max_sectors setting
- * exceeds the 128k limit. By default, that is not the case.
- *
- * It would be really nice if we could detect this before the scsi
- * host gets initialized. That way we can down-force the
- * max_sectors to account for it. That is not currently
- * possible. */
- if ((firmware_revision & 0xffff00) ==
- SBP2_128KB_BROKEN_FIRMWARE &&
- (max_sectors * 512) > (128*1024)) {
- SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
- NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
- SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
- max_sectors);
- workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
- }
-
- /* Check for a blacklisted set of devices that require us to force
- * a 36 byte host inquiry. This can be overriden as a module param
- * (to force all hosts). */
- for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
- if ((firmware_revision & 0xffff00) ==
- sbp2_broken_inquiry_list[i]) {
- SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
- NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
- workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
- break; /* No need to continue. */
+ if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
+ for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
+ if (sbp2_workarounds_table[i].firmware_revision &&
+ sbp2_workarounds_table[i].firmware_revision !=
+ (firmware_revision & 0xffff00))
+ continue;
+ if (sbp2_workarounds_table[i].model_id &&
+ sbp2_workarounds_table[i].model_id != ud->model_id)
+ continue;
+ workarounds |= sbp2_workarounds_table[i].workarounds;
+ break;
}
- }
+
+ if (workarounds)
+ SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
+ "(firmware_revision 0x%06x, vendor_id 0x%06x,"
+ " model_id 0x%06x)",
+ NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
+ workarounds, firmware_revision,
+ ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
+ ud->model_id);
+
+ /* We would need one SCSI host template for each target to adjust
+ * max_sectors on the fly, therefore warn only. */
+ if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+ (max_sectors * 512) > (128 * 1024))
+ SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
+ "max transfer size. WARNING: Current max_sectors "
+ "setting is larger than 128KB (%d sectors)",
+ NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
+ max_sectors);
/* If this is a logical unit directory entry, process the parent
* to get the values. */
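The detection code above becomes a table-driven quirk lookup: each entry names a firmware_revision (compared after masking with 0xffff00), optionally a model_id, and the workaround bits to apply; a zero field acts as a wildcard and the first matching entry wins. A stand-alone sketch of that matching logic, with invented entries and flag values, is:

    #include <stdio.h>

    struct quirk {
            unsigned int firmware_revision; /* 0 = match any */
            unsigned int model_id;          /* 0 = match any */
            unsigned int workarounds;       /* flag bits to apply */
    };

    static const struct quirk table[] = {
            { 0x002800, 0x000000, 0x2 | 0x4 },      /* bridge type only */
            { 0x0a2700, 0x000021, 0x8 },            /* one specific model */
    };

    static unsigned int lookup(unsigned int fw, unsigned int model)
    {
            unsigned int i;

            for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                    if (table[i].firmware_revision &&
                        table[i].firmware_revision != (fw & 0xffff00))
                            continue;
                    if (table[i].model_id && table[i].model_id != model)
                            continue;
                    return table[i].workarounds;    /* first match wins */
            }
            return 0;
    }

    int main(void)
    {
            printf("workarounds: 0x%x\n", lookup(0x0a2734, 0x000021));
            return 0;
    }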
@@ -2447,19 +2522,25 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
scsi_id->sdev = sdev;
- if (force_inquiry_hack ||
- scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
+ if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
- sdev->skip_ms_page_8 = 1;
- }
return 0;
}
static int sbp2scsi_slave_configure(struct scsi_device *sdev)
{
+ struct scsi_id_instance_data *scsi_id =
+ (struct scsi_id_instance_data *)sdev->host->hostdata[0];
+
blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
+
+ if (sdev->type == TYPE_DISK &&
+ scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
+ sdev->skip_ms_page_8 = 1;
+ if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
+ sdev->fix_capacity = 1;
return 0;
}
@@ -2603,7 +2684,9 @@ static int sbp2_module_init(void)
scsi_driver_template.cmd_per_lun = 1;
}
- /* Set max sectors (module load option). Default is 255 sectors. */
+ if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+ (max_sectors * 512) > (128 * 1024))
+ max_sectors = 128 * 1024 / 512;
scsi_driver_template.max_sectors = max_sectors;
/* Register our high level driver with 1394 stack */
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index e2d357a9ea3a..f4ccc9d0fba4 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -227,11 +227,6 @@ struct sbp2_status_block {
#define SBP2_SW_VERSION_ENTRY 0x00010483
/*
- * Other misc defines
- */
-#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
-
-/*
* SCSI specific stuff
*/
@@ -239,6 +234,13 @@ struct sbp2_status_block {
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
#define SBP2_MAX_CMDS 8 /* This should be safe */
+/* Flags for detected oddities and brokeness */
+#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
+#define SBP2_WORKAROUND_INQUIRY_36 0x2
+#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
+#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
+#define SBP2_WORKAROUND_OVERRIDE 0x100
+
/* This is the two dma types we use for cmd_dma below */
enum cmd_dma_types {
CMD_DMA_NONE,
@@ -268,10 +270,6 @@ struct sbp2_command_info {
};
-/* A list of flags for detected oddities and brokeness. */
-#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
-#define SBP2_BREAKAGE_INQUIRY_HACK 0x2
-
struct sbp2scsi_host_info;
/*
@@ -345,7 +343,7 @@ struct scsi_id_instance_data {
struct Scsi_Host *scsi_host;
/* Device specific workarounds/brokeness */
- u32 workarounds;
+ unsigned workarounds;
};
/* Sbp2 host data structure (one per IEEE1394 host) */
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index 6f7dce8eba51..b67f586d7392 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -149,6 +149,8 @@ static void enp2611_check_link_status(unsigned long __dummy)
int status;
dev = nds[i];
+ if (dev == NULL)
+ continue;
status = pm3386_is_link_up(i);
if (status && !netif_carrier_ok(dev)) {
@@ -191,6 +193,7 @@ static void enp2611_set_port_admin_status(int port, int up)
static int __init enp2611_init_module(void)
{
+ int ports;
int i;
if (!machine_is_enp2611())
@@ -199,7 +202,8 @@ static int __init enp2611_init_module(void)
caleb_reset();
pm3386_reset();
- for (i = 0; i < 3; i++) {
+ ports = pm3386_port_count();
+ for (i = 0; i < ports; i++) {
nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
if (nds[i] == NULL) {
while (--i >= 0)
@@ -215,9 +219,10 @@ static int __init enp2611_init_module(void)
ixp2400_msf_init(&enp2611_msf_parameters);
- if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) {
- for (i = 0; i < 3; i++)
- free_netdev(nds[i]);
+ if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
+ for (i = 0; i < ports; i++)
+ if (nds[i])
+ free_netdev(nds[i]);
return -EINVAL;
}
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c
index 5c7ab7564053..5224651c9aac 100644
--- a/drivers/net/ixp2000/pm3386.c
+++ b/drivers/net/ixp2000/pm3386.c
@@ -86,40 +86,53 @@ static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
pm3386_reg_write(port >> 1, reg, value);
}
+int pm3386_secondary_present(void)
+{
+ return pm3386_reg_read(1, 0) == 0x3386;
+}
void pm3386_reset(void)
{
u8 mac[3][6];
+ int secondary;
+
+ secondary = pm3386_secondary_present();
/* Save programmed MAC addresses. */
pm3386_get_mac(0, mac[0]);
pm3386_get_mac(1, mac[1]);
- pm3386_get_mac(2, mac[2]);
+ if (secondary)
+ pm3386_get_mac(2, mac[2]);
/* Assert analog and digital reset. */
pm3386_reg_write(0, 0x002, 0x0060);
- pm3386_reg_write(1, 0x002, 0x0060);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0060);
mdelay(1);
/* Deassert analog reset. */
pm3386_reg_write(0, 0x002, 0x0062);
- pm3386_reg_write(1, 0x002, 0x0062);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0062);
mdelay(10);
/* Deassert digital reset. */
pm3386_reg_write(0, 0x002, 0x0063);
- pm3386_reg_write(1, 0x002, 0x0063);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0063);
mdelay(10);
/* Restore programmed MAC addresses. */
pm3386_set_mac(0, mac[0]);
pm3386_set_mac(1, mac[1]);
- pm3386_set_mac(2, mac[2]);
+ if (secondary)
+ pm3386_set_mac(2, mac[2]);
/* Disable carrier on all ports. */
pm3386_set_carrier(0, 0);
pm3386_set_carrier(1, 0);
- pm3386_set_carrier(2, 0);
+ if (secondary)
+ pm3386_set_carrier(2, 0);
}
static u16 swaph(u16 x)
@@ -127,6 +140,11 @@ static u16 swaph(u16 x)
return ((x << 8) | (x >> 8)) & 0xffff;
}
+int pm3386_port_count(void)
+{
+ return 2 + pm3386_secondary_present();
+}
+
void pm3386_init_port(int port)
{
int pm = port >> 1;
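The pm3386 rework rests on a single probe: read register 0 of the would-be second chip and compare it with the device ID 0x3386, then derive the port count (two ports on the first chip, one more if the optional second chip answers). A toy version of that detect-then-count approach, with a stubbed register read standing in for the real bus access, might be:

    #include <stdio.h>

    #define PM3386_ID 0x3386

    /* Stub standing in for pm3386_reg_read(chip, 0); pretend only the
     * first chip is populated and the second read returns bus garbage. */
    static unsigned int reg_read_id(int chip)
    {
            return chip == 0 ? PM3386_ID : 0xffff;
    }

    static int secondary_present(void)
    {
            return reg_read_id(1) == PM3386_ID;
    }

    static int port_count(void)
    {
            return 2 + secondary_present();
    }

    int main(void)
    {
            printf("ports: %d\n", port_count());    /* 2 here */
            return 0;
    }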
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h
index fe92bb056ac4..cc4183dca911 100644
--- a/drivers/net/ixp2000/pm3386.h
+++ b/drivers/net/ixp2000/pm3386.h
@@ -13,6 +13,7 @@
#define __PM3386_H
void pm3386_reset(void);
+int pm3386_port_count(void);
void pm3386_init_port(int port);
void pm3386_get_mac(int port, u8 *mac);
void pm3386_set_mac(int port, u8 *mac);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ffd267fab21d..62be6d99d05c 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1020,8 +1020,19 @@ static int sky2_up(struct net_device *dev)
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u32 ramsize, rxspace, imask;
- int err = -ENOMEM;
+ int err;
+ struct net_device *otherdev = hw->dev[sky2->port^1];
+ /* Block bringing up both ports at the same time on a dual port card.
+ * There is an unfixed bug where receiver gets confused and picks up
+ * packets out of order. Until this is fixed, prevent data corruption.
+ */
+ if (otherdev && netif_running(otherdev)) {
+ printk(KERN_INFO PFX "dual port support is disabled.\n");
+ return -EBUSY;
+ }
+
+ err = -ENOMEM;
if (netif_msg_ifup(sky2))
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
diff --git a/fs/Makefile b/fs/Makefile
index 83bf478e786b..078d3d1191a5 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_DNOTIFY) += dnotify.o
obj-$(CONFIG_PROC_FS) += proc/
obj-y += partitions/
obj-$(CONFIG_SYSFS) += sysfs/
+obj-$(CONFIG_CONFIGFS_FS) += configfs/
obj-y += devpts/
obj-$(CONFIG_PROFILING) += dcookies.o
@@ -100,5 +101,4 @@ obj-$(CONFIG_BEFS_FS) += befs/
obj-$(CONFIG_HOSTFS) += hostfs/
obj-$(CONFIG_HPPFS) += hppfs/
obj-$(CONFIG_DEBUG_FS) += debugfs/
-obj-$(CONFIG_CONFIGFS_FS) += configfs/
obj-$(CONFIG_OCFS2_FS) += ocfs2/
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 5638c8f9362f..5f952187fc53 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -505,13 +505,15 @@ static int populate_groups(struct config_group *group)
int i;
if (group->default_groups) {
- /* FYI, we're faking mkdir here
+ /*
+ * FYI, we're faking mkdir here
* I'm not sure we need this semaphore, as we're called
* from our parent's mkdir. That holds our parent's
* i_mutex, so afaik lookup cannot continue through our
* parent to find us, let alone mess with our tree.
* That said, taking our i_mutex is closer to mkdir
- * emulation, and shouldn't hurt. */
+ * emulation, and shouldn't hurt.
+ */
mutex_lock(&dentry->d_inode->i_mutex);
for (i = 0; group->default_groups[i]; i++) {
@@ -546,20 +548,34 @@ static void unlink_obj(struct config_item *item)
item->ci_group = NULL;
item->ci_parent = NULL;
+
+ /* Drop the reference for ci_entry */
config_item_put(item);
+ /* Drop the reference for ci_parent */
config_group_put(group);
}
}
static void link_obj(struct config_item *parent_item, struct config_item *item)
{
- /* Parent seems redundant with group, but it makes certain
- * traversals much nicer. */
+ /*
+ * Parent seems redundant with group, but it makes certain
+ * traversals much nicer.
+ */
item->ci_parent = parent_item;
+
+ /*
+ * We hold a reference on the parent for the child's ci_parent
+ * link.
+ */
item->ci_group = config_group_get(to_config_group(parent_item));
list_add_tail(&item->ci_entry, &item->ci_group->cg_children);
+ /*
+ * We hold a reference on the child for ci_entry on the parent's
+ * cg_children
+ */
config_item_get(item);
}
@@ -684,6 +700,10 @@ static void client_drop_item(struct config_item *parent_item,
type = parent_item->ci_type;
BUG_ON(!type);
+ /*
+ * If ->drop_item() exists, it is responsible for the
+ * config_item_put().
+ */
if (type->ct_group_ops && type->ct_group_ops->drop_item)
type->ct_group_ops->drop_item(to_config_group(parent_item),
item);
@@ -694,23 +714,28 @@ static void client_drop_item(struct config_item *parent_item,
static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
- int ret;
+ int ret, module_got = 0;
struct config_group *group;
struct config_item *item;
struct config_item *parent_item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
struct config_item_type *type;
- struct module *owner;
+ struct module *owner = NULL;
char *name;
- if (dentry->d_parent == configfs_sb->s_root)
- return -EPERM;
+ if (dentry->d_parent == configfs_sb->s_root) {
+ ret = -EPERM;
+ goto out;
+ }
sd = dentry->d_parent->d_fsdata;
- if (!(sd->s_type & CONFIGFS_USET_DIR))
- return -EPERM;
+ if (!(sd->s_type & CONFIGFS_USET_DIR)) {
+ ret = -EPERM;
+ goto out;
+ }
+ /* Get a working ref for the duration of this function */
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
subsys = to_config_group(parent_item)->cg_subsys;
@@ -719,15 +744,16 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (!type || !type->ct_group_ops ||
(!type->ct_group_ops->make_group &&
!type->ct_group_ops->make_item)) {
- config_item_put(parent_item);
- return -EPERM; /* What lack-of-mkdir returns */
+ ret = -EPERM; /* Lack-of-mkdir returns -EPERM */
+ goto out_put;
}
name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
if (!name) {
- config_item_put(parent_item);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_put;
}
+
snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);
down(&subsys->su_sem);
@@ -748,40 +774,67 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
kfree(name);
if (!item) {
- config_item_put(parent_item);
- return -ENOMEM;
+ /*
+ * If item == NULL, then link_obj() was never called.
+ * There are no extra references to clean up.
+ */
+ ret = -ENOMEM;
+ goto out_put;
}
- ret = -EINVAL;
+ /*
+ * link_obj() has been called (via link_group() for groups).
+ * From here on out, errors must clean that up.
+ */
+
type = item->ci_type;
- if (type) {
- owner = type->ct_owner;
- if (try_module_get(owner)) {
- if (group) {
- ret = configfs_attach_group(parent_item,
- item,
- dentry);
- } else {
- ret = configfs_attach_item(parent_item,
- item,
- dentry);
- }
+ if (!type) {
+ ret = -EINVAL;
+ goto out_unlink;
+ }
- if (ret) {
- down(&subsys->su_sem);
- if (group)
- unlink_group(group);
- else
- unlink_obj(item);
- client_drop_item(parent_item, item);
- up(&subsys->su_sem);
+ owner = type->ct_owner;
+ if (!try_module_get(owner)) {
+ ret = -EINVAL;
+ goto out_unlink;
+ }
- config_item_put(parent_item);
- module_put(owner);
- }
- }
+ /*
+ * I hate doing it this way, but if there is
+ * an error, module_put() probably should
+ * happen after any cleanup.
+ */
+ module_got = 1;
+
+ if (group)
+ ret = configfs_attach_group(parent_item, item, dentry);
+ else
+ ret = configfs_attach_item(parent_item, item, dentry);
+
+out_unlink:
+ if (ret) {
+ /* Tear down everything we built up */
+ down(&subsys->su_sem);
+ if (group)
+ unlink_group(group);
+ else
+ unlink_obj(item);
+ client_drop_item(parent_item, item);
+ up(&subsys->su_sem);
+
+ if (module_got)
+ module_put(owner);
}
+out_put:
+ /*
+ * link_obj()/link_group() took a reference from child->parent,
+ * so the parent is safely pinned. We can drop our working
+ * reference.
+ */
+ config_item_put(parent_item);
+
+out:
return ret;
}
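The configfs_mkdir() rework above is a conversion to the kernel's usual goto-unwind error handling: resources are taken in a fixed order and each failure jumps to the label that releases exactly what has already been taken, so every cleanup exists in one place. A self-contained user-space illustration of the same discipline (a string copy and a file standing in for the item link and module reference) is:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int create_record(const char *name, const char *path)
    {
            char *copy;
            FILE *f;
            int ret = 0;

            copy = strdup(name);
            if (!copy) {
                    ret = -1;
                    goto out;               /* nothing acquired yet */
            }

            f = fopen(path, "w");
            if (!f) {
                    ret = -1;
                    goto out_free;          /* undo only the strdup() */
            }

            if (fputs(copy, f) == EOF)
                    ret = -1;               /* fall through: both cleanups run */

            fclose(f);
    out_free:
            free(copy);
    out:
            return ret;
    }

    int main(void)
    {
            return create_record("example", "/tmp/configfs-sketch.txt") ? 1 : 0;
    }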
@@ -801,6 +854,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
if (sd->s_type & CONFIGFS_USET_DEFAULT)
return -EPERM;
+ /* Get a working ref until we have the child */
parent_item = configfs_get_config_item(dentry->d_parent);
subsys = to_config_group(parent_item)->cg_subsys;
BUG_ON(!subsys);
@@ -817,6 +871,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
return ret;
}
+ /* Get a working ref for the duration of this function */
item = configfs_get_config_item(dentry);
/* Drop reference from above, item already holds one. */
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0d858d0b25be..47152bf9a7f2 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -276,13 +276,29 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
return ret;
}
+/* This can also be called from ocfs2_write_zero_page() which has done
+ * its own cluster locking. */
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+ unsigned from, unsigned to)
+{
+ int ret;
+
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ ret = block_prepare_write(page, from, to, ocfs2_get_block);
+
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ return ret;
+}
+
/*
* ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
* from loopback. It must be able to perform its own locking around
* ocfs2_get_block().
*/
-int ocfs2_prepare_write(struct file *file, struct page *page,
- unsigned from, unsigned to)
+static int ocfs2_prepare_write(struct file *file, struct page *page,
+ unsigned from, unsigned to)
{
struct inode *inode = page->mapping->host;
int ret;
@@ -295,11 +311,7 @@ int ocfs2_prepare_write(struct file *file, struct page *page,
goto out;
}
- down_read(&OCFS2_I(inode)->ip_alloc_sem);
-
- ret = block_prepare_write(page, from, to, ocfs2_get_block);
-
- up_read(&OCFS2_I(inode)->ip_alloc_sem);
+ ret = ocfs2_prepare_write_nolock(inode, page, from, to);
ocfs2_meta_unlock(inode, 0);
out:
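ocfs2_prepare_write_nolock() is the usual locked/_nolock split: the plain entry point takes the outer lock itself, while the _nolock variant assumes its caller already holds it (here, ocfs2_write_zero_page() has done its own cluster locking). A generic, self-contained sketch of that shape, with a pthread rwlock standing in for the cluster lock, is:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;
    static int blocks_prepared;

    /* Caller must already hold alloc_sem (read side). */
    static int prepare_write_nolock(int from, int to)
    {
            blocks_prepared += to - from;
            return 0;
    }

    /* Convenience wrapper for callers that do not hold the lock yet. */
    static int prepare_write(int from, int to)
    {
            int ret;

            pthread_rwlock_rdlock(&alloc_sem);
            ret = prepare_write_nolock(from, to);
            pthread_rwlock_unlock(&alloc_sem);
            return ret;
    }

    int main(void)
    {
            prepare_write(0, 4);            /* lock taken internally */

            pthread_rwlock_rdlock(&alloc_sem);
            prepare_write_nolock(4, 8);     /* caller already holds the lock */
            pthread_rwlock_unlock(&alloc_sem);

            printf("blocks prepared: %d\n", blocks_prepared);
            return 0;
    }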
@@ -625,11 +637,31 @@ static ssize_t ocfs2_direct_IO(int rw,
int ret;
mlog_entry_void();
+
+ /*
+ * We get PR data locks even for O_DIRECT. This allows
+ * concurrent O_DIRECT I/O but doesn't let O_DIRECT with
+ * extending and buffered zeroing writes race. If they did
+ * race then the buffered zeroing could be written back after
+ * the O_DIRECT I/O. It's one thing to tell people not to mix
+ * buffered and O_DIRECT writes, but expecting them to
+ * understand that file extension is also an implicit buffered
+ * write is too much. By getting the PR we force writeback of
+ * the buffered zeroing before proceeding.
+ */
+ ret = ocfs2_data_lock(inode, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ ocfs2_data_unlock(inode, 0);
+
ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
inode->i_sb->s_bdev, iov, offset,
nr_segs,
ocfs2_direct_IO_get_blocks,
ocfs2_dio_end_io);
+out:
mlog_exit(ret);
return ret;
}
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index d40456d509a0..e88c3f0b8fa9 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -22,8 +22,8 @@
#ifndef OCFS2_AOPS_H
#define OCFS2_AOPS_H
-int ocfs2_prepare_write(struct file *file, struct page *page,
- unsigned from, unsigned to);
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+ unsigned from, unsigned to);
struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
struct page *page,
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 4601fc256f11..1a5c69071df6 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -569,7 +569,7 @@ static int ocfs2_extent_map_insert(struct inode *inode,
ret = -ENOMEM;
ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!ctxt.new_ent) {
mlog_errno(ret);
return ret;
@@ -583,14 +583,14 @@ static int ocfs2_extent_map_insert(struct inode *inode,
if (ctxt.need_left && !ctxt.left_ent) {
ctxt.left_ent =
kmem_cache_alloc(ocfs2_em_ent_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!ctxt.left_ent)
break;
}
if (ctxt.need_right && !ctxt.right_ent) {
ctxt.right_ent =
kmem_cache_alloc(ocfs2_em_ent_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!ctxt.right_ent)
break;
}
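The GFP_KERNEL to GFP_NOFS conversions in this and the following ocfs2 files all close the same hole: an allocation made while filesystem locks are held must not let the allocator recurse into filesystem writeback to reclaim memory, or reclaim can block on the very locks the caller owns. A minimal, hypothetical module fragment showing the flag in use (our example, not ocfs2 code) is:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    static void *example_buf;

    static int __init gfp_example_init(void)
    {
            /* Safe to call with fs locks held: may sleep, but will not
             * recurse into filesystem reclaim the way GFP_KERNEL can. */
            example_buf = kmalloc(128, GFP_NOFS);
            if (!example_buf)
                    return -ENOMEM;
            return 0;
    }

    static void __exit gfp_example_exit(void)
    {
            kfree(example_buf);
    }

    module_init(gfp_example_init);
    module_exit(gfp_example_exit);
    MODULE_LICENSE("GPL");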
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 581eb451a41a..a9559c874530 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -613,7 +613,8 @@ leave:
/* Some parts of this taken from generic_cont_expand, which turned out
* to be too fragile to do exactly what we need without us having to
- * worry about recursive locking in ->commit_write(). */
+ * worry about recursive locking in ->prepare_write() and
+ * ->commit_write(). */
static int ocfs2_write_zero_page(struct inode *inode,
u64 size)
{
@@ -641,7 +642,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
goto out;
}
- ret = ocfs2_prepare_write(NULL, page, offset, offset);
+ ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
@@ -695,13 +696,26 @@ out:
return ret;
}
+/*
+ * A tail_to_skip value > 0 indicates that we're being called from
+ * ocfs2_file_aio_write(). This has the following implications:
+ *
+ * - we don't want to update i_size
+ * - di_bh will be NULL, which is fine because it's only used in the
+ * case where we want to update i_size.
+ * - ocfs2_zero_extend() will then only be filling the hole created
+ * between i_size and the start of the write.
+ */
static int ocfs2_extend_file(struct inode *inode,
struct buffer_head *di_bh,
- u64 new_i_size)
+ u64 new_i_size,
+ size_t tail_to_skip)
{
int ret = 0;
u32 clusters_to_add;
+ BUG_ON(!tail_to_skip && !di_bh);
+
/* setattr sometimes calls us like this. */
if (new_i_size == 0)
goto out;
@@ -714,27 +728,44 @@ static int ocfs2_extend_file(struct inode *inode,
OCFS2_I(inode)->ip_clusters;
if (clusters_to_add) {
- ret = ocfs2_extend_allocation(inode, clusters_to_add);
+ /*
+ * protect the pages that ocfs2_zero_extend is going to
+ * be pulling into the page cache.. we do this before the
+ * metadata extend so that we don't get into the situation
+ * where we've extended the metadata but can't get the data
+ * lock to zero.
+ */
+ ret = ocfs2_data_lock(inode, 1);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_zero_extend(inode, new_i_size);
+ ret = ocfs2_extend_allocation(inode, clusters_to_add);
if (ret < 0) {
mlog_errno(ret);
- goto out;
+ goto out_unlock;
}
- }
- /* No allocation required, we just use this helper to
- * do a trivial update of i_size. */
- ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
+ ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+ }
+
+ if (!tail_to_skip) {
+ /* We're being called from ocfs2_setattr() which wants
+ * us to update i_size */
+ ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+ if (ret < 0)
+ mlog_errno(ret);
}
+out_unlock:
+ if (clusters_to_add) /* this is the only case in which we lock */
+ ocfs2_data_unlock(inode, 1);
+
out:
return ret;
}
@@ -793,7 +824,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
if (i_size_read(inode) > attr->ia_size)
status = ocfs2_truncate_file(inode, bh, attr->ia_size);
else
- status = ocfs2_extend_file(inode, bh, attr->ia_size);
+ status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -1049,21 +1080,12 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
if (!clusters)
break;
- ret = ocfs2_extend_allocation(inode, clusters);
+ ret = ocfs2_extend_file(inode, NULL, newsize, count);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
-
- /* Fill any holes which would've been created by this
- * write. If we're O_APPEND, this will wind up
- * (correctly) being a noop. */
- ret = ocfs2_zero_extend(inode, (u64) newsize - count);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
break;
}
@@ -1146,6 +1168,22 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
ocfs2_iocb_set_rw_locked(iocb);
}
+ /*
+ * We're fine letting folks race truncates and extending
+ * writes with read across the cluster, just like they can
+ * locally. Hence no rw_lock during read.
+ *
+ * Take and drop the meta data lock to update inode fields
+ * like i_size. This allows the checks down below
+ * generic_file_aio_read() a chance of actually working.
+ */
+ ret = ocfs2_meta_lock(inode, NULL, NULL, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto bail;
+ }
+ ocfs2_meta_unlock(inode, 0);
+
ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos);
if (ret == -EINVAL)
mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 6a610ae53583..eebc3cfa6be8 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -117,7 +117,7 @@ struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb)
{
struct ocfs2_journal_handle *retval = NULL;
- retval = kcalloc(1, sizeof(*retval), GFP_KERNEL);
+ retval = kcalloc(1, sizeof(*retval), GFP_NOFS);
if (!retval) {
mlog(ML_ERROR, "Failed to allocate memory for journal "
"handle!\n");
@@ -870,9 +870,11 @@ static int ocfs2_force_read_journal(struct inode *inode)
if (p_blocks > CONCURRENT_JOURNAL_FILL)
p_blocks = CONCURRENT_JOURNAL_FILL;
+ /* We are reading journal data which should not
+ * be put in the uptodate cache */
status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb),
p_blkno, p_blocks, bhs, 0,
- inode);
+ NULL);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -982,7 +984,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
{
struct ocfs2_la_recovery_item *item;
- item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL);
+ item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
if (!item) {
/* Though we wish to avoid it, we are in fact safe in
* skipping local alloc cleanup as fsck.ocfs2 is more
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 04a684dfdd96..b8a00a793326 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -337,7 +337,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
(unsigned long long)oi->ip_blkno,
(unsigned long long)block, expand_tree);
- new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL);
+ new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
if (!new) {
mlog_errno(-ENOMEM);
return;
@@ -349,7 +349,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
* has no way of tracking that. */
for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!tree[i]) {
mlog_errno(-ENOMEM);
goto out_free;
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index 53049a204197..ee42765a8553 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -586,7 +586,7 @@ static struct ocfs2_net_wait_ctxt *ocfs2_new_net_wait_ctxt(unsigned int response
{
struct ocfs2_net_wait_ctxt *w;
- w = kcalloc(1, sizeof(*w), GFP_KERNEL);
+ w = kcalloc(1, sizeof(*w), GFP_NOFS);
if (!w) {
mlog_errno(-ENOMEM);
goto bail;
@@ -749,7 +749,7 @@ static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb,
BUG_ON(!ocfs2_is_valid_vote_request(type));
- request = kcalloc(1, sizeof(*request), GFP_KERNEL);
+ request = kcalloc(1, sizeof(*request), GFP_NOFS);
if (!request) {
mlog_errno(-ENOMEM);
} else {
@@ -1129,7 +1129,7 @@ static int ocfs2_handle_vote_message(struct o2net_msg *msg,
struct ocfs2_super *osb = data;
struct ocfs2_vote_work *work;
- work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL);
+ work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_NOFS);
if (!work) {
status = -ENOMEM;
mlog_errno(status);
diff --git a/net/802/tr.c b/net/802/tr.c
index afd8385c0c9c..e9dc803f2fe0 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -643,6 +643,5 @@ static int __init rif_init(void)
module_init(rif_init);
-EXPORT_SYMBOL(tr_source_route);
EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index d159c92cca84..466ed3440b74 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -168,7 +168,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
if (info->bitmask & EBT_LOG_NFLOG)
nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
- info->prefix);
+ "%s", info->prefix);
else
ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
info->prefix);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c2d92f99a2b8..d0d19192026d 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -948,7 +948,7 @@ static int do_add_counters(void __user *user, unsigned int len)
write_lock_bh(&t->lock);
private = t->private;
- if (private->number != paddc->num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
index 6c4899d8046a..96ceabaec402 100644
--- a/net/ipv4/netfilter/ip_nat_proto_gre.c
+++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -49,15 +49,15 @@ gre_in_range(const struct ip_conntrack_tuple *tuple,
const union ip_conntrack_manip_proto *min,
const union ip_conntrack_manip_proto *max)
{
- u_int32_t key;
+ __be16 key;
if (maniptype == IP_NAT_MANIP_SRC)
key = tuple->src.u.gre.key;
else
key = tuple->dst.u.gre.key;
- return ntohl(key) >= ntohl(min->gre.key)
- && ntohl(key) <= ntohl(max->gre.key);
+ return ntohs(key) >= ntohs(min->gre.key)
+ && ntohs(key) <= ntohs(max->gre.key);
}
/* generate unique tuple ... */
@@ -81,14 +81,14 @@ gre_unique_tuple(struct ip_conntrack_tuple *tuple,
min = 1;
range_size = 0xffff;
} else {
- min = ntohl(range->min.gre.key);
- range_size = ntohl(range->max.gre.key) - min + 1;
+ min = ntohs(range->min.gre.key);
+ range_size = ntohs(range->max.gre.key) - min + 1;
}
DEBUGP("min = %u, range_size = %u\n", min, range_size);
for (i = 0; i < range_size; i++, key++) {
- *keyptr = htonl(min + key % range_size);
+ *keyptr = htons(min + key % range_size);
if (!ip_nat_used_tuple(tuple, conntrack))
return 1;
}
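The GRE NAT fix is about operand width as much as byte order: the key is a 16-bit network-order field, so converting it with ntohl()/htonl() reads the wrong number of bytes and swaps them incorrectly, while ntohs()/htons() match the field's real size. A small user-space demonstration (the commented results assume a little-endian host) is:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint16_t key_net = htons(0x1234);       /* 16-bit key in wire order */

            /* Correct width: recovers 0x1234 on any host. */
            printf("ntohs(): 0x%x\n", ntohs(key_net));

            /* Wrong width: promoted to 32 bits first, so a little-endian
             * host prints 0x12340000 instead of 0x1234. */
            printf("ntohl(): 0x%x\n", (unsigned int)ntohl(key_net));
            return 0;
    }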
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 39fd4c2a2386..b98f7b08b084 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -428,7 +428,7 @@ ipt_log_target(struct sk_buff **pskb,
if (loginfo->logflags & IPT_LOG_NFLOG)
nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
- loginfo->prefix);
+ "%s", loginfo->prefix);
else
ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
loginfo->prefix);
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 143843285702..b847ee409efb 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -821,6 +821,7 @@ checkentry(const char *tablename,
/* Create our proc 'status' entry. */
curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent);
if (!curr_table->status_proc) {
+ vfree(hold);
printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n");
/* Destroy the created table */
spin_lock_bh(&recent_lock);
@@ -845,7 +846,6 @@ checkentry(const char *tablename,
spin_unlock_bh(&recent_lock);
vfree(curr_table->time_info);
vfree(curr_table->hash_table);
- vfree(hold);
vfree(curr_table->table);
vfree(curr_table);
return 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9f0cca4c4fae..4a538bc1683d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1662,6 +1662,8 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
+ if (IsReno(tp))
+ tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
/* clear xmit_retrans hint */
if (tp->retransmit_skb_hint &&
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0a673038344f..2e72f89a7019 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1103,7 +1103,7 @@ do_add_counters(void __user *user, unsigned int len)
write_lock_bh(&t->lock);
private = t->private;
- if (private->number != paddc->num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index a96c0de14b00..73c6300109d6 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -439,7 +439,7 @@ ip6t_log_target(struct sk_buff **pskb,
if (loginfo->logflags & IP6T_LOG_NFLOG)
nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
- loginfo->prefix);
+ "%s", loginfo->prefix);
else
ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
loginfo->prefix);
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index 94dbdb8b458d..4f6b84c8f4ab 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -40,7 +40,7 @@ match(const struct sk_buff *skb,
memset(eui64, 0, sizeof(eui64));
- if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
+ if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
if (skb->nh.ipv6h->version == 0x6) {
memcpy(eui64, eth_hdr(skb)->h_source, 3);
memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 2dbf134d5266..811d998725bc 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -944,9 +944,9 @@ out:
return rc;
}
-static int ipx_map_frame_type(unsigned char type)
+static __be16 ipx_map_frame_type(unsigned char type)
{
- int rc = 0;
+ __be16 rc = 0;
switch (type) {
case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break;
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 67774448efd9..a394c6fe19a2 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -119,7 +119,7 @@ out:
return rc;
}
-static int ipxrtr_delete(long net)
+static int ipxrtr_delete(__u32 net)
{
struct ipx_route *r, *tmp;
int rc;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index c60273cad778..61cdda4e5d3b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -321,7 +321,7 @@ static int
nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
{
spin_lock_bh(&inst->lock);
- inst->flags = ntohs(flags);
+ inst->flags = flags;
spin_unlock_bh(&inst->lock);
return 0;
@@ -902,7 +902,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
if (nfula[NFULA_CFG_FLAGS-1]) {
u_int16_t flags =
*(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]);
- nfulnl_set_flags(inst, ntohl(flags));
+ nfulnl_set_flags(inst, ntohs(flags));
}
out_put:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 31eb83717c26..138ea92ed268 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -193,8 +193,10 @@ static void dev_watchdog(unsigned long arg)
netif_running(dev) &&
netif_carrier_ok(dev)) {
if (netif_queue_stopped(dev) &&
- (jiffies - dev->trans_start) > dev->watchdog_timeo) {
- printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
+ time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
+
+ printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
+ dev->name);
dev->tx_timeout(dev);
}
if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
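The dev_watchdog() change replaces an open-coded jiffies comparison with time_after(), which stays correct when trans_start is observed slightly ahead of jiffies (for instance when another CPU updates it between the two reads) and across counter wrap-around. A stand-alone re-derivation of the macro and of the case where the naive form misfires (our numbers, not kernel code) is:

    #include <stdio.h>

    /* Same comparison the kernel's time_after(a, b) performs for
     * unsigned long tick counters. */
    #define time_after(a, b)        ((long)((b) - (a)) < 0)

    int main(void)
    {
            unsigned long now = 1000;
            unsigned long trans_start = 1002;       /* seen "in the future" */
            unsigned long timeo = 100;

            /* Naive form: the unsigned difference wraps to a huge value
             * and reports a timeout that never happened (prints 1). */
            printf("naive timeout?      %d\n", (now - trans_start) > timeo);

            /* time_after() form: the signed comparison stays correct
             * (prints 0). */
            printf("time_after timeout? %d\n",
                   time_after(now, trans_start + timeo));
            return 0;
    }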