-rw-r--r--  arch/alpha/kernel/core_mcpcia.c | 2
-rw-r--r--  arch/alpha/kernel/err_titan.c | 1
-rw-r--r--  arch/alpha/kernel/module.c | 8
-rw-r--r--  arch/alpha/kernel/sys_nautilus.c | 6
-rw-r--r--  arch/alpha/kernel/sys_noritake.c | 9
-rw-r--r--  arch/alpha/kernel/sys_rawhide.c | 15
-rw-r--r--  arch/alpha/kernel/sys_sio.c | 14
-rw-r--r--  arch/alpha/kernel/sys_sx164.c | 2
-rw-r--r--  arch/alpha/kernel/sys_titan.c | 3
-rw-r--r--  arch/i386/kernel/nmi.c | 9
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/x86_64/kernel/functionlist | 1
-rw-r--r--  arch/x86_64/kernel/nmi.c | 10
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S | 2
-rw-r--r--  drivers/char/mem.c | 2
-rw-r--r--  drivers/hwmon/w83627ehf.c | 6
-rw-r--r--  drivers/i2c/busses/Kconfig | 3
-rw-r--r--  drivers/i2c/busses/i2c-pasemi.c | 6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c | 1
-rw-r--r--  drivers/kvm/mmu.c | 1
-rw-r--r--  drivers/macintosh/smu.c | 4
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/net/cxgb3/cxgb3_defs.h | 5
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c | 69
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 18
-rw-r--r--  drivers/net/sky2.c | 176
-rw-r--r--  drivers/net/sky2.h | 11
-rw-r--r--  drivers/net/spider_net.c | 2
-rw-r--r--  drivers/spi/spi_s3c24xx.c | 4
-rw-r--r--  fs/exec.c | 18
-rw-r--r--  fs/ufs/inode.c | 29
-rw-r--r--  include/asm-alpha/compiler.h | 47
-rw-r--r--  include/asm-alpha/core_mcpcia.h | 2
-rw-r--r--  include/asm-alpha/io.h | 1
-rw-r--r--  include/asm-powerpc/systbl.h | 2
-rw-r--r--  include/linux/io.h | 13
-rw-r--r--  include/linux/plist.h | 54
-rw-r--r--  include/linux/skbuff.h | 10
-rw-r--r--  net/bridge/br_stp_if.c | 9
-rw-r--r--  net/core/neighbour.c | 5
-rw-r--r--  net/core/netpoll.c | 7
-rw-r--r--  net/core/skbuff.c | 55
-rw-r--r--  net/irda/af_irda.c | 3
-rw-r--r--  net/key/af_key.c | 90
-rw-r--r--  net/netlink/af_netlink.c | 6
-rw-r--r--  net/sctp/socket.c | 54
-rw-r--r--  net/sctp/ulpqueue.c | 9
-rw-r--r--  net/sunrpc/svcauth_unix.c | 21
48 files changed, 548 insertions, 280 deletions
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
index 8d019071190a..381fec0af52e 100644
--- a/arch/alpha/kernel/core_mcpcia.c
+++ b/arch/alpha/kernel/core_mcpcia.c
@@ -40,8 +40,6 @@
# define DBG_CFG(args)
#endif
-#define MCPCIA_MAX_HOSES 4
-
/*
* Given a bus, device, and function number, compute resulting
* configuration space address and setup the MCPCIA_HAXR2 register
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index febe71c6869f..543d96d7fa2b 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -16,6 +16,7 @@
#include <asm/smp.h>
#include <asm/err_common.h>
#include <asm/err_ev6.h>
+#include <asm/irq_regs.h>
#include "err_impl.h"
#include "proto.h"
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index aac6d4b22f7a..bd03dc94c72b 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -285,12 +285,12 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
reloc_overflow:
if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION)
printk(KERN_ERR
- "module %s: Relocation overflow vs section %d\n",
- me->name, sym->st_shndx);
+ "module %s: Relocation (type %lu) overflow vs section %d\n",
+ me->name, r_type, sym->st_shndx);
else
printk(KERN_ERR
- "module %s: Relocation overflow vs %s\n",
- me->name, strtab + sym->st_name);
+ "module %s: Relocation (type %lu) overflow vs %s\n",
+ me->name, r_type, strtab + sym->st_name);
return -ENOEXEC;
}
}
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index e7594a7cf585..920196bcbb61 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -70,6 +70,12 @@ nautilus_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
/* Preserve the IRQ set up by the console. */
u8 irq;
+ /* UP1500: AGP INTA is actually routed to IRQ 5, not IRQ 10 as
+ console reports. Check the device id of AGP bridge to distinguish
+ UP1500 from UP1000/1100. Note: 'pin' is 2 due to bridge swizzle. */
+ if (slot == 1 && pin == 2 &&
+ dev->bus->self && dev->bus->self->device == 0x700f)
+ return 5;
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
return irq;
}
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index de6ba3432e8a..eb2a1d63f484 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -66,6 +66,13 @@ noritake_startup_irq(unsigned int irq)
return 0;
}
+static void
+noritake_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ noritake_enable_irq(irq);
+}
+
static struct hw_interrupt_type noritake_irq_type = {
.typename = "NORITAKE",
.startup = noritake_startup_irq,
@@ -73,7 +80,7 @@ static struct hw_interrupt_type noritake_irq_type = {
.enable = noritake_enable_irq,
.disable = noritake_disable_irq,
.ack = noritake_disable_irq,
- .end = noritake_enable_irq,
+ .end = noritake_end_irq,
};
static void
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 581d08c70b92..672cb2df53df 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -52,6 +52,9 @@ rawhide_update_irq_hw(int hose, int mask)
*(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose));
}
+#define hose_exists(h) \
+ (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
+
static inline void
rawhide_enable_irq(unsigned int irq)
{
@@ -59,6 +62,9 @@ rawhide_enable_irq(unsigned int irq)
irq -= 16;
hose = irq / 24;
+ if (!hose_exists(hose)) /* if hose non-existent, exit */
+ return;
+
irq -= hose * 24;
mask = 1 << irq;
@@ -76,6 +82,9 @@ rawhide_disable_irq(unsigned int irq)
irq -= 16;
hose = irq / 24;
+ if (!hose_exists(hose)) /* if hose non-existent, exit */
+ return;
+
irq -= hose * 24;
mask = ~(1 << irq) | hose_irq_masks[hose];
@@ -93,6 +102,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
irq -= 16;
hose = irq / 24;
+ if (!hose_exists(hose)) /* if hose non-existent, exit */
+ return;
+
irq -= hose * 24;
mask1 = 1 << irq;
mask = ~mask1 | hose_irq_masks[hose];
@@ -169,6 +181,9 @@ rawhide_init_irq(void)
mcpcia_init_hoses();
+ /* Clear them all; only hoses that exist will be non-zero. */
+ for (i = 0; i < MCPCIA_MAX_HOSES; i++) cached_irq_masks[i] = 0;
+
for (hose = hose_head; hose; hose = hose->next) {
unsigned int h = hose->index;
unsigned int mask = hose_irq_masks[h];
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index a654014d202a..14b5a753aba5 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -84,12 +84,16 @@ alphabook1_init_arch(void)
static void __init
sio_pci_route(void)
{
-#if defined(ALPHA_RESTORE_SRM_SETUP)
- /* First, read and save the original setting. */
+ unsigned int orig_route_tab;
+
+ /* First, ALWAYS read and print the original setting. */
pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
- &saved_config.orig_route_tab);
+ &orig_route_tab);
printk("%s: PIRQ original 0x%x new 0x%x\n", __FUNCTION__,
- saved_config.orig_route_tab, alpha_mv.sys.sio.route_tab);
+ orig_route_tab, alpha_mv.sys.sio.route_tab);
+
+#if defined(ALPHA_RESTORE_SRM_SETUP)
+ saved_config.orig_route_tab = orig_route_tab;
#endif
/* Now override with desired setting. */
@@ -334,7 +338,7 @@ struct alpha_machine_vector avanti_mv __initmv = {
.pci_swizzle = common_swizzle,
.sys = { .sio = {
- .route_tab = 0x0b0a0e0f,
+ .route_tab = 0x0b0a050f, /* leave 14 for IDE, 9 for SND */
}}
};
ALIAS_MV(avanti)
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c
index 94ad68b7c0ae..41d4ad4c7c44 100644
--- a/arch/alpha/kernel/sys_sx164.c
+++ b/arch/alpha/kernel/sys_sx164.c
@@ -132,7 +132,7 @@ sx164_init_arch(void)
if (amask(AMASK_MAX) != 0
&& alpha_using_srm
- && (cpu->pal_revision & 0xffff) == 0x117) {
+ && (cpu->pal_revision & 0xffff) <= 0x117) {
__asm__ __volatile__(
"lda $16,8($31)\n"
"call_pal 9\n" /* Allow PALRES insns in kernel mode */
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 29ab7db81c30..f009b7bc0943 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -257,8 +257,7 @@ titan_dispatch_irqs(u64 mask)
*/
while (mask) {
/* convert to SRM vector... priority is <63> -> <0> */
- __asm__("ctlz %1, %0" : "=r"(vector) : "r"(mask));
- vector = 63 - vector;
+ vector = 63 - __kernel_ctlz(mask);
mask &= ~(1UL << vector); /* clear it out */
vector = 0x900 + (vector << 4); /* convert to SRM vector */
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index a98ba88a8c0c..9f1e8c1afab7 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -41,16 +41,17 @@ int nmi_watchdog_enabled;
* different subsystems this reservation system just tries to coordinate
* things a little
*/
-static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
-static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
-
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
* offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
*/
#define NMI_MAX_COUNTER_BITS 66
+#define NMI_MAX_COUNTER_LONGS BITS_TO_LONGS(NMI_MAX_COUNTER_BITS)
+static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner[NMI_MAX_COUNTER_LONGS]);
+static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[NMI_MAX_COUNTER_LONGS]);
+
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* nmi_active:
* >0: the lapic NMI watchdog is active, but can be disabled
* <0: the lapic NMI watchdog has not been set up, and cannot
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index ca51610955df..6f38f818380b 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -26,7 +26,7 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
-_proxy_pda = 0;
+_proxy_pda = 1;
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
diff --git a/arch/x86_64/kernel/functionlist b/arch/x86_64/kernel/functionlist
index 01fa23580c85..7ae18ec12454 100644
--- a/arch/x86_64/kernel/functionlist
+++ b/arch/x86_64/kernel/functionlist
@@ -514,7 +514,6 @@
*(.text.dentry_open)
*(.text.dentry_iput)
*(.text.bio_alloc)
-*(.text.alloc_skb_from_cache)
*(.text.wait_on_page_bit)
*(.text.vfs_readdir)
*(.text.vfs_lstat)
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index a90996c27dc8..dfab9f167366 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -39,15 +39,17 @@ int panic_on_unrecovered_nmi;
* different subsystems this reservation system just tries to coordinate
* things a little
*/
-static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
-static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
-
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
* offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
*/
#define NMI_MAX_COUNTER_BITS 66
+#define NMI_MAX_COUNTER_LONGS BITS_TO_LONGS(NMI_MAX_COUNTER_BITS)
+
+static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner[NMI_MAX_COUNTER_LONGS]);
+static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[NMI_MAX_COUNTER_LONGS]);
+
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* nmi_active:
* >0: the lapic NMI watchdog is active, but can be disabled
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b73212c0a550..5176ecf006ee 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -13,7 +13,7 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
-_proxy_pda = 0;
+_proxy_pda = 1;
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
data PT_LOAD FLAGS(7); /* RWE */
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index f5c160caf9f4..5f066963f171 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -248,7 +248,7 @@ static unsigned long get_unmapped_area_mem(struct file *file,
{
if (!valid_mmap_phys_addr_range(pgoff, len))
return (unsigned long) -EINVAL;
- return pgoff;
+ return pgoff << PAGE_SHIFT;
}
/* can't do an in-place private mapping if there's no MMU */
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index da5828f2dfc2..01206ebb1cf2 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -407,7 +407,7 @@ static void w83627ehf_write_fan_div(struct i2c_client *client, int nr)
break;
case 4:
reg = (w83627ehf_read_value(client, W83627EHF_REG_DIODE) & 0x73)
- | ((data->fan_div[4] & 0x03) << 3)
+ | ((data->fan_div[4] & 0x03) << 2)
| ((data->fan_div[4] & 0x04) << 5);
w83627ehf_write_value(client, W83627EHF_REG_DIODE, reg);
break;
@@ -471,9 +471,9 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
time */
if (data->fan[i] == 0xff
&& data->fan_div[i] < 0x07) {
- dev_dbg(&client->dev, "Increasing fan %d "
+ dev_dbg(&client->dev, "Increasing fan%d "
"clock divider from %u to %u\n",
- i, div_from_reg(data->fan_div[i]),
+ i + 1, div_from_reg(data->fan_div[i]),
div_from_reg(data->fan_div[i] + 1));
data->fan_div[i]++;
w83627ehf_write_fan_div(client, i);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index fb19dbb31e42..ece31d2c6c64 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -344,8 +344,7 @@ config I2C_PARPORT_LIGHT
config I2C_PASEMI
tristate "PA Semi SMBus interface"
-# depends on PPC_PASEMI && I2C && PCI
- depends on I2C && PCI
+ depends on PPC_PASEMI && I2C && PCI
help
Supports the PA Semi PWRficient on-chip SMBus interfaces.
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index f54fb5d65cc4..bf89eeef74e9 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -141,7 +141,7 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
for (i = 0; i < msg->len - 1; i++)
TXFIFO_WR(smbus, msg->buf[i]);
- TXFIFO_WR(smbus, msg->buf[msg->len] |
+ TXFIFO_WR(smbus, msg->buf[msg->len-1] |
(stop ? MTXFIFO_STOP : 0));
}
@@ -226,7 +226,7 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter,
rd = RXFIFO_RD(smbus);
len = min_t(u8, (rd & MRXFIFO_DATA_M),
I2C_SMBUS_BLOCK_MAX);
- TXFIFO_WR(smbus, (len + 1) | MTXFIFO_READ |
+ TXFIFO_WR(smbus, len | MTXFIFO_READ |
MTXFIFO_STOP);
} else {
len = min_t(u8, data->block[0], I2C_SMBUS_BLOCK_MAX);
@@ -258,7 +258,7 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter,
rd = RXFIFO_RD(smbus);
len = min_t(u8, (rd & MRXFIFO_DATA_M),
I2C_SMBUS_BLOCK_MAX - len);
- TXFIFO_WR(smbus, (len + 1) | MTXFIFO_READ | MTXFIFO_STOP);
+ TXFIFO_WR(smbus, len | MTXFIFO_READ | MTXFIFO_STOP);
break;
default:
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index fdb576dcfaa8..ee561c569d5f 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -835,6 +835,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
key = arbel_key_to_hw_index(fmr->ibmr.lkey);
key &= dev->limits.num_mpts - 1;
+ key = adjust_key(dev, key);
fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
fmr->maps = 0;
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index e85b4c7c36f7..cab26f301eab 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -1171,6 +1171,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
* and zap two pdes instead of one.
*/
if (level == PT32_ROOT_LEVEL) {
+ page_offset &= ~7; /* kill rounding error */
page_offset <<= 1;
npte = 2;
}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 3096836d8bd3..c9f3dc4fd3ee 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1259,9 +1259,9 @@ static int smu_release(struct inode *inode, struct file *file)
set_current_state(TASK_UNINTERRUPTIBLE);
if (pp->cmd.status != 1)
break;
- spin_lock_irqsave(&pp->lock, flags);
- schedule();
spin_unlock_irqrestore(&pp->lock, flags);
+ schedule();
+ spin_lock_irqsave(&pp->lock, flags);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&pp->wait, &wait);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c3f9f599f134..a3d46ea37126 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2263,6 +2263,7 @@ config GIANFAR
tristate "Gianfar Ethernet"
depends on 85xx || 83xx || PPC_86xx
select PHYLIB
+ select CRC32
help
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
and MPC86xx family of chips, and the FEC on the 8540.
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
index e14862b43d17..483a594210a7 100644
--- a/drivers/net/cxgb3/cxgb3_defs.h
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -67,7 +67,10 @@ static inline union listen_entry *stid2entry(const struct tid_info *t,
static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
unsigned int tid)
{
- return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
+ struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+ &(t->tid_tab[tid]) : NULL;
+
+ return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
}
/*
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 48649244673e..199e5066acf3 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -508,6 +508,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
spin_lock_bh(&td->tid_release_lock);
p->ctx = (void *)td->tid_release_list;
+ p->client = NULL;
td->tid_release_list = p;
if (!p->ctx)
schedule_work(&td->tid_release_task);
@@ -623,7 +624,8 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
- if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+ t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
t3c_tid->
@@ -642,7 +644,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode] (dev, skb,
t3c_tid->ctx);
@@ -660,7 +662,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
@@ -689,6 +691,28 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
}
}
+/*
+ * Returns an sk_buff for a reply CPL message of size len. If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated. The input skb must be of size at least len. Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+ int gfp)
+{
+ if (likely(!skb_cloned(skb))) {
+ BUG_ON(skb->len < len);
+ __skb_trim(skb, len);
+ skb_get(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
+ if (skb)
+ __skb_put(skb, len);
+ }
+ return skb;
+}
+
static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
union opcode_tid *p = cplhdr(skb);
@@ -696,30 +720,39 @@ static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
} else {
struct cpl_abort_req_rss *req = cplhdr(skb);
struct cpl_abort_rpl *rpl;
+ struct sk_buff *reply_skb;
+ unsigned int tid = GET_TID(req);
+ u8 cmd = req->status;
+
+ if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+ req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+ goto out;
- struct sk_buff *skb =
- alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
- if (!skb) {
+ reply_skb = cxgb3_get_cpl_reply_skb(skb,
+ sizeof(struct
+ cpl_abort_rpl),
+ GFP_ATOMIC);
+
+ if (!reply_skb) {
printk("do_abort_req_rss: couldn't get skb!\n");
goto out;
}
- skb->priority = CPL_PRIORITY_DATA;
- __skb_put(skb, sizeof(struct cpl_abort_rpl));
- rpl = cplhdr(skb);
+ reply_skb->priority = CPL_PRIORITY_DATA;
+ __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+ rpl = cplhdr(reply_skb);
rpl->wr.wr_hi =
htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
- rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
- OPCODE_TID(rpl) =
- htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
- rpl->cmd = req->status;
- cxgb3_ofld_send(dev, skb);
+ rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = cmd;
+ cxgb3_ofld_send(dev, reply_skb);
out:
return CPL_RET_BUF_DONE;
}
@@ -732,7 +765,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
(dev, skb, t3c_tid->ctx);
@@ -762,7 +795,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[opcode]) {
return t3c_tid->client->handlers[opcode] (dev, skb,
t3c_tid->ctx);
@@ -961,7 +994,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
for (tid = 0; tid < ti->ntids; tid++) {
te = lookup_tid(ti, tid);
BUG_ON(!te);
- if (te->ctx && te->client && te->client->redirect) {
+ if (te && te->ctx && te->client && te->client->redirect) {
update_tcb = te->client->redirect(te->ctx, old, new, e);
if (update_tcb) {
l2t_hold(L2DATA(tdev), e);
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index d83f075ef2d7..fb485d0a43d8 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1523,19 +1523,25 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
*/
int t3_phy_intr_handler(struct adapter *adapter)
{
- static const int intr_gpio_bits[] = { 8, 0x20 };
-
+ u32 mask, gpi = adapter_info(adapter)->gpio_intr;
u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
for_each_port(adapter, i) {
- if (cause & intr_gpio_bits[i]) {
- struct cphy *phy = &adap2pinfo(adapter, i)->phy;
- int phy_cause = phy->ops->intr_handler(phy);
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ mask = gpi - (gpi & (gpi - 1));
+ gpi -= mask;
+
+ if (!(p->port_type->caps & SUPPORTED_IRQ))
+ continue;
+
+ if (cause & mask) {
+ int phy_cause = p->phy.ops->intr_handler(&p->phy);
if (phy_cause & cphy_cause_link_change)
t3_link_changed(adapter, i);
if (phy_cause & cphy_cause_fifo_error)
- phy->fifo_errors++;
+ p->phy.fifo_errors++;
}
}
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 4a009b7b1777..ac36152c68bf 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -49,7 +49,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.13"
+#define DRV_VERSION "1.14"
#define PFX DRV_NAME " "
/*
@@ -123,7 +123,10 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
+#ifdef broken
+ /* This device causes data corruption problems that are not resolved */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
+#endif
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
@@ -740,12 +743,17 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
- if (hw->dev[port]->mtu > ETH_DATA_LEN) {
- /* set Tx GMAC FIFO Almost Empty Threshold */
- sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180);
- /* Disable Store & Forward mode for TX */
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
- }
+
+ /* set Tx GMAC FIFO Almost Empty Threshold */
+ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+ (ECU_JUMBO_WM << 16) | ECU_AE_THR);
+
+ if (hw->dev[port]->mtu > ETH_DATA_LEN)
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_JUMBO_ENA | TX_STFW_DIS);
+ else
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_JUMBO_DIS | TX_STFW_ENA);
}
}
@@ -1278,7 +1286,7 @@ static int sky2_up(struct net_device *dev)
/* Set almost empty threshold */
if (hw->chip_id == CHIP_ID_YUKON_EC_U
&& hw->chip_rev == CHIP_REV_YU_EC_U_A0)
- sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
+ sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
TX_RING_SIZE - 1);
@@ -1584,13 +1592,6 @@ static int sky2_down(struct net_device *dev)
sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
RB_RST_SET | RB_DIS_OP_MD);
- /* WA for dev. #4.209 */
- if (hw->chip_id == CHIP_ID_YUKON_EC_U
- && (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- sky2->speed != SPEED_1000 ?
- TX_STFW_ENA : TX_STFW_DIS);
-
ctrl = gma_read16(hw, port, GM_GP_CTRL);
ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
gma_write16(hw, port, GM_GP_CTRL, ctrl);
@@ -1890,6 +1891,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
int err;
u16 ctl, mode;
u32 imask;
@@ -1897,9 +1899,8 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
- /* TSO on Yukon Ultra and MTU > 1500 not supported */
- if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
- dev->features &= ~NETIF_F_TSO;
+ if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_FE)
+ return -EINVAL;
if (!netif_running(dev)) {
dev->mtu = new_mtu;
@@ -1915,8 +1916,18 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
synchronize_irq(hw->pdev->irq);
- ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
- gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
+ if (new_mtu > ETH_DATA_LEN) {
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_JUMBO_ENA | TX_STFW_DIS);
+ dev->features &= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
+ } else
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_JUMBO_DIS | TX_STFW_ENA);
+ }
+
+ ctl = gma_read16(hw, port, GM_GP_CTRL);
+ gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
sky2_rx_stop(sky2);
sky2_rx_clean(sky2);
@@ -1928,9 +1939,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
if (dev->mtu > ETH_DATA_LEN)
mode |= GM_SMOD_JUMBO_ENA;
- gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode);
+ gma_write16(hw, port, GM_SERIAL_MODE, mode);
- sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
+ sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
err = sky2_rx_start(sky2);
sky2_write32(hw, B0_IMSK, imask);
@@ -1938,7 +1949,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
if (err)
dev_close(dev);
else {
- gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);
+ gma_write16(hw, port, GM_GP_CTRL, ctl);
netif_poll_enable(hw->dev[0]);
netif_wake_queue(dev);
@@ -2340,26 +2351,22 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
}
}
-/* This should never happen it is a fatal situation */
-static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
- const char *rxtx, u32 mask)
+/* This should never happen; it is a bug. */
+static void sky2_le_error(struct sky2_hw *hw, unsigned port,
+ u16 q, unsigned ring_size)
{
struct net_device *dev = hw->dev[port];
struct sky2_port *sky2 = netdev_priv(dev);
- u32 imask;
-
- printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n",
- dev ? dev->name : "<not registered>", rxtx);
+ unsigned idx;
+ const u64 *le = (q == Q_R1 || q == Q_R2)
+ ? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le;
- imask = sky2_read32(hw, B0_IMSK);
- imask &= ~mask;
- sky2_write32(hw, B0_IMSK, imask);
+ idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
+ printk(KERN_ERR PFX "%s: descriptor error q=%#x get=%u [%llx] put=%u\n",
+ dev->name, (unsigned) q, idx, (unsigned long long) le[idx],
+ (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
- if (dev) {
- spin_lock(&sky2->phy_lock);
- sky2_link_down(sky2);
- spin_unlock(&sky2->phy_lock);
- }
+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
}
/* If idle then force a fake soft NAPI poll once a second
@@ -2383,23 +2390,15 @@ static void sky2_idle(unsigned long arg)
mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
}
-
-static int sky2_poll(struct net_device *dev0, int *budget)
+/* Hardware/software error handling */
+static void sky2_err_intr(struct sky2_hw *hw, u32 status)
{
- struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
- int work_limit = min(dev0->quota, *budget);
- int work_done = 0;
- u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
if (status & Y2_IS_HW_ERR)
sky2_hw_intr(hw);
- if (status & Y2_IS_IRQ_PHY1)
- sky2_phy_intr(hw, 0);
-
- if (status & Y2_IS_IRQ_PHY2)
- sky2_phy_intr(hw, 1);
-
if (status & Y2_IS_IRQ_MAC1)
sky2_mac_intr(hw, 0);
@@ -2407,16 +2406,33 @@ static int sky2_poll(struct net_device *dev0, int *budget)
sky2_mac_intr(hw, 1);
if (status & Y2_IS_CHK_RX1)
- sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
+ sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE);
if (status & Y2_IS_CHK_RX2)
- sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
+ sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE);
if (status & Y2_IS_CHK_TXA1)
- sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
+ sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE);
if (status & Y2_IS_CHK_TXA2)
- sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
+ sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE);
+}
+
+static int sky2_poll(struct net_device *dev0, int *budget)
+{
+ struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
+ int work_limit = min(dev0->quota, *budget);
+ int work_done = 0;
+ u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+
+ if (unlikely(status & Y2_IS_ERROR))
+ sky2_err_intr(hw, status);
+
+ if (status & Y2_IS_IRQ_PHY1)
+ sky2_phy_intr(hw, 0);
+
+ if (status & Y2_IS_IRQ_PHY2)
+ sky2_phy_intr(hw, 1);
work_done = sky2_status_intr(hw, work_limit);
if (work_done < work_limit) {
@@ -2534,16 +2550,14 @@ static void sky2_reset(struct sky2_hw *hw)
int i;
/* disable ASF */
- if (hw->chip_id <= CHIP_ID_YUKON_EC) {
- if (hw->chip_id == CHIP_ID_YUKON_EX) {
- status = sky2_read16(hw, HCU_CCSR);
- status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
- HCU_CCSR_UC_STATE_MSK);
- sky2_write16(hw, HCU_CCSR, status);
- } else
- sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
- sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
- }
+ if (hw->chip_id == CHIP_ID_YUKON_EX) {
+ status = sky2_read16(hw, HCU_CCSR);
+ status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
+ HCU_CCSR_UC_STATE_MSK);
+ sky2_write16(hw, HCU_CCSR, status);
+ } else
+ sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
+ sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
/* do a SW reset */
sky2_write8(hw, B0_CTST, CS_RST_SET);
@@ -3328,6 +3342,36 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->len - B3_RI_WTO_R1);
}
+/* In order to do Jumbo packets on these chips, need to turn off the
+ * transmit store/forward. Therefore checksum offload won't work.
+ */
+static int no_tx_offload(struct net_device *dev)
+{
+ const struct sky2_port *sky2 = netdev_priv(dev);
+ const struct sky2_hw *hw = sky2->hw;
+
+ return dev->mtu > ETH_DATA_LEN &&
+ (hw->chip_id == CHIP_ID_YUKON_EX
+ || hw->chip_id == CHIP_ID_YUKON_EC_U);
+}
+
+static int sky2_set_tx_csum(struct net_device *dev, u32 data)
+{
+ if (data && no_tx_offload(dev))
+ return -EINVAL;
+
+ return ethtool_op_set_tx_csum(dev, data);
+}
+
+
+static int sky2_set_tso(struct net_device *dev, u32 data)
+{
+ if (data && no_tx_offload(dev))
+ return -EINVAL;
+
+ return ethtool_op_set_tso(dev, data);
+}
+
static const struct ethtool_ops sky2_ethtool_ops = {
.get_settings = sky2_get_settings,
.set_settings = sky2_set_settings,
@@ -3343,9 +3387,9 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_tx_csum = sky2_set_tx_csum,
.get_tso = ethtool_op_get_tso,
- .set_tso = ethtool_op_set_tso,
+ .set_tso = sky2_set_tso,
.get_rx_csum = sky2_get_rx_csum,
.set_rx_csum = sky2_set_rx_csum,
.get_strings = sky2_get_strings,
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index ac24bdc42976..5efb5afc45ba 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -288,6 +288,9 @@ enum {
| Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
| Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
+ Y2_IS_ERROR = Y2_IS_HW_ERR |
+ Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 |
+ Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
};
/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
@@ -738,6 +741,11 @@ enum {
TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
+
+ /* Threshold values for Yukon-EC Ultra and Extreme */
+ ECU_AE_THR = 0x0070, /* Almost Empty Threshold */
+ ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */
+ ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */
};
/* Descriptor Poll Timer Registers */
@@ -1631,6 +1639,9 @@ enum {
TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
+ TX_JUMBO_ENA = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
+ TX_JUMBO_DIS = 1<<22,/* PCI Jumbo Mode disable (Yukon-EC Ultra) */
+
GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 3b91af89e4c7..e3019d52c30f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -719,7 +719,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
spin_unlock_irqrestore(&chain->lock, flags);
- if (skb->protocol == htons(ETH_P_IP))
+ if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == CHECKSUM_PARTIAL)
switch (skb->nh.iph->protocol) {
case IPPROTO_TCP:
hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 220abce63e4a..b10211c420ef 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -77,7 +77,7 @@ static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
switch (value) {
case BITBANG_CS_INACTIVE:
- hw->pdata->set_cs(hw->pdata, spi->chip_select, cspol^1);
+ hw->set_cs(hw->pdata, spi->chip_select, cspol^1);
break;
case BITBANG_CS_ACTIVE:
@@ -98,7 +98,7 @@ static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
/* write new configration */
writeb(spcon, hw->regs + S3C2410_SPCON);
- hw->pdata->set_cs(hw->pdata, spi->chip_select, cspol);
+ hw->set_cs(hw->pdata, spi->chip_select, cspol);
break;
}
diff --git a/fs/exec.c b/fs/exec.c
index 7e36c6f6f538..3155e915307a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1244,13 +1244,17 @@ EXPORT_SYMBOL(set_binfmt);
* name into corename, which must have space for at least
* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
*/
-static void format_corename(char *corename, const char *pattern, long signr)
+static int format_corename(char *corename, const char *pattern, long signr)
{
const char *pat_ptr = pattern;
char *out_ptr = corename;
char *const out_end = corename + CORENAME_MAX_SIZE;
int rc;
int pid_in_pattern = 0;
+ int ispipe = 0;
+
+ if (*pattern == '|')
+ ispipe = 1;
/* Repeat as long as we have more pattern to process and more output
space */
@@ -1341,8 +1345,8 @@ static void format_corename(char *corename, const char *pattern, long signr)
*
* If core_pattern does not include a %p (as is the default)
* and core_uses_pid is set, then .%pid will be appended to
- * the filename */
- if (!pid_in_pattern
+ * the filename. Do not do this for piped commands. */
+ if (!ispipe && !pid_in_pattern
&& (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
rc = snprintf(out_ptr, out_end - out_ptr,
".%d", current->tgid);
@@ -1350,8 +1354,9 @@ static void format_corename(char *corename, const char *pattern, long signr)
goto out;
out_ptr += rc;
}
- out:
+out:
*out_ptr = 0;
+ return ispipe;
}
static void zap_process(struct task_struct *start)
@@ -1502,16 +1507,15 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
* uses lock_kernel()
*/
lock_kernel();
- format_corename(corename, core_pattern, signr);
+ ispipe = format_corename(corename, core_pattern, signr);
unlock_kernel();
- if (corename[0] == '|') {
+ if (ispipe) {
/* SIGPIPE can happen, but it's just never processed */
if(call_usermodehelper_pipe(corename+1, NULL, NULL, &file)) {
printk(KERN_INFO "Core dump to %s pipe failed\n",
corename);
goto fail_unlock;
}
- ispipe = 1;
} else
file = filp_open(corename,
O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 013d7afe7cde..f18b79122fa3 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -601,7 +601,7 @@ static void ufs_set_inode_ops(struct inode *inode)
ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}
-static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
+static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
@@ -613,8 +613,10 @@ static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
*/
inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink);
- if (inode->i_nlink == 0)
+ if (inode->i_nlink == 0) {
ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
+ return -1;
+ }
/*
* Linux now has 32-bit uid and gid, so we can support EFT.
@@ -643,9 +645,10 @@ static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
}
+ return 0;
}
-static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
+static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
@@ -658,8 +661,10 @@ static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
*/
inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink);
- if (inode->i_nlink == 0)
+ if (inode->i_nlink == 0) {
ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
+ return -1;
+ }
/*
* Linux now has 32-bit uid and gid, so we can support EFT.
@@ -690,6 +695,7 @@ static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
}
+ return 0;
}
void ufs_read_inode(struct inode * inode)
@@ -698,6 +704,7 @@ void ufs_read_inode(struct inode * inode)
struct super_block * sb;
struct ufs_sb_private_info * uspi;
struct buffer_head * bh;
+ int err;
UFSD("ENTER, ino %lu\n", inode->i_ino);
@@ -720,14 +727,17 @@ void ufs_read_inode(struct inode * inode)
if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
- ufs2_read_inode(inode,
- ufs2_inode + ufs_inotofsbo(inode->i_ino));
+ err = ufs2_read_inode(inode,
+ ufs2_inode + ufs_inotofsbo(inode->i_ino));
} else {
struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
- ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
+ err = ufs1_read_inode(inode,
+ ufs_inode + ufs_inotofsbo(inode->i_ino));
}
+ if (err)
+ goto bad_inode;
inode->i_version++;
ufsi->i_lastfrag =
(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
@@ -888,6 +898,8 @@ void ufs_delete_inode (struct inode * inode)
loff_t old_i_size;
truncate_inode_pages(&inode->i_data, 0);
+ if (is_bad_inode(inode))
+ goto no_delete;
/*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
lock_kernel();
mark_inode_dirty(inode);
@@ -898,4 +910,7 @@ void ufs_delete_inode (struct inode * inode)
ufs_warning(inode->i_sb, __FUNCTION__, "ufs_truncate failed\n");
ufs_free_inode (inode);
unlock_kernel();
+ return;
+no_delete:
+ clear_inode(inode); /* We must guarantee clearing of inode... */
}
diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h
index d2768cc3d7a4..da6bb199839c 100644
--- a/include/asm-alpha/compiler.h
+++ b/include/asm-alpha/compiler.h
@@ -17,9 +17,6 @@
# define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift)
# define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift)
# define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b)
-# define __kernel_cttz(x) __builtin_ctzl(x)
-# define __kernel_ctlz(x) __builtin_clzl(x)
-# define __kernel_ctpop(x) __builtin_popcountl(x)
#else
# define __kernel_insbl(val, shift) \
({ unsigned long __kir; \
@@ -49,17 +46,39 @@
({ unsigned long __kir; \
__asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \
__kir; })
+#endif
+
+#ifdef __alpha_cix__
+# if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3
+# define __kernel_cttz(x) __builtin_ctzl(x)
+# define __kernel_ctlz(x) __builtin_clzl(x)
+# define __kernel_ctpop(x) __builtin_popcountl(x)
+# else
+# define __kernel_cttz(x) \
+ ({ unsigned long __kir; \
+ __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \
+ __kir; })
+# define __kernel_ctlz(x) \
+ ({ unsigned long __kir; \
+ __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
+ __kir; })
+# define __kernel_ctpop(x) \
+ ({ unsigned long __kir; \
+ __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
+ __kir; })
+# endif
+#else
# define __kernel_cttz(x) \
({ unsigned long __kir; \
- __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \
+ __asm__(".arch ev67; cttz %1,%0" : "=r"(__kir) : "r"(x)); \
__kir; })
# define __kernel_ctlz(x) \
({ unsigned long __kir; \
- __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
+ __asm__(".arch ev67; ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
__kir; })
# define __kernel_ctpop(x) \
({ unsigned long __kir; \
- __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
+ __asm__(".arch ev67; ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
__kir; })
#endif
@@ -78,16 +97,20 @@
#else
#define __kernel_ldbu(mem) \
({ unsigned char __kir; \
- __asm__("ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \
+ __asm__(".arch ev56; \
+ ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \
__kir; })
#define __kernel_ldwu(mem) \
({ unsigned short __kir; \
- __asm__("ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \
+ __asm__(".arch ev56; \
+ ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \
__kir; })
-#define __kernel_stb(val,mem) \
- __asm__("stb %1,%0" : "=m"(mem) : "r"(val))
-#define __kernel_stw(val,mem) \
- __asm__("stw %1,%0" : "=m"(mem) : "r"(val))
+#define __kernel_stb(val,mem) \
+ __asm__(".arch ev56; \
+ stb %1,%0" : "=m"(mem) : "r"(val))
+#define __kernel_stw(val,mem) \
+ __asm__(".arch ev56; \
+ stw %1,%0" : "=m"(mem) : "r"(val))
#endif
#ifdef __KERNEL__
diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h
index 980a3c51b18e..525b4f6a7ace 100644
--- a/include/asm-alpha/core_mcpcia.h
+++ b/include/asm-alpha/core_mcpcia.h
@@ -72,6 +72,8 @@
*
*/
+#define MCPCIA_MAX_HOSES 4
+
#define MCPCIA_MID(m) ((unsigned long)(m) << 33)
/* Dodge has PCI0 and PCI1 at MID 4 and 5 respectively.
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index 24bdcc8b63aa..21a86f1a05b3 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -113,6 +113,7 @@ static inline unsigned long virt_to_bus(void *address)
unsigned long bus = phys + __direct_map_base;
return phys <= __direct_map_size ? bus : 0;
}
+#define isa_virt_to_bus virt_to_bus
static inline void *bus_to_virt(unsigned long address)
{
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
index 8d853c554631..0b00068313f9 100644
--- a/include/asm-powerpc/systbl.h
+++ b/include/asm-powerpc/systbl.h
@@ -288,7 +288,7 @@ COMPAT_SYS(ppoll)
SYSCALL_SPU(unshare)
SYSCALL_SPU(splice)
SYSCALL_SPU(tee)
-SYSCALL_SPU(vmsplice)
+COMPAT_SYS_SPU(vmsplice)
COMPAT_SYS_SPU(openat)
SYSCALL_SPU(mkdirat)
SYSCALL_SPU(mknodat)
diff --git a/include/linux/io.h b/include/linux/io.h
index c244a0cc9319..09d351236379 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -33,9 +33,22 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
/*
* Managed iomap interface
*/
+#ifdef CONFIG_HAS_IOPORT
void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
unsigned int nr);
void devm_ioport_unmap(struct device *dev, void __iomem *addr);
+#else
+static inline void __iomem *devm_ioport_map(struct device *dev,
+ unsigned long port,
+ unsigned int nr)
+{
+ return NULL;
+}
+
+static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+}
+#endif
void __iomem * devm_ioremap(struct device *dev, unsigned long offset,
unsigned long size);
diff --git a/include/linux/plist.h b/include/linux/plist.h
index b95818a037ad..85de2f055874 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -97,9 +97,9 @@ struct plist_node {
#endif
/**
- * #PLIST_HEAD_INIT - static struct plist_head initializer
- *
+ * PLIST_HEAD_INIT - static struct plist_head initializer
* @head: struct plist_head variable name
+ * @_lock: lock to initialize for this list
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
@@ -109,8 +109,7 @@ struct plist_node {
}
/**
- * #PLIST_NODE_INIT - static struct plist_node initializer
- *
+ * PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
*/
@@ -122,8 +121,8 @@ struct plist_node {
/**
* plist_head_init - dynamic struct plist_head initializer
- *
* @head: &struct plist_head pointer
+ * @lock: list spinlock, remembered for debugging
*/
static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -137,7 +136,6 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
/**
* plist_node_init - Dynamic struct plist_node initializer
- *
* @node: &struct plist_node pointer
* @prio: initial node priority
*/
@@ -152,49 +150,46 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
/**
* plist_for_each - iterate over the plist
- *
- * @pos1: the type * to use as a loop counter.
- * @head: the head for your list.
+ * @pos: the type * to use as a loop counter
+ * @head: the head for your list
*/
#define plist_for_each(pos, head) \
list_for_each_entry(pos, &(head)->node_list, plist.node_list)
/**
- * plist_for_each_entry_safe - iterate over a plist of given type safe
- * against removal of list entry
+ * plist_for_each_safe - iterate safely over a plist of given type
+ * @pos: the type * to use as a loop counter
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list
*
- * @pos1: the type * to use as a loop counter.
- * @n1: another type * to use as temporary storage
- * @head: the head for your list.
+ * Iterate over a plist of given type, safe against removal of list entry.
*/
#define plist_for_each_safe(pos, n, head) \
list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list)
/**
* plist_for_each_entry - iterate over list of given type
- *
- * @pos: the type * to use as a loop counter.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @pos: the type * to use as a loop counter
+ * @head: the head for your list
+ * @mem: the name of the list_struct within the struct
*/
#define plist_for_each_entry(pos, head, mem) \
list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list)
/**
- * plist_for_each_entry_safe - iterate over list of given type safe against
- * removal of list entry
- *
- * @pos: the type * to use as a loop counter.
+ * plist_for_each_entry_safe - iterate safely over list of given type
+ * @pos: the type * to use as a loop counter
* @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @m: the name of the list_struct within the struct.
+ * @head: the head for your list
+ * @m: the name of the list_struct within the struct
+ *
+ * Iterate over list of given type, safe against removal of list entry.
*/
#define plist_for_each_entry_safe(pos, n, head, m) \
list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list)
/**
* plist_head_empty - return !0 if a plist_head is empty
- *
* @head: &struct plist_head pointer
*/
static inline int plist_head_empty(const struct plist_head *head)
@@ -204,7 +199,6 @@ static inline int plist_head_empty(const struct plist_head *head)
/**
* plist_node_empty - return !0 if plist_node is not on a list
- *
* @node: &struct plist_node pointer
*/
static inline int plist_node_empty(const struct plist_node *node)
@@ -216,10 +210,9 @@ static inline int plist_node_empty(const struct plist_node *node)
/**
* plist_first_entry - get the struct for the first entry
- *
- * @ptr: the &struct plist_head pointer.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @head: the &struct plist_head pointer
+ * @type: the type of the struct this is embedded in
+ * @member: the name of the list_struct within the struct
*/
#ifdef CONFIG_DEBUG_PI_LIST
# define plist_first_entry(head, type, member) \
@@ -234,7 +227,6 @@ static inline int plist_node_empty(const struct plist_node *node)
/**
* plist_first - return the first node (and thus, highest priority)
- *
* @head: the &struct plist_head pointer
*
* Assumes the plist is _not_ empty.
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 82f43ad478c7..5992f65b4184 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -346,9 +346,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, 1, -1);
}
-extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
- unsigned int size,
- gfp_t priority);
extern void kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
gfp_t priority);
@@ -622,6 +619,13 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
list->qlen = 0;
}
+static inline void skb_queue_head_init_class(struct sk_buff_head *list,
+ struct lock_class_key *class)
+{
+ skb_queue_head_init(list);
+ lockdep_set_class(&list->lock, class);
+}
+
/*
* Insert an sk_buff at the start of a list.
*
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 58d13f2bd121..a285897a2fb4 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -126,7 +126,9 @@ void br_stp_disable_port(struct net_bridge_port *p)
/* called under bridge lock */
void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
{
- unsigned char oldaddr[6];
+ /* should be aligned on 2 bytes for compare_ether_addr() */
+ unsigned short oldaddr_aligned[ETH_ALEN >> 1];
+ unsigned char *oldaddr = (unsigned char *)oldaddr_aligned;
struct net_bridge_port *p;
int wasroot;
@@ -151,11 +153,14 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
br_become_root_bridge(br);
}
-static const unsigned char br_mac_zero[6];
+/* should be aligned on 2 bytes for compare_ether_addr() */
+static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
/* called under bridge lock */
void br_stp_recalculate_bridge_id(struct net_bridge *br)
{
+ const unsigned char *br_mac_zero =
+ (const unsigned char *)br_mac_zero_aligned;
const unsigned char *addr = br_mac_zero;
struct net_bridge_port *p;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index cfc60019cf92..841e3f32cab1 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1331,6 +1331,8 @@ void neigh_parms_destroy(struct neigh_parms *parms)
kfree(parms);
}
+static struct lock_class_key neigh_table_proxy_queue_class;
+
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
unsigned long now = jiffies;
@@ -1379,7 +1381,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
init_timer(&tbl->proxy_timer);
tbl->proxy_timer.data = (unsigned long)tbl;
tbl->proxy_timer.function = neigh_proxy_process;
- skb_queue_head_init(&tbl->proxy_queue);
+ skb_queue_head_init_class(&tbl->proxy_queue,
+ &neigh_table_proxy_queue_class);
tbl->last_flush = now;
tbl->last_rand = now + tbl->parms.reachable_time * 20;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index da1019451ccb..4581ece48bb2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -471,6 +471,13 @@ int __netpoll_rx(struct sk_buff *skb)
if (skb->len < len || len < iph->ihl*4)
goto out;
+ /*
+ * Our transport medium may have padded the buffer out.
+ * Now we trim to the true length of the frame.
+ */
+ if (pskb_trim_rcsum(skb, len))
+ goto out;
+
if (iph->protocol != IPPROTO_UDP)
goto out;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 87573ae35b02..336958fbbcb2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -197,61 +197,6 @@ nodata:
}
/**
- * alloc_skb_from_cache - allocate a network buffer
- * @cp: kmem_cache from which to allocate the data area
- * (object size must be big enough for @size bytes + skb overheads)
- * @size: size to allocate
- * @gfp_mask: allocation mask
- *
- * Allocate a new &sk_buff. The returned buffer has no headroom and
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
- *
- * Buffers may only be allocated from interrupts using a @gfp_mask of
- * %GFP_ATOMIC.
- */
-struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
- unsigned int size,
- gfp_t gfp_mask)
-{
- struct sk_buff *skb;
- u8 *data;
-
- /* Get the HEAD */
- skb = kmem_cache_alloc(skbuff_head_cache,
- gfp_mask & ~__GFP_DMA);
- if (!skb)
- goto out;
-
- /* Get the DATA. */
- size = SKB_DATA_ALIGN(size);
- data = kmem_cache_alloc(cp, gfp_mask);
- if (!data)
- goto nodata;
-
- memset(skb, 0, offsetof(struct sk_buff, truesize));
- skb->truesize = size + sizeof(struct sk_buff);
- atomic_set(&skb->users, 1);
- skb->head = data;
- skb->data = data;
- skb->tail = data;
- skb->end = data + size;
-
- atomic_set(&(skb_shinfo(skb)->dataref), 1);
- skb_shinfo(skb)->nr_frags = 0;
- skb_shinfo(skb)->gso_size = 0;
- skb_shinfo(skb)->gso_segs = 0;
- skb_shinfo(skb)->gso_type = 0;
- skb_shinfo(skb)->frag_list = NULL;
-out:
- return skb;
-nodata:
- kmem_cache_free(skbuff_head_cache, skb);
- skb = NULL;
- goto out;
-}
-
-/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @length: length to allocate
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index eabd6838f50a..0eb7d596d470 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -138,7 +138,6 @@ static void irda_disconnect_indication(void *instance, void *sap,
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
- sock_orphan(sk);
release_sock(sk);
/* Close our TSAP.
@@ -1446,7 +1445,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
*/
ret = sock_error(sk);
if (ret)
- break;
+ ;
else if (sk->sk_shutdown & RCV_SHUTDOWN)
;
else if (noblock)
diff --git a/net/key/af_key.c b/net/key/af_key.c
index a4e7e2db0ff3..345019345f09 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -630,6 +630,35 @@ pfkey_sockaddr_size(sa_family_t family)
/* NOTREACHED */
}
+static inline int pfkey_mode_from_xfrm(int mode)
+{
+ switch(mode) {
+ case XFRM_MODE_TRANSPORT:
+ return IPSEC_MODE_TRANSPORT;
+ case XFRM_MODE_TUNNEL:
+ return IPSEC_MODE_TUNNEL;
+ case XFRM_MODE_BEET:
+ return IPSEC_MODE_BEET;
+ default:
+ return -1;
+ }
+}
+
+static inline int pfkey_mode_to_xfrm(int mode)
+{
+ switch(mode) {
+ case IPSEC_MODE_ANY: /*XXX*/
+ case IPSEC_MODE_TRANSPORT:
+ return XFRM_MODE_TRANSPORT;
+ case IPSEC_MODE_TUNNEL:
+ return XFRM_MODE_TUNNEL;
+ case IPSEC_MODE_BEET:
+ return XFRM_MODE_BEET;
+ default:
+ return -1;
+ }
+}
+
static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys, int hsc)
{
struct sk_buff *skb;
@@ -651,6 +680,7 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
int encrypt_key_size = 0;
int sockaddr_size;
struct xfrm_encap_tmpl *natt = NULL;
+ int mode;
/* address family check */
sockaddr_size = pfkey_sockaddr_size(x->props.family);
@@ -928,7 +958,11 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
sa2 = (struct sadb_x_sa2 *) skb_put(skb, sizeof(struct sadb_x_sa2));
sa2->sadb_x_sa2_len = sizeof(struct sadb_x_sa2)/sizeof(uint64_t);
sa2->sadb_x_sa2_exttype = SADB_X_EXT_SA2;
- sa2->sadb_x_sa2_mode = x->props.mode + 1;
+ if ((mode = pfkey_mode_from_xfrm(x->props.mode)) < 0) {
+ kfree_skb(skb);
+ return ERR_PTR(-EINVAL);
+ }
+ sa2->sadb_x_sa2_mode = mode;
sa2->sadb_x_sa2_reserved1 = 0;
sa2->sadb_x_sa2_reserved2 = 0;
sa2->sadb_x_sa2_sequence = 0;
@@ -1155,9 +1189,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
if (ext_hdrs[SADB_X_EXT_SA2-1]) {
struct sadb_x_sa2 *sa2 = (void*)ext_hdrs[SADB_X_EXT_SA2-1];
- x->props.mode = sa2->sadb_x_sa2_mode;
- if (x->props.mode)
- x->props.mode--;
+ int mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
+ if (mode < 0) {
+ err = -EINVAL;
+ goto out;
+ }
+ x->props.mode = mode;
x->props.reqid = sa2->sadb_x_sa2_reqid;
}
@@ -1218,7 +1255,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
struct sadb_address *saddr, *daddr;
struct sadb_msg *out_hdr;
struct xfrm_state *x = NULL;
- u8 mode;
+ int mode;
u32 reqid;
u8 proto;
unsigned short family;
@@ -1233,7 +1270,9 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
return -EINVAL;
if ((sa2 = ext_hdrs[SADB_X_EXT_SA2-1]) != NULL) {
- mode = sa2->sadb_x_sa2_mode - 1;
+ mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
+ if (mode < 0)
+ return -EINVAL;
reqid = sa2->sadb_x_sa2_reqid;
} else {
mode = 0;
@@ -1756,6 +1795,7 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct sockaddr_in6 *sin6;
#endif
+ int mode;
if (xp->xfrm_nr >= XFRM_MAX_DEPTH)
return -ELOOP;
@@ -1764,7 +1804,9 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
return -EINVAL;
t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
- t->mode = rq->sadb_x_ipsecrequest_mode-1;
+ if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
+ return -EINVAL;
+ t->mode = mode;
if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE)
t->optional = 1;
else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
@@ -1877,7 +1919,7 @@ static struct sk_buff * pfkey_xfrm_policy2msg_prep(struct xfrm_policy *xp)
return skb;
}
-static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir)
+static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir)
{
struct sadb_msg *hdr;
struct sadb_address *addr;
@@ -2014,6 +2056,7 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
struct sadb_x_ipsecrequest *rq;
struct xfrm_tmpl *t = xp->xfrm_vec + i;
int req_size;
+ int mode;
req_size = sizeof(struct sadb_x_ipsecrequest);
if (t->mode == XFRM_MODE_TUNNEL)
@@ -2027,7 +2070,9 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
memset(rq, 0, sizeof(*rq));
rq->sadb_x_ipsecrequest_len = req_size;
rq->sadb_x_ipsecrequest_proto = t->id.proto;
- rq->sadb_x_ipsecrequest_mode = t->mode+1;
+ if ((mode = pfkey_mode_from_xfrm(t->mode)) < 0)
+ return -EINVAL;
+ rq->sadb_x_ipsecrequest_mode = mode;
rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE;
if (t->reqid)
rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE;
@@ -2089,6 +2134,8 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
hdr->sadb_msg_len = size / sizeof(uint64_t);
hdr->sadb_msg_reserved = atomic_read(&xp->refcnt);
+
+ return 0;
}
static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
@@ -2102,7 +2149,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
err = PTR_ERR(out_skb);
goto out;
}
- pfkey_xfrm_policy2msg(out_skb, xp, dir);
+ err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+ if (err < 0)
+ return err;
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = PF_KEY_V2;
@@ -2327,7 +2376,9 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, struct sadb
err = PTR_ERR(out_skb);
goto out;
}
- pfkey_xfrm_policy2msg(out_skb, xp, dir);
+ err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+ if (err < 0)
+ goto out;
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = hdr->sadb_msg_version;
@@ -2409,6 +2460,7 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
{
int err;
struct sadb_x_ipsecrequest *rq2;
+ int mode;
if (len <= sizeof(struct sadb_x_ipsecrequest) ||
len < rq1->sadb_x_ipsecrequest_len)
@@ -2439,7 +2491,9 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
return -EINVAL;
m->proto = rq1->sadb_x_ipsecrequest_proto;
- m->mode = rq1->sadb_x_ipsecrequest_mode - 1;
+ if ((mode = pfkey_mode_to_xfrm(rq1->sadb_x_ipsecrequest_mode)) < 0)
+ return -EINVAL;
+ m->mode = mode;
m->reqid = rq1->sadb_x_ipsecrequest_reqid;
return ((int)(rq1->sadb_x_ipsecrequest_len +
@@ -2579,12 +2633,15 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
struct pfkey_dump_data *data = ptr;
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
+ int err;
out_skb = pfkey_xfrm_policy2msg_prep(xp);
if (IS_ERR(out_skb))
return PTR_ERR(out_skb);
- pfkey_xfrm_policy2msg(out_skb, xp, dir);
+ err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+ if (err < 0)
+ return err;
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = data->hdr->sadb_msg_version;
@@ -3513,7 +3570,10 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
for (i = 0, mp = m; i < num_bundles; i++, mp++) {
/* old ipsecrequest */
- if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
+ int mode = pfkey_mode_from_xfrm(mp->mode);
+ if (mode < 0)
+ return -EINVAL;
+ if (set_ipsecrequest(skb, mp->proto, mode,
(mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
mp->reqid, mp->old_family,
&mp->old_saddr, &mp->old_daddr) < 0) {
@@ -3521,7 +3581,7 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
}
/* new ipsecrequest */
- if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
+ if (set_ipsecrequest(skb, mp->proto, mode,
(mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
mp->reqid, mp->new_family,
&mp->new_saddr, &mp->new_daddr) < 0) {
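
The af_key changes replace the old "mode + 1" / "mode - 1" arithmetic with explicit, validating conversions between XFRM and PF_KEY mode constants, so unknown or out-of-range modes are rejected with -EINVAL instead of being silently mangled. Here is a minimal standalone sketch of the same pattern; the numeric constant values are assumptions made for illustration.

/* Explicit, validating mapping between two enumerations, instead of
 * relying on "+1"/"-1" offsets. Constant values are assumed here and
 * are not authoritative. */
#include <stdio.h>

enum xfrm_mode  { XFRM_MODE_TRANSPORT = 0, XFRM_MODE_TUNNEL = 1, XFRM_MODE_BEET = 4 };
enum ipsec_mode { IPSEC_MODE_ANY = 0, IPSEC_MODE_TRANSPORT = 1,
                  IPSEC_MODE_TUNNEL = 2, IPSEC_MODE_BEET = 3 };

static int mode_from_xfrm(int mode)
{
    switch (mode) {
    case XFRM_MODE_TRANSPORT: return IPSEC_MODE_TRANSPORT;
    case XFRM_MODE_TUNNEL:    return IPSEC_MODE_TUNNEL;
    case XFRM_MODE_BEET:      return IPSEC_MODE_BEET;
    default:                  return -1;   /* unknown: caller returns -EINVAL */
    }
}

static int mode_to_xfrm(int mode)
{
    switch (mode) {
    case IPSEC_MODE_ANY:                   /* treated as transport, as in the patch */
    case IPSEC_MODE_TRANSPORT: return XFRM_MODE_TRANSPORT;
    case IPSEC_MODE_TUNNEL:    return XFRM_MODE_TUNNEL;
    case IPSEC_MODE_BEET:      return XFRM_MODE_BEET;
    default:                   return -1;
    }
}

int main(void)
{
    printf("tunnel -> %d, bogus 7 -> %d\n",
           mode_from_xfrm(XFRM_MODE_TUNNEL), mode_to_xfrm(7));
    return 0;
}
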
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index e73d8f546c6b..c48b0f49f003 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -443,6 +443,7 @@ static int netlink_release(struct socket *sock)
return 0;
netlink_remove(sk);
+ sock_orphan(sk);
nlk = nlk_sk(sk);
spin_lock(&nlk->cb_lock);
@@ -457,7 +458,6 @@ static int netlink_release(struct socket *sock)
/* OK. Socket is unlinked, and, therefore,
no new packets will arrive */
- sock_orphan(sk);
sock->sk = NULL;
wake_up_interruptible_all(&nlk->wait);
@@ -1412,9 +1412,9 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
return -ECONNREFUSED;
}
nlk = nlk_sk(sk);
- /* A dump is in progress... */
+ /* A dump or destruction is in progress... */
spin_lock(&nlk->cb_lock);
- if (nlk->cb) {
+ if (nlk->cb || sock_flag(sk, SOCK_DEAD)) {
spin_unlock(&nlk->cb_lock);
netlink_destroy_callback(cb);
sock_put(sk);
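
The netlink hunks move sock_orphan() ahead of the callback teardown and make netlink_dump_start() test SOCK_DEAD under cb_lock, closing the window in which a dump could be attached to a socket that is already being released. The sketch below models the general pattern: set a "dying" flag under the same lock that guards the resource and have late registrations check it under that lock. The types and names are illustrative, not the kernel's.

/* Model of closing a race between teardown and late registration:
 * both the "dead" flag and the callback slot are only touched under
 * the same lock. Illustrative only. */
#include <pthread.h>
#include <stdio.h>

struct sock_model {
    pthread_mutex_t cb_lock;
    int dead;               /* set once teardown has started */
    void (*cb)(void);       /* at most one pending dump callback */
};

static int dump_start(struct sock_model *sk, void (*cb)(void))
{
    int err = 0;

    pthread_mutex_lock(&sk->cb_lock);
    if (sk->cb || sk->dead)     /* busy, or release already in progress */
        err = -1;
    else
        sk->cb = cb;
    pthread_mutex_unlock(&sk->cb_lock);
    return err;
}

static void release(struct sock_model *sk)
{
    pthread_mutex_lock(&sk->cb_lock);
    sk->dead = 1;               /* no new dumps may attach after this */
    sk->cb = NULL;              /* drop any pending callback */
    pthread_mutex_unlock(&sk->cb_lock);
}

static void dummy_cb(void) { }

int main(void)
{
    struct sock_model sk = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };

    release(&sk);
    printf("dump_start after release: %d\n", dump_start(&sk, dummy_cb));
    return 0;
}
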
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 536298c2eda2..a1d026f12b0e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -627,6 +627,12 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
retval = -EINVAL;
goto err_bindx_rem;
}
+
+ if (!af->addr_valid(sa_addr, sp, NULL)) {
+ retval = -EADDRNOTAVAIL;
+ goto err_bindx_rem;
+ }
+
if (sa_addr->v4.sin_port != htons(bp->port)) {
retval = -EINVAL;
goto err_bindx_rem;
@@ -5638,6 +5644,36 @@ void sctp_wait_for_close(struct sock *sk, long timeout)
finish_wait(sk->sk_sleep, &wait);
}
+static void sctp_sock_rfree_frag(struct sk_buff *skb)
+{
+ struct sk_buff *frag;
+
+ if (!skb->data_len)
+ goto done;
+
+ /* Don't forget the fragments. */
+ for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next)
+ sctp_sock_rfree_frag(frag);
+
+done:
+ sctp_sock_rfree(skb);
+}
+
+static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
+{
+ struct sk_buff *frag;
+
+ if (!skb->data_len)
+ goto done;
+
+ /* Don't forget the fragments. */
+ for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next)
+ sctp_skb_set_owner_r_frag(frag, sk);
+
+done:
+ sctp_skb_set_owner_r(skb, sk);
+}
+
/* Populate the fields of the newsk from the oldsk and migrate the assoc
* and its messages to the newsk.
*/
@@ -5692,10 +5728,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
- sctp_sock_rfree(skb);
+ sctp_sock_rfree_frag(skb);
__skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb);
- sctp_skb_set_owner_r(skb, newsk);
+ sctp_skb_set_owner_r_frag(skb, newsk);
}
}
@@ -5723,10 +5759,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
- sctp_sock_rfree(skb);
+ sctp_sock_rfree_frag(skb);
__skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb);
- sctp_skb_set_owner_r(skb, newsk);
+ sctp_skb_set_owner_r_frag(skb, newsk);
}
}
@@ -5738,6 +5774,16 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
}
+ sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) {
+ sctp_sock_rfree_frag(skb);
+ sctp_skb_set_owner_r_frag(skb, newsk);
+ }
+
+ sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) {
+ sctp_sock_rfree_frag(skb);
+ sctp_skb_set_owner_r_frag(skb, newsk);
+ }
+
/* Set the type of socket to indicate that it is peeled off from the
* original UDP-style socket or created with the accept() call on a
* TCP-style socket.
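
The sctp_sock_migrate() changes make sure receive-buffer accounting follows every fragment chained on an skb's frag_list, not just the head, and also re-own the buffers still sitting on the reassembly and ordering queues. A simplified model of that recursive re-owning walk, with made-up types and helpers:

/* Simplified model of re-assigning "ownership" accounting for a buffer
 * and every fragment chained off it, like the *_frag() helpers above.
 * Types and helpers are made up. */
#include <stdio.h>

struct owner { const char *name; int charged; };

struct fragbuf {
    struct owner   *owner;
    struct fragbuf *frag_list;   /* first fragment, if any */
    struct fragbuf *next;        /* next fragment in the list */
};

static void set_owner(struct fragbuf *b, struct owner *o)
{
    if (b->owner)
        b->owner->charged--;     /* uncharge the old owner */
    b->owner = o;
    o->charged++;                /* charge the new one */
}

/* Walk the head and all chained fragments. */
static void set_owner_frag(struct fragbuf *b, struct owner *o)
{
    struct fragbuf *frag;

    for (frag = b->frag_list; frag; frag = frag->next)
        set_owner_frag(frag, o); /* fragments may themselves be fragmented */
    set_owner(b, o);
}

int main(void)
{
    struct owner oldsk = { "oldsk", 0 }, newsk = { "newsk", 0 };
    struct fragbuf f2   = { &oldsk, NULL, NULL };
    struct fragbuf f1   = { &oldsk, NULL, &f2 };
    struct fragbuf head = { &oldsk, &f1, NULL };

    oldsk.charged = 3;                       /* head + two fragments */
    set_owner_frag(&head, &newsk);
    printf("old=%d new=%d\n", oldsk.charged, newsk.charged);   /* 0 and 3 */
    return 0;
}
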
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index bfb197e37da3..b29e3e4b72c9 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -190,7 +190,14 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
if (!sctp_sk(sk)->pd_mode) {
queue = &sk->sk_receive_queue;
} else if (ulpq->pd_mode) {
- if (event->msg_flags & MSG_NOTIFICATION)
+ /* If the association is in partial delivery, we
+ * need to finish delivering the partially processed
+ * packet before passing any other data. This is
+ * because we don't truly support stream interleaving.
+ */
+ if ((event->msg_flags & MSG_NOTIFICATION) ||
+ (SCTP_DATA_NOT_FRAG ==
+ (event->msg_flags & SCTP_DATA_FRAG_MASK)))
queue = &sctp_sk(sk)->pd_lobby;
else {
clear_pd = event->msg_flags & MSG_EOR;
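
The ulpqueue hunk enforces ordering during partial delivery: while one message is being handed up in pieces, notifications and any newly complete message are parked on the pd_lobby rather than interleaved into the receive queue. A small model of that routing decision follows; the flag values are assumptions for illustration only.

/* Model of the queue-selection rule during partial delivery: while a
 * message is being delivered in pieces, notifications and any complete
 * message go to a side lobby instead of the receive queue. Flag values
 * are assumed. */
#include <stdio.h>

#define MSG_NOTIFICATION_FLAG 0x8000
#define DATA_FRAG_MASK        0x3
#define DATA_NOT_FRAG         0x3      /* first-frag and last-frag bits both set */

enum queue { RECEIVE_QUEUE, PD_LOBBY };

static enum queue pick_queue(int pd_mode, int msg_flags)
{
    if (!pd_mode)
        return RECEIVE_QUEUE;
    if ((msg_flags & MSG_NOTIFICATION_FLAG) ||
        (msg_flags & DATA_FRAG_MASK) == DATA_NOT_FRAG)
        return PD_LOBBY;               /* don't interleave with the partial message */
    return RECEIVE_QUEUE;              /* further fragments of the partial message */
}

int main(void)
{
    printf("complete msg during PD -> %s\n",
           pick_queue(1, DATA_NOT_FRAG) == PD_LOBBY ? "lobby" : "receive");
    printf("fragment during PD     -> %s\n",
           pick_queue(1, 0x1) == PD_LOBBY ? "lobby" : "receive");
    return 0;
}
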
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 9bae4090254c..2bd23ea2aa8b 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -383,7 +383,10 @@ void svcauth_unix_purge(void)
static inline struct ip_map *
ip_map_cached_get(struct svc_rqst *rqstp)
{
- struct ip_map *ipm = rqstp->rq_sock->sk_info_authunix;
+ struct ip_map *ipm;
+ struct svc_sock *svsk = rqstp->rq_sock;
+ spin_lock_bh(&svsk->sk_defer_lock);
+ ipm = svsk->sk_info_authunix;
if (ipm != NULL) {
if (!cache_valid(&ipm->h)) {
/*
@@ -391,12 +394,14 @@ ip_map_cached_get(struct svc_rqst *rqstp)
* remembered, e.g. by a second mount from the
* same IP address.
*/
- rqstp->rq_sock->sk_info_authunix = NULL;
+ svsk->sk_info_authunix = NULL;
+ spin_unlock_bh(&svsk->sk_defer_lock);
cache_put(&ipm->h, &ip_map_cache);
return NULL;
}
cache_get(&ipm->h);
}
+ spin_unlock_bh(&svsk->sk_defer_lock);
return ipm;
}
@@ -405,9 +410,15 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
{
struct svc_sock *svsk = rqstp->rq_sock;
- if (svsk->sk_sock->type == SOCK_STREAM && svsk->sk_info_authunix == NULL)
- svsk->sk_info_authunix = ipm; /* newly cached, keep the reference */
- else
+ spin_lock_bh(&svsk->sk_defer_lock);
+ if (svsk->sk_sock->type == SOCK_STREAM &&
+ svsk->sk_info_authunix == NULL) {
+ /* newly cached, keep the reference */
+ svsk->sk_info_authunix = ipm;
+ ipm = NULL;
+ }
+ spin_unlock_bh(&svsk->sk_defer_lock);
+ if (ipm)
cache_put(&ipm->h, &ip_map_cache);
}