-rw-r--r--  arch/i386/kernel/acpi/boot.c    |  8
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c  |  7
-rw-r--r--  arch/x86_64/kernel/traps.c      | 21
-rw-r--r--  arch/x86_64/mm/srat.c           | 15
-rw-r--r--  drivers/net/b44.c               | 28
-rw-r--r--  mm/slab.c                       | 11
6 files changed, 65 insertions(+), 25 deletions(-)
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 40e5aba3ad3d..daee69579b1c 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -1066,6 +1066,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
},
},
+ {
+ .callback = disable_acpi_pci,
+ .ident = "HP xw9300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
+ },
+ },
{}
};
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 44adcc2d5e5b..1f6ecc62061d 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -12,9 +12,10 @@ static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
if (hwdev && bus + size > *hwdev->dma_mask) {
- printk(KERN_ERR
- "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
- name, (long long)bus, size, (long long)*hwdev->dma_mask);
+ if (*hwdev->dma_mask >= 0xffffffffULL)
+ printk(KERN_ERR
+ "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
+ name, (long long)bus, size, (long long)*hwdev->dma_mask);
return 0;
}
return 1;
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 6b87268c5c2e..cea335e8746c 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -102,6 +102,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
{
if (regs->eflags & X86_EFLAGS_IF)
local_irq_disable();
+ /* Make sure to not schedule here because we could be running
+ on an exception stack. */
preempt_enable_no_resched();
}
@@ -483,8 +485,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
{
struct task_struct *tsk = current;
- conditional_sti(regs);
-
tsk->thread.error_code = error_code;
tsk->thread.trap_no = trapnr;
@@ -521,6 +521,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
== NOTIFY_STOP) \
return; \
+ conditional_sti(regs); \
do_trap(trapnr, signr, str, regs, error_code, NULL); \
}
@@ -535,6 +536,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
== NOTIFY_STOP) \
return; \
+ conditional_sti(regs); \
do_trap(trapnr, signr, str, regs, error_code, &info); \
}
@@ -548,7 +550,17 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+
+/* Runs on IST stack */
+asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+{
+ if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+ 12, SIGBUS) == NOTIFY_STOP)
+ return;
+ preempt_conditional_sti(regs);
+ do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+ preempt_conditional_cli(regs);
+}
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{
@@ -682,8 +694,9 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
return;
}
+ preempt_conditional_sti(regs);
do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
- return;
+ preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 15ae9fcd65a7..e1513532df29 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -34,7 +34,10 @@ static nodemask_t nodes_found __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
static int found_add_area __initdata;
-int hotadd_percent __initdata = 10;
+int hotadd_percent __initdata = 0;
+#ifndef RESERVE_HOTADD
+#define hotadd_percent 0 /* Ignore all settings */
+#endif
static u8 pxm2node[256] = { [0 ... 255] = 0xff };
/* Too small nodes confuse the VM badly. Usually they result
@@ -103,6 +106,7 @@ static __init void bad_srat(void)
int i;
printk(KERN_ERR "SRAT: SRAT not used.\n");
acpi_numa = -1;
+ found_add_area = 0;
for (i = 0; i < MAX_LOCAL_APIC; i++)
apicid_to_node[i] = NUMA_NO_NODE;
for (i = 0; i < MAX_NUMNODES; i++)
@@ -154,7 +158,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
int pxm, node;
if (srat_disabled())
return;
- if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
+ if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+ bad_srat();
return;
}
if (pa->flags.enabled == 0)
@@ -191,15 +196,17 @@ static int hotadd_enough_memory(struct bootnode *nd)
allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
allowed = (allowed / 100) * hotadd_percent;
if (allocated + mem > allowed) {
+ unsigned long range;
+		/* Give them at least part of their hotadd memory up to hotadd_percent.
It would be better to spread the limit out
over multiple hotplug areas, but that is too complicated
right now */
if (allocated >= allowed)
return 0;
- pages = (allowed - allocated + mem) / sizeof(struct page);
+ range = allowed - allocated;
+ pages = (range / PAGE_SIZE);
mem = pages * sizeof(struct page);
- nd->end = nd->start + pages*PAGE_SIZE;
+ nd->end = nd->start + range;
}
/* Not completely fool proof, but a good sanity check */
addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 3d306681919e..d8233e0b7899 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -650,9 +650,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
/* Hardware bug work-around, the chip is unable to do PCI DMA
to/from anything above 1GB :-( */
- if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+ if (dma_mapping_error(mapping) ||
+ mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
/* Sigh... */
- pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+ if (!dma_mapping_error(mapping))
+ pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
if (skb == NULL)
@@ -660,8 +662,10 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
mapping = pci_map_single(bp->pdev, skb->data,
RX_PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
- if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
- pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(mapping) ||
+ mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+ if (!dma_mapping_error(mapping))
+ pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
return -ENOMEM;
}
@@ -967,9 +971,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- if (mapping + len > B44_DMA_MASK) {
+ if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
/* Chip can't handle DMA to/from >1GB, use bounce buffer */
- pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
+ if (!dma_mapping_error(mapping))
+ pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
GFP_ATOMIC|GFP_DMA);
@@ -978,8 +983,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = pci_map_single(bp->pdev, bounce_skb->data,
len, PCI_DMA_TODEVICE);
- if (mapping + len > B44_DMA_MASK) {
- pci_unmap_single(bp->pdev, mapping,
+ if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
+ if (!dma_mapping_error(mapping))
+ pci_unmap_single(bp->pdev, mapping,
len, PCI_DMA_TODEVICE);
dev_kfree_skb_any(bounce_skb);
goto err_out;
@@ -1203,7 +1209,8 @@ static int b44_alloc_consistent(struct b44 *bp)
DMA_TABLE_BYTES,
DMA_BIDIRECTIONAL);
- if (rx_ring_dma + size > B44_DMA_MASK) {
+ if (dma_mapping_error(rx_ring_dma) ||
+ rx_ring_dma + size > B44_DMA_MASK) {
kfree(rx_ring);
goto out_err;
}
@@ -1229,7 +1236,8 @@ static int b44_alloc_consistent(struct b44 *bp)
DMA_TABLE_BYTES,
DMA_TO_DEVICE);
- if (tx_ring_dma + size > B44_DMA_MASK) {
+ if (dma_mapping_error(tx_ring_dma) ||
+ tx_ring_dma + size > B44_DMA_MASK) {
kfree(tx_ring);
goto out_err;
}
diff --git a/mm/slab.c b/mm/slab.c
index b1d643b5238d..d31a06bfbea5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2200,11 +2200,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
check_irq_on();
for_each_online_node(node) {
l3 = cachep->nodelists[node];
- if (l3) {
+ if (l3 && l3->alien)
+ drain_alien_cache(cachep, l3->alien);
+ }
+
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+ if (l3)
drain_array(cachep, l3, l3->shared, 1, node);
- if (l3->alien)
- drain_alien_cache(cachep, l3->alien);
- }
}
}