Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/cstate.c     |  3
-rw-r--r--  arch/x86/kernel/amd_gart_64.c     |  1
-rw-r--r--  arch/x86/kernel/apic/hw_nmi.c     |  2
-rw-r--r--  arch/x86/kernel/cpu/Makefile      |  1
-rw-r--r--  arch/x86/kernel/cpu/aperfmperf.c  | 79
-rw-r--r--  arch/x86/kernel/cpu/proc.c        | 10
-rw-r--r--  arch/x86/kernel/crash.c           |  2
-rw-r--r--  arch/x86/kernel/e820.c            | 49
-rw-r--r--  arch/x86/kernel/kexec-bzimage64.c |  4
-rw-r--r--  arch/x86/kernel/kvm.c             |  7
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c  | 25
-rw-r--r--  arch/x86/kernel/pci-dma.c         |  8
-rw-r--r--  arch/x86/kernel/pci-nommu.c       | 11
-rw-r--r--  arch/x86/kernel/setup.c           |  7
14 files changed, 166 insertions(+), 43 deletions(-)
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 8233a630280f..dde437f5d14f 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -167,7 +167,8 @@ static int __init ffh_cstate_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (c->x86_vendor != X86_VENDOR_INTEL)
+ if (c->x86_vendor != X86_VENDOR_INTEL &&
+ c->x86_vendor != X86_VENDOR_AMD)
return -1;
cpu_cstate_entry = alloc_percpu(struct cstate_entry);
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 815dd63f49d0..cc0e8bc0ea3f 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -704,6 +704,7 @@ static const struct dma_map_ops gart_dma_ops = {
.alloc = gart_alloc_coherent,
.free = gart_free_coherent,
.mapping_error = gart_mapping_error,
+ .dma_supported = x86_dma_supported,
};
static void gart_iommu_shutdown(void)
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index c73c9fb281e1..d6f387780849 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -19,7 +19,7 @@
#include <linux/init.h>
#include <linux/delay.h>
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
return (u64)(cpu_khz) * 1000 * watchdog_thresh;
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 52000010c62e..cdf82492b770 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -21,6 +21,7 @@ obj-y += common.o
obj-y += rdrand.o
obj-y += match.o
obj-y += bugs.o
+obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
new file mode 100644
index 000000000000..d869c8671e36
--- /dev/null
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -0,0 +1,79 @@
+/*
+ * x86 APERF/MPERF KHz calculation for
+ * /sys/.../cpufreq/scaling_cur_freq
+ *
+ * Copyright (C) 2017 Intel Corp.
+ * Author: Len Brown <len.brown@intel.com>
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/math64.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+
+struct aperfmperf_sample {
+ unsigned int khz;
+ unsigned long jiffies;
+ u64 aperf;
+ u64 mperf;
+};
+
+static DEFINE_PER_CPU(struct aperfmperf_sample, samples);
+
+/*
+ * aperfmperf_snapshot_khz()
+ * On the current CPU, snapshot APERF, MPERF, and jiffies
+ * unless we already did it within 10ms
+ * calculate kHz, save snapshot
+ */
+static void aperfmperf_snapshot_khz(void *dummy)
+{
+ u64 aperf, aperf_delta;
+ u64 mperf, mperf_delta;
+ struct aperfmperf_sample *s = this_cpu_ptr(&samples);
+
+ /* Don't bother re-computing within 10 ms */
+ if (time_before(jiffies, s->jiffies + HZ/100))
+ return;
+
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+
+ aperf_delta = aperf - s->aperf;
+ mperf_delta = mperf - s->mperf;
+
+ /*
+ * There is no architectural guarantee that MPERF
+ * increments faster than we can read it.
+ */
+ if (mperf_delta == 0)
+ return;
+
+ /*
+ * if (cpu_khz * aperf_delta) fits into ULLONG_MAX, then
+ * khz = (cpu_khz * aperf_delta) / mperf_delta
+ */
+ if (div64_u64(ULLONG_MAX, cpu_khz) > aperf_delta)
+ s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
+ else /* khz = aperf_delta / (mperf_delta / cpu_khz) */
+ s->khz = div64_u64(aperf_delta,
+ div64_u64(mperf_delta, cpu_khz));
+ s->jiffies = jiffies;
+ s->aperf = aperf;
+ s->mperf = mperf;
+}
+
+unsigned int arch_freq_get_on_cpu(int cpu)
+{
+ if (!cpu_khz)
+ return 0;
+
+ if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ return 0;
+
+ smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+
+ return per_cpu(samples.khz, cpu);
+}
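
The kHz computation above scales the base clock (cpu_khz, the TSC rate) by the APERF/MPERF ratio over the sampling window, taking the second division path only when the multiplication would overflow 64 bits. A minimal user-space sketch of the same arithmetic, with made-up sample deltas (none of these values come from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical deltas, for illustration only. */
	uint64_t cpu_khz     = 3400000;     /* 3.4 GHz TSC base clock  */
	uint64_t aperf_delta = 180000000;   /* actual cycles in window */
	uint64_t mperf_delta = 200000000;   /* reference cycles        */
	uint64_t khz;

	/* Same overflow guard as aperfmperf_snapshot_khz(): multiply
	 * first only if cpu_khz * aperf_delta fits in 64 bits. */
	if (UINT64_MAX / cpu_khz > aperf_delta)
		khz = cpu_khz * aperf_delta / mperf_delta;
	else
		khz = aperf_delta / (mperf_delta / cpu_khz);

	/* Prints 3060000 kHz: the core averaged 90% of its base clock. */
	printf("%llu kHz\n", (unsigned long long)khz);
	return 0;
}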
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 6df621ae62a7..218f79825b3c 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -2,7 +2,6 @@
#include <linux/timex.h>
#include <linux/string.h>
#include <linux/seq_file.h>
-#include <linux/cpufreq.h>
/*
* Get CPU information for use by the procfs.
@@ -76,14 +75,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (c->microcode)
seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
- if (cpu_has(c, X86_FEATURE_TSC)) {
- unsigned int freq = cpufreq_quick_get(cpu);
-
- if (!freq)
- freq = cpu_khz;
+ if (cpu_has(c, X86_FEATURE_TSC))
seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
- freq / 1000, (freq % 1000));
- }
+ cpu_khz / 1000, (cpu_khz % 1000));
/* Cache size */
if (c->x86_cache_size >= 0)
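
With cpufreq_quick_get() gone from show_cpuinfo(), the "cpu MHz" field in /proc/cpuinfo becomes the constant TSC rate; the dynamic per-CPU value is now served by cpufreq's scaling_cur_freq, which arch_freq_get_on_cpu() above feeds. A sketch of reading it from user space, assuming the standard sysfs location:

#include <stdio.h>

int main(void)
{
	/* Standard cpufreq sysfs node; adjust the CPU number as needed. */
	const char *path =
		"/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq";
	unsigned int khz;
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &khz) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("cpu0: %u.%03u MHz\n", khz / 1000, khz % 1000);
	return 0;
}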
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 22217ece26c8..44404e2307bb 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -457,7 +457,7 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
bufp += sizeof(Elf64_Phdr);
phdr->p_type = PT_NOTE;
phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
- phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
+ phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
(ehdr->e_phnum)++;
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d78a586ba8dc..532da61d605c 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -20,10 +20,12 @@
#include <asm/setup.h>
/*
- * We organize the E820 table into two main data structures:
+ * We organize the E820 table into three main data structures:
*
* - 'e820_table_firmware': the original firmware version passed to us by the
- * bootloader - not modified by the kernel. We use this to:
+ * bootloader - not modified by the kernel. It is composed of two parts:
+ * the first 128 E820 memory entries in boot_params.e820_table and the remaining
+ * (if any) entries of the SETUP_E820_EXT nodes. We use this to:
*
* - inform the user about the firmware's notion of memory layout
* via /sys/firmware/memmap
@@ -31,6 +33,14 @@
* - the hibernation code uses it to generate a kernel-independent MD5
* fingerprint of the physical memory layout of a system.
*
+ * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
+ * passed to us by the bootloader - the major difference between
+ * e820_table_firmware[] and this one is that the latter marks the setup_data
+ * list created by the EFI boot stub as reserved, so that kexec can reuse the
+ * setup_data information in the second kernel. In addition, e820_table_kexec[]
+ * might be modified by kexec itself to fake an mptable.
+ * We use this to:
+ *
* - kexec, which is a bootloader in disguise, uses the original E820
* layout to pass to the kexec-ed kernel. This way the original kernel
* can have a restricted E820 map while the kexec()-ed kexec-kernel
@@ -46,9 +56,11 @@
* specific memory layout data during early bootup.
*/
static struct e820_table e820_table_init __initdata;
+static struct e820_table e820_table_kexec_init __initdata;
static struct e820_table e820_table_firmware_init __initdata;
struct e820_table *e820_table __refdata = &e820_table_init;
+struct e820_table *e820_table_kexec __refdata = &e820_table_kexec_init;
struct e820_table *e820_table_firmware __refdata = &e820_table_firmware_init;
/* For PCI or other memory-mapped resources */
@@ -470,9 +482,9 @@ u64 __init e820__range_update(u64 start, u64 size, enum e820_type old_type, enum
return __e820__range_update(e820_table, start, size, old_type, new_type);
}
-static u64 __init e820__range_update_firmware(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type)
+static u64 __init e820__range_update_kexec(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type)
{
- return __e820__range_update(e820_table_firmware, start, size, old_type, new_type);
+ return __e820__range_update(e820_table_kexec, start, size, old_type, new_type);
}
/* Remove a range of memory from the E820 table: */
@@ -546,9 +558,9 @@ void __init e820__update_table_print(void)
e820__print_table("modified");
}
-static void __init e820__update_table_firmware(void)
+static void __init e820__update_table_kexec(void)
{
- e820__update_table(e820_table_firmware);
+ e820__update_table(e820_table_kexec);
}
#define MAX_GAP_END 0x100000000ull
@@ -623,7 +635,7 @@ __init void e820__setup_pci_gap(void)
/*
* Called late during init, in free_initmem().
*
- * Initial e820_table and e820_table_firmware are largish __initdata arrays.
+ * Initial e820_table and e820_table_kexec are largish __initdata arrays.
*
* Copy them to a (usually much smaller) dynamically allocated area that is
* sized precisely after the number of e820 entries.
@@ -643,6 +655,12 @@ __init void e820__reallocate_tables(void)
memcpy(n, e820_table, size);
e820_table = n;
+ size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_kexec->nr_entries;
+ n = kmalloc(size, GFP_KERNEL);
+ BUG_ON(!n);
+ memcpy(n, e820_table_kexec, size);
+ e820_table_kexec = n;
+
size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries;
n = kmalloc(size, GFP_KERNEL);
BUG_ON(!n);
@@ -669,6 +687,9 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
__append_e820_table(extmap, entries);
e820__update_table(e820_table);
+ memcpy(e820_table_kexec, e820_table, sizeof(*e820_table_kexec));
+ memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware));
+
early_memunmap(sdata, data_len);
pr_info("e820: extended physical RAM map:\n");
e820__print_table("extended");
@@ -727,7 +748,7 @@ core_initcall(e820__register_nvs_regions);
/*
* Allocate the requested number of bytes with the requested alignment
* and return (the physical address) to the caller. Also register this
- * range in the 'firmware' E820 table as a reserved range.
+ * range in the 'kexec' E820 table as a reserved range.
*
* This allows kexec to fake a new mptable, as if it came from the real
* system.
@@ -738,9 +759,9 @@ u64 __init e820__memblock_alloc_reserved(u64 size, u64 align)
addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
if (addr) {
- e820__range_update_firmware(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
- pr_info("e820: update e820_table_firmware for e820__memblock_alloc_reserved()\n");
- e820__update_table_firmware();
+ e820__range_update_kexec(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
+ pr_info("e820: update e820_table_kexec for e820__memblock_alloc_reserved()\n");
+ e820__update_table_kexec();
}
return addr;
@@ -923,13 +944,13 @@ void __init e820__reserve_setup_data(void)
while (pa_data) {
data = early_memremap(pa_data, sizeof(*data));
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+ e820__range_update_kexec(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
pa_data = data->next;
early_memunmap(data, sizeof(*data));
}
e820__update_table(e820_table);
-
- memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware));
+ e820__update_table(e820_table_kexec);
pr_info("extended physical RAM map:\n");
e820__print_table("reserve setup_data");
@@ -1062,6 +1083,7 @@ void __init e820__reserve_resources(void)
res++;
}
+ /* Expose the bootloader-provided memory layout to the sysfs. */
for (i = 0; i < e820_table_firmware->nr_entries; i++) {
struct e820_entry *entry = e820_table_firmware->entries + i;
@@ -1175,6 +1197,7 @@ void __init e820__memory_setup(void)
who = x86_init.resources.memory_setup();
+ memcpy(e820_table_kexec, e820_table, sizeof(*e820_table_kexec));
memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware));
pr_info("e820: BIOS-provided physical RAM map:\n");
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 9d7fd5e6689a..fb095ba0c02f 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -100,14 +100,14 @@ static int setup_e820_entries(struct boot_params *params)
{
unsigned int nr_e820_entries;
- nr_e820_entries = e820_table_firmware->nr_entries;
+ nr_e820_entries = e820_table_kexec->nr_entries;
/* TODO: Pass entries more than E820_MAX_ENTRIES_ZEROPAGE in bootparams setup data */
if (nr_e820_entries > E820_MAX_ENTRIES_ZEROPAGE)
nr_e820_entries = E820_MAX_ENTRIES_ZEROPAGE;
params->e820_entries = nr_e820_entries;
- memcpy(&params->e820_table, &e820_table_firmware->entries, nr_e820_entries*sizeof(struct e820_entry));
+ memcpy(&params->e820_table, &e820_table_kexec->entries, nr_e820_entries*sizeof(struct e820_entry));
return 0;
}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 43e10d6fdbed..71c17a5be983 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -330,7 +330,12 @@ static void kvm_guest_cpu_init(void)
#ifdef CONFIG_PREEMPT
pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
- wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
+ pa |= KVM_ASYNC_PF_ENABLED;
+
+ /* Async page fault support for L1 hypervisor is optional */
+ if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
+ (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
+ wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
__this_cpu_write(apf_reason.enabled, 1);
printk(KERN_INFO"KVM setup async PF for cpu %d\n",
smp_processor_id());
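
The wrmsr_safe()/wrmsrl() pair above is a feature probe: the guest first tries to enable async PF with the new KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT bit, and if an older hypervisor raises #GP for the unknown bit, it retries without it. The same pattern in isolation, as a sketch built on the <asm/msr.h> helpers:

#include <asm/msr.h>

/* Sketch: enable a feature MSR, dropping an optional bit the
 * hypervisor may not recognize. */
static void enable_msr_with_optional_bit(u32 msr, u64 base, u64 opt)
{
	u64 val = base | opt;

	/* wrmsr_safe() catches the #GP raised for unknown bits and
	 * returns a negative error instead of faulting. */
	if (wrmsr_safe(msr, (u32)val, (u32)(val >> 32)) < 0)
		wrmsrl(msr, base);	/* retry without the new bit */
}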
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index fda7867046d0..5286a4a92cf7 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -50,6 +50,8 @@
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
+#define CALGARY_MAPPING_ERROR 0
+
#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
int use_calgary __read_mostly = 1;
#else
@@ -252,7 +254,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (panic_on_overflow)
panic("Calgary: fix the allocator.\n");
else
- return DMA_ERROR_CODE;
+ return CALGARY_MAPPING_ERROR;
}
}
@@ -272,10 +274,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
entry = iommu_range_alloc(dev, tbl, npages);
- if (unlikely(entry == DMA_ERROR_CODE)) {
+ if (unlikely(entry == CALGARY_MAPPING_ERROR)) {
pr_warn("failed to allocate %u pages in iommu %p\n",
npages, tbl);
- return DMA_ERROR_CODE;
+ return CALGARY_MAPPING_ERROR;
}
/* set the return dma address */
@@ -295,7 +297,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long flags;
/* were we called with bad_dma_address? */
- badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+ badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE);
if (unlikely(dma_addr < badend)) {
WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
"address 0x%Lx\n", dma_addr);
@@ -380,7 +382,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
entry = iommu_range_alloc(dev, tbl, npages);
- if (entry == DMA_ERROR_CODE) {
+ if (entry == CALGARY_MAPPING_ERROR) {
/* makes sure unmap knows to stop */
s->dma_length = 0;
goto error;
@@ -398,7 +400,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
error:
calgary_unmap_sg(dev, sg, nelems, dir, 0);
for_each_sg(sg, s, nelems, i) {
- sg->dma_address = DMA_ERROR_CODE;
+ sg->dma_address = CALGARY_MAPPING_ERROR;
sg->dma_length = 0;
}
return 0;
@@ -453,7 +455,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
/* set up tces to cover the allocated range */
mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
- if (mapping == DMA_ERROR_CODE)
+ if (mapping == CALGARY_MAPPING_ERROR)
goto free;
*dma_handle = mapping;
return ret;
@@ -478,6 +480,11 @@ static void calgary_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)vaddr, get_order(size));
}
+static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == CALGARY_MAPPING_ERROR;
+}
+
static const struct dma_map_ops calgary_dma_ops = {
.alloc = calgary_alloc_coherent,
.free = calgary_free_coherent,
@@ -485,6 +492,8 @@ static const struct dma_map_ops calgary_dma_ops = {
.unmap_sg = calgary_unmap_sg,
.map_page = calgary_map_page,
.unmap_page = calgary_unmap_page,
+ .mapping_error = calgary_mapping_error,
+ .dma_supported = x86_dma_supported,
};
static inline void __iomem * busno_to_bbar(unsigned char num)
@@ -732,7 +741,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
struct iommu_table *tbl = pci_iommu(dev->bus);
/* reserve EMERGENCY_PAGES from bad_dma_address and up */
- iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
+ iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES);
/* avoid the BIOS/VGA first 640KB-1MB region */
/* for CalIOC2 - avoid the entire first MB */
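
These hunks belong to the tree-wide removal of the global DMA_ERROR_CODE: each dma_map_ops instance now defines a private sentinel (0 is safe here because the first EMERGENCY_PAGES of IOVA space are never handed out) and reports it through the new ->mapping_error callback. A caller-side sketch; map_one_page() is a hypothetical wrapper, not from the patch:

#include <linux/dma-mapping.h>

/* Sketch: dma_mapping_error() dispatches to the driver's
 * ->mapping_error(), now the only authority on the sentinel
 * (CALGARY_MAPPING_ERROR == 0 for this driver). */
static int map_one_page(struct device *dev, struct page *page,
			size_t size, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, size,
				       DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}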
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3a216ec869cd..5e16d3f29594 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -213,10 +213,8 @@ static __init int iommu_setup(char *p)
}
early_param("iommu", iommu_setup);
-int dma_supported(struct device *dev, u64 mask)
+int x86_dma_supported(struct device *dev, u64 mask)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
#ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) {
dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -224,9 +222,6 @@ int dma_supported(struct device *dev, u64 mask)
}
#endif
- if (ops->dma_supported)
- return ops->dma_supported(dev, mask);
-
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */
@@ -252,7 +247,6 @@ int dma_supported(struct device *dev, u64 mask)
return 1;
}
-EXPORT_SYMBOL(dma_supported);
static int __init pci_iommu_init(void)
{
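
Renaming dma_supported() to x86_dma_supported() works because the generic dma_supported() now dispatches through the ops table rather than an arch-wide override, which is also why the IOMMU drivers above gain explicit .dma_supported = x86_dma_supported entries. Roughly the shape of the generic dispatch in this era's <linux/dma-mapping.h>, paraphrased:

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;	/* backend imposes no constraint */
	return ops->dma_supported(dev, mask);
}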
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index a88952ef371c..a6d404087fe3 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -11,6 +11,8 @@
#include <asm/iommu.h>
#include <asm/dma.h>
+#define NOMMU_MAPPING_ERROR 0
+
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
@@ -33,7 +35,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
- return DMA_ERROR_CODE;
+ return NOMMU_MAPPING_ERROR;
flush_write_buffers();
return bus;
}
@@ -88,6 +90,11 @@ static void nommu_sync_sg_for_device(struct device *dev,
flush_write_buffers();
}
+static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == NOMMU_MAPPING_ERROR;
+}
+
const struct dma_map_ops nommu_dma_ops = {
.alloc = dma_generic_alloc_coherent,
.free = dma_generic_free_coherent,
@@ -96,4 +103,6 @@ const struct dma_map_ops nommu_dma_ops = {
.sync_single_for_device = nommu_sync_single_for_device,
.sync_sg_for_device = nommu_sync_sg_for_device,
.is_phys = 1,
+ .mapping_error = nommu_mapping_error,
+ .dma_supported = x86_dma_supported,
};
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 65622f07e633..3486d0498800 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1076,6 +1076,13 @@ void __init setup_arch(char **cmdline_p)
max_possible_pfn = max_pfn;
/*
+ * This call is required when the CPU does not support PAT. If
+ * mtrr_bp_init() invoked it already via pat_init() the call has no
+ * effect.
+ */
+ init_cache_modes();
+
+ /*
* Define random base addresses for memory sections after max_pfn is
* defined and before each memory section base is used.
*/
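
init_cache_modes() tolerates being called twice because it is internally guarded; on PAT-capable systems the earlier pat_init() path has already run it and this call returns immediately. A sketch of that once-only guard (the real function uses an equivalent static flag):

static int init_cm_done;	/* set after the first successful run */

void init_cache_modes(void)
{
	if (init_cm_done)
		return;		/* already initialized via pat_init() */

	/* ... derive the cachemode <-> page-protection tables ... */

	init_cm_done = 1;
}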