Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c  |  22
-rw-r--r--  arch/arm64/kernel/cpufeature.c        |  19
-rw-r--r--  arch/arm64/kernel/entry.S             |   7
-rw-r--r--  arch/arm64/kernel/head.S              |  24
-rw-r--r--  arch/arm64/kernel/hibernate.c         |  82
-rw-r--r--  arch/arm64/kernel/kgdb.c              |  36
-rw-r--r--  arch/arm64/kernel/pci.c               | 159
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c    |  31
-rw-r--r--  arch/arm64/kernel/ptrace.c            |   8
-rw-r--r--  arch/arm64/kernel/setup.c             |  15
-rw-r--r--  arch/arm64/kernel/sleep.S             |  10
-rw-r--r--  arch/arm64/kernel/smp.c               |  22
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S       |  13
13 files changed, 278 insertions, 170 deletions
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 5f72475e2e3b..42ffdb54e162 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -121,7 +121,7 @@ static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
* 0 - If all the hooks ran successfully.
* -EINVAL - At least one hook is not supported by the CPU.
*/
-static int run_all_insn_set_hw_mode(unsigned long cpu)
+static int run_all_insn_set_hw_mode(unsigned int cpu)
{
int rc = 0;
unsigned long flags;
@@ -131,7 +131,7 @@ static int run_all_insn_set_hw_mode(unsigned long cpu)
list_for_each_entry(insn, &insn_emulation, node) {
bool enable = (insn->current_mode == INSN_HW);
if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
- pr_warn("CPU[%ld] cannot support the emulation of %s",
+ pr_warn("CPU[%u] cannot support the emulation of %s",
cpu, insn->ops->name);
rc = -EINVAL;
}
@@ -611,20 +611,6 @@ static struct insn_emulation_ops setend_ops = {
.set_hw_mode = setend_set_hw_mode,
};
-static int insn_cpu_hotplug_notify(struct notifier_block *b,
- unsigned long action, void *hcpu)
-{
- int rc = 0;
- if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
- rc = run_all_insn_set_hw_mode((unsigned long)hcpu);
-
- return notifier_from_errno(rc);
-}
-
-static struct notifier_block insn_cpu_hotplug_notifier = {
- .notifier_call = insn_cpu_hotplug_notify,
-};
-
/*
* Invoked as late_initcall, since not needed before init spawned.
*/
@@ -643,7 +629,9 @@ static int __init armv8_deprecated_init(void)
pr_info("setend instruction emulation is not supported on the system");
}
- register_cpu_notifier(&insn_cpu_hotplug_notifier);
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
+ "AP_ARM64_ISNDEP_STARTING",
+ run_all_insn_set_hw_mode, NULL);
register_insn_emulation_sysctl(ctl_abi);
return 0;
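
[Note: this hunk replaces the open-coded CPU hotplug notifier with the cpuhp state machine; the startup callback now runs on each CPU as it comes online, with no notifier block to maintain. A minimal sketch of the registration pattern follows; the callback and init names are illustrative, not from the patch.]

	/* Sketch: hotplug state-machine registration. The startup hook
	 * runs on every CPU that comes online; a NULL teardown hook means
	 * nothing runs on CPU removal. A non-zero return fails bring-up. */
	static int my_starting_cb(unsigned int cpu)	/* hypothetical */
	{
		return 0;
	}

	static int __init my_init(void)
	{
		return cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
						 "AP_ARM64_ISNDEP_STARTING",
						 my_starting_cb, NULL);
	}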
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 916d27ad79c1..62272eac1352 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -726,6 +726,19 @@ static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused
return is_kernel_in_hyp_mode();
}
+static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);
+
+ /*
+ * Activate the lower HYP offset only if:
+ * - the idmap doesn't clash with it,
+ * - the kernel is not running at EL2.
+ */
+ return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -803,6 +816,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64PFR0_EL0_SHIFT,
.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
},
+ {
+ .desc = "Reduced HYP mapping offset",
+ .capability = ARM64_HYP_OFFSET_LOW,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = hyp_offset_low,
+ },
{},
};
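
[Note: the new hyp_offset_low() selects the reduced HYP mapping offset only when the idmap sits above the low (VA_BITS - 1)-bit window. A worked sketch of the mask arithmetic, assuming VA_BITS = 48 — a configuration choice, not something the patch mandates:]

	/* GENMASK(h, l) builds a mask with bits l..h set (kernel form): */
	#define GENMASK(h, l) \
		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

	/* With VA_BITS = 48, GENMASK(46, 0) == 0x00007fffffffffff, so the
	 * capability fires only for an idmap at 0x0000800000000000 or
	 * above, and only when the kernel is not already running at EL2. */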
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 96e4a2b64cc1..441420ca7d08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -353,6 +353,8 @@ el1_sync:
lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
b.eq el1_da
+ cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1
+ b.eq el1_ia
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
@@ -364,6 +366,11 @@ el1_sync:
cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
b.ge el1_dbg
b el1_inv
+
+el1_ia:
+ /*
+ * Fall through to the Data abort case
+ */
el1_da:
/*
* Data abort handling
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 2c6e598a94dc..3e7b050e99dc 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
isb
bl __create_page_tables // recreate kernel mapping
+ tlbi vmalle1 // Remove any stale TLB entries
+ dsb nsh
+
msr sctlr_el1, x19 // re-enable the MMU
isb
ic iallu // flush instructions fetched
@@ -781,40 +784,25 @@ __primary_switch:
* Iterate over each entry in the relocation table, and apply the
* relocations in place.
*/
- ldr w8, =__dynsym_offset // offset to symbol table
ldr w9, =__rela_offset // offset to reloc table
ldr w10, =__rela_size // size of reloc table
mov_q x11, KIMAGE_VADDR // default virtual offset
add x11, x11, x23 // actual virtual offset
- add x8, x8, x11 // __va(.dynsym)
add x9, x9, x11 // __va(.rela)
add x10, x9, x10 // __va(.rela) + sizeof(.rela)
0: cmp x9, x10
- b.hs 2f
+ b.hs 1f
ldp x11, x12, [x9], #24
ldr x13, [x9, #-8]
cmp w12, #R_AARCH64_RELATIVE
- b.ne 1f
+ b.ne 0b
add x13, x13, x23 // relocate
str x13, [x11, x23]
b 0b
-1: cmp w12, #R_AARCH64_ABS64
- b.ne 0b
- add x12, x12, x12, lsl #1 // symtab offset: 24x top word
- add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word
- ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx
- ldr x15, [x12, #8] // Elf64_Sym::st_value
- cmp w14, #-0xf // SHN_ABS (0xfff1) ?
- add x14, x15, x23 // relocate
- csel x15, x14, x15, ne
- add x15, x13, x15
- str x15, [x11, x23]
- b 0b
-
-2:
+1:
#endif
ldr x8, =__primary_switched
br x8
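
[Note: with the R_AARCH64_ABS64 path gone (the .dynsym/.dynstr/.hash sections are now discarded; see the vmlinux.lds.S hunk at the bottom of this page), the loop only applies R_AARCH64_RELATIVE entries. A hedged C rendering of what the remaining assembly does, with u64/s64 standing in for the kernel's fixed-width types and x23 holding the KASLR offset:]

	typedef struct {
		u64 r_offset;			/* x11: where to apply  */
		u64 r_info;			/* w12: relocation type */
		s64 r_addend;			/* x13: value to slide  */
	} Elf64_Rela;

	#define R_AARCH64_RELATIVE	1027

	static void apply_relocs(Elf64_Rela *p, Elf64_Rela *end, u64 offset)
	{
		for (; p < end; p++) {			/* 0: cmp x9, x10 */
			if ((u32)p->r_info != R_AARCH64_RELATIVE)
				continue;		/* b.ne 0b        */
			/* str x13, [x11, x23]: store the slid value   */
			*(u64 *)(p->r_offset + offset) = p->r_addend + offset;
		}
	}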
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 21ab5df9fa76..65d81f965e74 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -35,6 +35,7 @@
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/suspend.h>
+#include <asm/sysreg.h>
#include <asm/virt.h>
/*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
set_pte(pte, __pte(virt_to_phys((void *)dst) |
pgprot_val(PAGE_KERNEL_EXEC)));
- /* Load our new page tables */
- asm volatile("msr ttbr0_el1, %0;"
- "isb;"
- "tlbi vmalle1is;"
- "dsb ish;"
- "isb" : : "r"(virt_to_phys(pgd)));
+ /*
+ * Load our new page tables. A strict BBM approach requires that we
+ * ensure that TLBs are free of any entries that may overlap with the
+ * global mappings we are about to install.
+ *
+ * For a real hibernate/resume cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+ * runtime services), while for a userspace-driven test_resume cycle it
+ * points to userspace page tables (and we must point it at a zero page
+ * ourselves). Elsewhere we only (un)install the idmap with preemption
+ * disabled, so T0SZ should be as required regardless.
+ */
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+ isb();
*phys_dst_addr = virt_to_phys((void *)dst);
@@ -394,6 +405,38 @@ int swsusp_arch_resume(void)
void *, phys_addr_t, phys_addr_t);
/*
+ * Restoring the memory image will overwrite the ttbr1 page tables.
+ * Create a second copy of just the linear map, and use this when
+ * restoring.
+ */
+ tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!tmp_pg_dir) {
+ pr_err("Failed to allocate memory for temporary page tables.");
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+ if (rc)
+ goto out;
+
+ /*
+ * Since we only copied the linear map, we need to find restore_pblist's
+ * linear map address.
+ */
+ lm_restore_pblist = LMADDR(restore_pblist);
+
+ /*
+ * We need a zero page that is zero before & after resume in order to
+ * break before make on the ttbr1 page tables.
+ */
+ zero_page = (void *)get_safe_page(GFP_ATOMIC);
+ if (!zero_page) {
+ pr_err("Failed to allocate zero page.");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /*
* Locate the exit code in the bottom-but-one page, so that *NULL
* still has disastrous effects.
*/
@@ -419,27 +462,6 @@ int swsusp_arch_resume(void)
__flush_dcache_area(hibernate_exit, exit_size);
/*
- * Restoring the memory image will overwrite the ttbr1 page tables.
- * Create a second copy of just the linear map, and use this when
- * restoring.
- */
- tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
- if (!tmp_pg_dir) {
- pr_err("Failed to allocate memory for temporary page tables.");
- rc = -ENOMEM;
- goto out;
- }
- rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
- if (rc)
- goto out;
-
- /*
- * Since we only copied the linear map, we need to find restore_pblist's
- * linear map address.
- */
- lm_restore_pblist = LMADDR(restore_pblist);
-
- /*
* KASLR will cause the el2 vectors to be in a different location in
* the resumed kernel. Load hibernate's temporary copy into el2.
*
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
__hyp_set_vectors(el2_vectors);
}
- /*
- * We need a zero page that is zero before & after resume in order to
- * to break before make on the ttbr1 page tables.
- */
- zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
resume_hdr.reenter_kernel, lm_restore_pblist,
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
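
[Note: the LMADDR() lookup above is needed because tmp_pg_dir copies only the linear map, so kernel-image pointers such as restore_pblist must be chased through their linear-map alias. In this file the macro amounts to roughly the following round-trip — a sketch inferred from context, not a verbatim quote:]

	/* Translate a kernel-image address to its linear-map alias via
	 * the physical address it refers to: */
	#define LMADDR(x)	phys_to_virt(virt_to_phys(x))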
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 8c57f6496e56..e017a9493b92 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -19,10 +19,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bug.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kprobes.h>
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
#include <asm/traps.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -338,15 +341,24 @@ void kgdb_arch_exit(void)
unregister_die_notifier(&kgdb_notifier);
}
-/*
- * ARM instructions are always in LE.
- * Break instruction is encoded in LE format
- */
-struct kgdb_arch arch_kgdb_ops = {
- .gdb_bpt_instr = {
- KGDB_DYN_BRK_INS_BYTE(0),
- KGDB_DYN_BRK_INS_BYTE(1),
- KGDB_DYN_BRK_INS_BYTE(2),
- KGDB_DYN_BRK_INS_BYTE(3),
- }
-};
+struct kgdb_arch arch_kgdb_ops;
+
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+ int err;
+
+ BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE);
+
+ err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr);
+ if (err)
+ return err;
+
+ return aarch64_insn_write((void *)bpt->bpt_addr,
+ (u32)AARCH64_BREAK_KGDB_DYN_DBG);
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+ return aarch64_insn_write((void *)bpt->bpt_addr,
+ *(u32 *)bpt->saved_instr);
+}
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 3c4e308b40a0..acf38722457b 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -17,6 +17,9 @@
#include <linux/mm.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/slab.h>
/*
@@ -36,25 +39,17 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return res->start;
}
-/**
- * pcibios_enable_device - Enable I/O and memory.
- * @dev: PCI device to be enabled
- * @mask: bitmask of BARs to enable
- */
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- if (pci_has_flag(PCI_PROBE_ONLY))
- return 0;
-
- return pci_enable_resources(dev, mask);
-}
-
/*
- * Try to assign the IRQ number from DT when adding a new device
+ * Try to assign the IRQ number when probing a new device
*/
-int pcibios_add_device(struct pci_dev *dev)
+int pcibios_alloc_irq(struct pci_dev *dev)
{
- dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
+ if (acpi_disabled)
+ dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
+#ifdef CONFIG_ACPI
+ else
+ return acpi_pci_irq_enable(dev);
+#endif
return 0;
}
@@ -65,13 +60,21 @@ int pcibios_add_device(struct pci_dev *dev)
int raw_pci_read(unsigned int domain, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *val)
{
- return -ENXIO;
+ struct pci_bus *b = pci_find_bus(domain, bus);
+
+ if (!b)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return b->ops->read(b, devfn, reg, len, val);
}
int raw_pci_write(unsigned int domain, unsigned int bus,
unsigned int devfn, int reg, int len, u32 val)
{
- return -ENXIO;
+ struct pci_bus *b = pci_find_bus(domain, bus);
+
+ if (!b)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return b->ops->write(b, devfn, reg, len, val);
}
#ifdef CONFIG_NUMA
@@ -85,10 +88,124 @@ EXPORT_SYMBOL(pcibus_to_node);
#endif
#ifdef CONFIG_ACPI
-/* Root bridge scanning */
+
+struct acpi_pci_generic_root_info {
+ struct acpi_pci_root_info common;
+ struct pci_config_window *cfg; /* config space mapping */
+};
+
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+
+ return root->segment;
+}
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ if (!acpi_disabled) {
+ struct pci_config_window *cfg = bridge->bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ ACPI_COMPANION_SET(&bridge->dev, adev);
+ }
+
+ return 0;
+}
+
+/*
+ * Lookup the bus range for the domain in MCFG, and set up config space
+ * mapping.
+ */
+static struct pci_config_window *
+pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
+{
+ struct resource *bus_res = &root->secondary;
+ u16 seg = root->segment;
+ struct pci_config_window *cfg;
+ struct resource cfgres;
+ unsigned int bsz;
+
+ /* Use address from _CBA if present, otherwise lookup MCFG */
+ if (!root->mcfg_addr)
+ root->mcfg_addr = pci_mcfg_lookup(seg, bus_res);
+
+ if (!root->mcfg_addr) {
+ dev_err(&root->device->dev, "%04x:%pR ECAM region not found\n",
+ seg, bus_res);
+ return NULL;
+ }
+
+ bsz = 1 << pci_generic_ecam_ops.bus_shift;
+ cfgres.start = root->mcfg_addr + bus_res->start * bsz;
+ cfgres.end = cfgres.start + resource_size(bus_res) * bsz - 1;
+ cfgres.flags = IORESOURCE_MEM;
+ cfg = pci_ecam_create(&root->device->dev, &cfgres, bus_res,
+ &pci_generic_ecam_ops);
+ if (IS_ERR(cfg)) {
+ dev_err(&root->device->dev, "%04x:%pR error %ld mapping ECAM\n",
+ seg, bus_res, PTR_ERR(cfg));
+ return NULL;
+ }
+
+ return cfg;
+}
+
+/* release_info: free resources allocated by init_info */
+static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
+{
+ struct acpi_pci_generic_root_info *ri;
+
+ ri = container_of(ci, struct acpi_pci_generic_root_info, common);
+ pci_ecam_free(ri->cfg);
+ kfree(ri);
+}
+
+static struct acpi_pci_root_ops acpi_pci_root_ops = {
+ .release_info = pci_acpi_generic_release_info,
+};
+
+/* Interface called from ACPI code to setup PCI host controller */
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
- /* TODO: Should be revisited when implementing PCI on ACPI */
- return NULL;
+ int node = acpi_get_node(root->device->handle);
+ struct acpi_pci_generic_root_info *ri;
+ struct pci_bus *bus, *child;
+
+ ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node);
+ if (!ri)
+ return NULL;
+
+ ri->cfg = pci_acpi_setup_ecam_mapping(root);
+ if (!ri->cfg) {
+ kfree(ri);
+ return NULL;
+ }
+
+ acpi_pci_root_ops.pci_ops = &ri->cfg->ops->pci_ops;
+ bus = acpi_pci_root_create(root, &acpi_pci_root_ops, &ri->common,
+ ri->cfg);
+ if (!bus)
+ return NULL;
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ return bus;
}
+
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
#endif
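
[Note: pci_acpi_setup_ecam_mapping() sizes the window from pci_generic_ecam_ops.bus_shift, which is 20 for standard ECAM: 4 KiB of config space per function, 8 functions per device, 32 devices per bus, i.e. 1 MiB per bus. A standalone sketch of the address arithmetic; the helper name is illustrative:]

	/* ECAM config-space address for (bus, devfn, reg), bus_shift = 20.
	 * This is also why cfgres above spans resource_size(bus_res) * bsz
	 * bytes starting at mcfg_addr + bus_res->start * bsz. */
	static u64 ecam_cfg_addr(u64 mcfg_addr, u32 bus, u32 devfn, u32 reg)
	{
		return mcfg_addr + ((u64)bus << 20) + (devfn << 12) + reg;
	}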
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bf9768588288..c6b0f40620d8 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
-static inline unsigned long min_stack_size(unsigned long addr)
-{
- unsigned long size;
-
- if (on_irq_stack(addr, raw_smp_processor_id()))
- size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
- else
- size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
- return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
/* prepare insn slot */
@@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long stack_ptr = kernel_stack_pointer(regs);
kcb->jprobe_saved_regs = *regs;
/*
- * As Linus pointed out, gcc assumes that the callee
- * owns the argument space and could overwrite it, e.g.
- * tailcall optimization. So, to be absolutely safe
- * we also save and restore enough stack bytes to cover
- * the argument area.
+ * Since we can't be sure where in the stack frame "stacked"
+ * pass-by-value arguments are stored, we just don't try to
+ * duplicate any of the stack. Do not use jprobes on functions that
+ * use more than 64 bytes (after padding each to an 8-byte boundary)
+ * of arguments, or pass individual arguments larger than 16 bytes.
*/
- kasan_disable_current();
- memcpy(kcb->jprobes_stack, (void *)stack_ptr,
- min_stack_size(stack_ptr));
- kasan_enable_current();
instruction_pointer_set(regs, (unsigned long) jp->entry);
preempt_disable();
@@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
}
unpause_graph_tracing();
*regs = kcb->jprobe_saved_regs;
- kasan_disable_current();
- memcpy((void *)stack_addr, kcb->jprobes_stack,
- min_stack_size(stack_addr));
- kasan_enable_current();
preempt_enable_no_resched();
return 1;
}
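
[Note: the 64-byte and 16-byte limits in the rewritten comment follow from the AArch64 procedure-call standard (AAPCS64): the first eight doubleword arguments travel in registers x0-x7, and composite arguments larger than 16 bytes are passed by reference, so only arguments spilled to the caller's stack are at risk. Two illustrative, hypothetical prototypes:]

	/* All eight long arguments fit in x0-x7: safe to jprobe. */
	void probe_ok(long a, long b, long c, long d,
		      long e, long f, long g, long h);

	/* The ninth argument is "stacked"; a jprobe handler would read it
	 * from a stack frame that is not preserved: do not jprobe this. */
	void probe_risky(long a, long b, long c, long d,
			 long e, long f, long g, long h, long i);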
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 030c1d5aa46d..e0c81da60f76 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1347,13 +1347,13 @@ static void tracehook_report_syscall(struct pt_regs *regs,
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
- /* Do the secure computing check first; failures should be fast. */
- if (secure_computing() == -1)
- return -1;
-
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+ /* Do the secure computing after ptrace; failures should be fast. */
+ if (secure_computing(NULL) == -1)
+ return -1;
+
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, regs->syscallno);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 2981f1bdd073..536dce22fe76 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -39,9 +39,7 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
-#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/psci.h>
@@ -303,19 +301,6 @@ void __init setup_arch(char **cmdline_p)
}
}
-static int __init arm64_device_init(void)
-{
- if (of_have_populated_dt()) {
- of_iommu_init();
- of_platform_populate(NULL, of_default_bus_match_table,
- NULL, NULL);
- } else if (acpi_disabled) {
- pr_crit("Device tree not populated\n");
- }
- return 0;
-}
-arch_initcall_sync(arm64_device_init);
-
static int __init topology_init(void)
{
int i;
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 9a3aec97ac09..ccf79d849e0a 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,12 +101,20 @@ ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
/* enable the MMU early - so we can access sleep_save_stash by va */
adr_l lr, __enable_mmu /* __cpu_setup will return here */
- ldr x27, =_cpu_resume /* __enable_mmu will branch here */
+ adr_l x27, _resume_switched /* __enable_mmu will branch here */
adrp x25, idmap_pg_dir
adrp x26, swapper_pg_dir
b __cpu_setup
ENDPROC(cpu_resume)
+ .pushsection ".idmap.text", "ax"
+_resume_switched:
+ ldr x8, =_cpu_resume
+ br x8
+ENDPROC(_resume_switched)
+ .ltorg
+ .popsection
+
ENTRY(_cpu_resume)
mrs x1, mpidr_el1
adrp x8, mpidr_hash
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 76a6d9263908..3ff173e92582 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -201,12 +201,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
return ret;
}
-static void smp_store_cpu_info(unsigned int cpuid)
-{
- store_cpu_topology(cpuid);
- numa_store_cpu_info(cpuid);
-}
-
/*
* This is the secondary CPU boot entry. We're using this CPU's
* idle thread stack, but a set of temporary page tables.
@@ -254,7 +248,7 @@ asmlinkage void secondary_start_kernel(void)
*/
notify_cpu_starting(cpu);
- smp_store_cpu_info(cpu);
+ store_cpu_topology(cpu);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -661,9 +655,9 @@ void __init smp_init_cpus(void)
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
acpi_parse_gic_cpu_interface, 0);
- if (cpu_count > NR_CPUS)
- pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
- cpu_count, NR_CPUS);
+ if (cpu_count > nr_cpu_ids)
+ pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+ cpu_count, nr_cpu_ids);
if (!bootcpu_valid) {
pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@@ -677,7 +671,7 @@ void __init smp_init_cpus(void)
* with entries in cpu_logical_map while initializing the cpus.
* If the cpu set-up fails, invalidate the cpu_logical_map entry.
*/
- for (i = 1; i < NR_CPUS; i++) {
+ for (i = 1; i < nr_cpu_ids; i++) {
if (cpu_logical_map(i) != INVALID_HWID) {
if (smp_cpu_setup(i))
cpu_logical_map(i) = INVALID_HWID;
@@ -689,10 +683,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
int err;
unsigned int cpu;
+ unsigned int this_cpu;
init_cpu_topology();
- smp_store_cpu_info(smp_processor_id());
+ this_cpu = smp_processor_id();
+ store_cpu_topology(this_cpu);
+ numa_store_cpu_info(this_cpu);
/*
* If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
@@ -719,6 +716,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
continue;
set_cpu_present(cpu, true);
+ numa_store_cpu_info(cpu);
}
}
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 89d6e177ecbd..659963d40bb4 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -103,6 +103,7 @@ SECTIONS
*(.discard)
*(.discard.*)
*(.interp .dynamic)
+ *(.dynsym .dynstr .hash)
}
. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -174,19 +175,9 @@ SECTIONS
.rela : ALIGN(8) {
*(.rela .rela*)
}
- .dynsym : ALIGN(8) {
- *(.dynsym)
- }
- .dynstr : {
- *(.dynstr)
- }
- .hash : {
- *(.hash)
- }
- __rela_offset = ADDR(.rela) - KIMAGE_VADDR;
+ __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
__rela_size = SIZEOF(.rela);
- __dynsym_offset = ADDR(.dynsym) - KIMAGE_VADDR;
. = ALIGN(SEGMENT_ALIGN);
__init_end = .;