| Field | Value | Date |
|---|---|---|
| author | Ingo Molnar <mingo@kernel.org> | 2017-02-01 09:12:25 +0100 |
| committer | Ingo Molnar <mingo@kernel.org> | 2017-02-01 09:12:25 +0100 |
| commit | ed5c8c854f2b990dfa4d85c4995d115768a05d3c (patch) | |
| tree | a4a826795ec6784f25bebade98bdb20121d9c211 /arch/arc | |
| parent | 619bd4a71874a8fd78eb6ccf9f272c5e98bcc7b7 (diff) | |
| parent | a2ca3d617944417e9dd5f09fc8a4549cda115f4f (diff) | |
Merge branch 'linus' into sched/core, to pick up fixes and refresh the branch
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/arc')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/arc/Kconfig | 2 |
| -rw-r--r-- | arch/arc/include/asm/cache.h | 9 |
| -rw-r--r-- | arch/arc/include/asm/delay.h | 4 |
| -rw-r--r-- | arch/arc/include/asm/entry-arcv2.h | 2 |
| -rw-r--r-- | arch/arc/include/asm/module.h | 4 |
| -rw-r--r-- | arch/arc/include/asm/ptrace.h | 2 |
| -rw-r--r-- | arch/arc/include/asm/setup.h | 1 |
| -rw-r--r-- | arch/arc/kernel/head.S | 14 |
| -rw-r--r-- | arch/arc/kernel/intc-arcv2.c | 6 |
| -rw-r--r-- | arch/arc/kernel/intc-compact.c | 4 |
| -rw-r--r-- | arch/arc/kernel/mcip.c | 59 |
| -rw-r--r-- | arch/arc/kernel/module.c | 4 |
| -rw-r--r-- | arch/arc/kernel/smp.c | 25 |
| -rw-r--r-- | arch/arc/kernel/unaligned.c | 3 |
| -rw-r--r-- | arch/arc/mm/cache.c | 155 |
| -rw-r--r-- | arch/arc/mm/init.c | 5 |
16 files changed, 206 insertions, 93 deletions
```diff
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c75d29077e4a..283099c9560a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -29,7 +29,7 @@ config ARC
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select HAVE_MEMBLOCK
-	select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
 	select HANDLE_DOMAIN_IRQ
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index b3410ff6a62d..5008021fba98 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_IC_PTAG_HI	0x1F
 
 /* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE	0x1
+#define IC_CTRL_DIS		0x1
 
 /* Data cache related Auxiliary registers */
 #define ARC_REG_DC_BCR		0x72	/* Build Config reg */
@@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_DC_PTAG_HI	0x5F
 
 /* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH	0x40
-#define DC_CTRL_FLUSH_STATUS	0x100
+#define DC_CTRL_DIS		0x001
+#define DC_CTRL_INV_MODE_FLUSH	0x040
+#define DC_CTRL_FLUSH_STATUS	0x100
 
 /*System-level cache (L2 cache) related Auxiliary registers */
 #define ARC_REG_SLC_CFG		0x901
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_SLC_RGN_END	0x916
 
 /* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS		0x001
 #define SLC_CTRL_IM		0x040
-#define SLC_CTRL_DISABLE	0x001
 #define SLC_CTRL_BUSY		0x100
 #define SLC_CTRL_RGN_OP_INV	0x200
 
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index a36e8601114d..d5da2115d78a 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
 	"	lp  1f	\n"
 	"	nop	\n"
 	"1:		\n"
-	: : "r"(loops));
+	:
+	: "r"(loops)
+	: "lp_count");
 }
 
 extern void __bad_udelay(void);
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e6f4b7..aee1a77934cf 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
 	;
 	; Now manually save: r12, sp, fp, gp, r25
 
+	PUSH	r30
 	PUSH	r12
 
 	; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
 	POPAX	AUX_USER_SP
 1:
 	POP	r12
+	POP	r30
 
 .endm
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 6e91d8b339c3..567590ea8f6c 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -14,13 +14,13 @@
 
 #include <asm-generic/module.h>
 
-#ifdef CONFIG_ARC_DW2_UNWIND
 struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
 	void *unw_info;
 	int unw_sec_idx;
+#endif
 	const char *secstr;
 };
-#endif
 
 #define MODULE_PROC_FAMILY "ARC700"
 
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da1fcfd..47111d565a95 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@ struct pt_regs {
 	unsigned long fp;
 	unsigned long sp;	/* user/kernel sp depending on where we came from */
 
-	unsigned long r12;
+	unsigned long r12, r30;
 
 	/*------- Below list auto saved by h/w -----------*/
 	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index cb954cdab070..c568a9df82b1 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
 
 void setup_processor(void);
 void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
 
 /* Helpers used in arc_*_mumbojumbo routines */
 #define IS_AVAIL1(v, s)		((v) ? s : "")
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd867fdff..8b90d25a15cc 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -71,14 +71,14 @@ ENTRY(stext)
 	GET_CPU_ID  r5
 	cmp	r5, 0
 	mov.nz	r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-	; Non-Master can proceed as system would be booted sufficiently
-	jnz	first_lines_of_secondary
-#else
+	bz	.Lmaster_proceed
+
 	; Non-Masters wait for Master to boot enough and bring them up
-	jnz	arc_platform_smp_wait_to_boot
-#endif
-	; Master falls thru
+	; when they resume, tail-call to entry point
+	mov	blink, @first_lines_of_secondary
+	j	arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif
 
 	; Clear BSS before updating any globals
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 994dca7014db..ecef0fb0b66c 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
 
 static void arcv2_irq_mask(struct irq_data *data)
 {
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_ENABLE, 0);
 }
 
 static void arcv2_irq_unmask(struct irq_data *data)
 {
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_ENABLE, 1);
 }
 
 void arcv2_irq_enable(struct irq_data *data)
 {
 	/* set default priority */
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
 
 	/*
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index ce9deb953ca9..8c1fd5c00782 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
 	unsigned int ienb;
 
 	ienb = read_aux_reg(AUX_IENABLE);
-	ienb &= ~(1 << data->irq);
+	ienb &= ~(1 << data->hwirq);
 	write_aux_reg(AUX_IENABLE, ienb);
 }
 
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
 	unsigned int ienb;
 
 	ienb = read_aux_reg(AUX_IENABLE);
-	ienb |= (1 << data->irq);
+	ienb |= (1 << data->hwirq);
 	write_aux_reg(AUX_IENABLE, ienb);
 }
 
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 560c4afc2af4..9f6b68fd4f3b 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -10,6 +10,7 @@
 
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/spinlock.h>
 #include <soc/arc/mcip.h>
 #include <asm/irqflags-arcv2.h>
@@ -92,11 +93,10 @@ static void mcip_probe_n_setup(void)
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
 
 	sprintf(smp_cpuinfo_buf,
-		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
 		mp.ver, mp.num_cores,
 		IS_AVAIL1(mp.ipi, "IPI "),
 		IS_AVAIL1(mp.idu, "IDU "),
-		IS_AVAIL1(mp.llm, "LLM "),
 		IS_AVAIL1(mp.dbg, "DEBUG "),
 		IS_AVAIL1(mp.gfrc, "GFRC"));
 
@@ -174,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
-#ifdef CONFIG_SMP
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 		     bool force)
@@ -204,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
 	return IRQ_SET_MASK_OK;
 }
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+	/*
+	 * By default send all common interrupts to all available online CPUs.
+	 * The affinity of common interrupts in IDU must be set manually since
+	 * in some cases the kernel will not call irq_set_affinity() by itself:
+	 *   1. When the kernel is not configured with support of SMP.
+	 *   2. When the kernel is configured with support of SMP but upper
+	 *      interrupt controllers does not support setting of the affinity
+	 *      and cannot propagate it to IDU.
+	 */
+	idu_irq_set_affinity(data, cpu_online_mask, false);
+	idu_irq_unmask(data);
+}
 
 static struct irq_chip idu_irq_chip = {
 	.name			= "MCIP IDU Intc",
 	.irq_mask		= idu_irq_mask,
 	.irq_unmask		= idu_irq_unmask,
+	.irq_enable		= idu_irq_enable,
 #ifdef CONFIG_SMP
 	.irq_set_affinity       = idu_irq_set_affinity,
 #endif
@@ -221,10 +235,13 @@ static irq_hw_number_t idu_first_hwirq;
 static void idu_cascade_isr(struct irq_desc *desc)
 {
 	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+	struct irq_chip *core_chip = irq_desc_get_chip(desc);
 	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
 	irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
 
+	chained_irq_enter(core_chip, desc);
 	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+	chained_irq_exit(core_chip, desc);
 }
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -239,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
 			 const u32 *intspec, unsigned int intsize,
 			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
 {
-	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
-	int distri = intspec[1];
-	unsigned long flags;
-
+	/*
+	 * Ignore value of interrupt distribution mode for common interrupts in
+	 * IDU which resides in intspec[1] since setting an affinity using value
+	 * from Device Tree is deprecated in ARC.
+	 */
+	*out_hwirq = intspec[0];
 	*out_type = IRQ_TYPE_NONE;
 
-	/* XXX: validate distribution scheme again online cpu mask */
-	if (distri == 0) {
-		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	} else {
-		/*
-		 * DEST based distribution for Level Triggered intr can only
-		 * have 1 CPU, so generalize it to always contain 1 cpu
-		 */
-		int cpu = ffs(distri);
-
-		if (cpu != fls(distri))
-			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
-				hwirq, cpu);
-
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, cpu);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	}
-
 	return 0;
 }
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 42e964db2967..3d99a6091332 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 #ifdef CONFIG_ARC_DW2_UNWIND
 	mod->arch.unw_sec_idx = 0;
 	mod->arch.unw_info = NULL;
-	mod->arch.secstr = secstr;
 #endif
+	mod->arch.secstr = secstr;
 
 	return 0;
 }
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 
 	}
 
+#ifdef CONFIG_ARC_DW2_UNWIND
 	if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
 		module->arch.unw_sec_idx = tgtsec;
+#endif
 
 	return 0;
 
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 88674d972c9d..2afbafadb6ab 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
  */
 static volatile int wake_flag;
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f)		f
+#define __boot_write(f, v)	f = v
+
+#else
+
+#define __boot_read(f)		arc_read_uncached_32(&f)
+#define __boot_write(f, v)	arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
 	BUG_ON(cpu == 0);
-	wake_flag = cpu;
+
+	__boot_write(wake_flag, cpu);
 }
 
 void arc_platform_smp_wait_to_boot(int cpu)
 {
-	while (wake_flag != cpu)
+	/* for halt-on-reset, we've waited already */
+	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+		return;
+
+	while (__boot_read(wake_flag) != cpu)
 		;
 
-	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
+	__boot_write(wake_flag, 0);
 }
 
-
 const char *arc_platform_smp_cpuinfo(void)
 {
 	return plat_smp_ops.info ? : "";
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index abd961f3e763..91ebe382147f 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 	if (state.fault)
 		goto fault;
 
+	/* clear any remanants of delay slot */
 	if (delay_mode(regs)) {
-		regs->ret = regs->bta;
+		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index ec86ac0e3321..d408fa21a07c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
 
 static int l2_line_sz;
 static int ioc_exists;
-int slc_enable = 1, ioc_enable = 0;
+int slc_enable = 1, ioc_enable = 1;
 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
 
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
 
 /*
  * For ARC700 MMUv3 I-cache and D-cache flushes
- * Also reused for HS38 aliasing I-cache configuration
+ *  - ARC700 programming model requires paddr and vaddr be passed in seperate
+ *    AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
+ *    caches actually alias or not.
+ *  - For HS38, only the aliasing I-cache configuration uses the PTAG reg
+ *    (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
  */
 static inline
 void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
 	__after_dc_op(op);
 }
 
+static inline void __dc_disable(void)
+{
+	const int r = ARC_REG_DC_CTRL;
+
+	__dc_entire_op(OP_FLUSH_N_INV);
+	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+	const int r = ARC_REG_DC_CTRL;
+
+	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
 /* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
 
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
 #else
 
 #define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
 #define __dc_line_op(paddr, vaddr, sz, op)
 #define __dc_line_op_k(paddr, sz, op)
 
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 #endif
 }
 
+noinline static void slc_entire_op(const int op)
+{
+	unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+	ctrl = read_aux_reg(r);
+
+	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
+		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
+	else
+		ctrl |= SLC_CTRL_IM;
+
+	write_aux_reg(r, ctrl);
+
+	write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+
+	/* Important to wait for flush to complete */
+	while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+	const int r = ARC_REG_SLC_CTRL;
+
+	slc_entire_op(OP_FLUSH_N_INV);
+	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+	const int r = ARC_REG_SLC_CTRL;
+
+	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
 /***********************************************************
  * Exported APIs
  */
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
 	return 0;
 }
 
-void arc_cache_init(void)
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ *    Non-Masters need not be accessing caches at that time
+ *    - They are either HALT_ON_RESET and kick started much later or
+ *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ *      doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ *    otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ *    Coherency transactions
+ */
+noinline void __init arc_ioc_setup(void)
 {
-	unsigned int __maybe_unused cpu = smp_processor_id();
-	char str[256];
+	unsigned int ap_sz;
 
-	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+	/* Flush + invalidate + disable L1 dcache */
+	__dc_disable();
+
+	/* Flush + invalidate SLC */
+	if (read_aux_reg(ARC_REG_SLC_BCR))
+		slc_entire_op(OP_FLUSH_N_INV);
+
+	/* IOC Aperture start: TDB: handle non default CONFIG_LINUX_LINK_BASE */
+	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
 
 	/*
-	 * Only master CPU needs to execute rest of function:
-	 *  - Assume SMP so all cores will have same cache config so
-	 *    any geomtry checks will be same for all
-	 *  - IOC setup / dma callbacks only need to be setup once
+	 * IOC Aperture size:
+	 *   decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+	 * TBD: fix for PGU + 1GB of low mem
+	 * TBD: fix for PAE
 	 */
-	if (cpu)
-		return;
+	ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
+	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+
+	write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+	write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+
+	/* Re-enable L1 dcache */
+	__dc_enable();
+}
+
+void __init arc_cache_init_master(void)
+{
+	unsigned int __maybe_unused cpu = smp_processor_id();
 
 	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
 		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
 		}
 	}
 
-	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
-
-		/* IM set : flush before invalidate */
-		write_aux_reg(ARC_REG_SLC_CTRL,
-			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
+	/* Note that SLC disable not formally supported till HS 3.0 */
+	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+		arc_slc_disable();
 
-		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
-
-		/* Important to wait for flush to complete */
-		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
-		write_aux_reg(ARC_REG_SLC_CTRL,
-			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
-	}
+	if (is_isa_arcv2() && ioc_enable)
+		arc_ioc_setup();
 
 	if (is_isa_arcv2() && ioc_enable) {
-		/* IO coherency base - 0x8z */
-		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
-		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
-		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
-		/* Enable partial writes */
-		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
-		/* Enable IO coherency */
-		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
-
 		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
 		__dma_cache_inv = __dma_cache_inv_ioc;
 		__dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
 		__dma_cache_wback = __dma_cache_wback_l1;
 	}
 }
+
+void __ref arc_cache_init(void)
+{
+	unsigned int __maybe_unused cpu = smp_processor_id();
+	char str[256];
+
+	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+	/*
+	 * Only master CPU needs to execute rest of function:
+	 *  - Assume SMP so all cores will have same cache config so
+	 *    any geomtry checks will be same for all
+	 *  - IOC setup / dma callbacks only need to be setup once
+	 */
+	if (!cpu)
+		arc_cache_init_master();
+}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 399e2f223d25..8c9415ed6280 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 #endif
 
+long __init arc_get_mem_sz(void)
+{
+	return low_mem_sz;
+}
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
```
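A recurring change in the intc-compact, intc-arcv2 and mcip hunks above is replacing `data->irq` with `data->hwirq` when programming the controller. The distinction is generic irq_chip behaviour: `irq_data->irq` is the Linux virtual IRQ number (a software cookie assigned by the irq domain), while `irq_data->hwirq` is the line number the interrupt controller hardware actually decodes, and the two only coincide by accident. The sketch below is illustrative only; the `example_*` names and the in-memory "enable register" are made-up stand-ins, not ARC code or part of this merge.

```c
#include <linux/bitops.h>
#include <linux/irq.h>

/* Hypothetical enable "register" and accessors, purely for illustration. */
static u32 example_ienable;
static u32 example_read_ienable(void)		{ return example_ienable; }
static void example_write_ienable(u32 v)	{ example_ienable = v; }

/* Mask/unmask must index the hardware by hwirq, never by the Linux virq. */
static void example_irq_mask(struct irq_data *data)
{
	u32 ienb = example_read_ienable();

	ienb &= ~BIT(data->hwirq);	/* controller line, not data->irq */
	example_write_ienable(ienb);
}

static void example_irq_unmask(struct irq_data *data)
{
	u32 ienb = example_read_ienable();

	ienb |= BIT(data->hwirq);
	example_write_ienable(ienb);
}
```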
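Separately, the `arc_ioc_setup()` hunk computes the IOC aperture size as `order_base_2(arc_get_mem_sz()/1024) - 2`, and its comment says the register value is decoded as 2 ^ (SIZE + 2) KB, with 0x11 meaning 512M. A stand-alone mock-up of that arithmetic (plain user-space C with a local power-of-two `order_base_2()`; the 512 MB figure is an assumed example, not taken from any particular board) confirms the encoding:

```c
#include <stdio.h>

/* Local stand-in for the kernel's order_base_2() (exact for powers of two). */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long mem_sz = 512UL * 1024 * 1024;	/* assume 512 MB of low memory */
	unsigned int ap_sz = order_base_2(mem_sz / 1024) - 2;

	/* Prints: ap_sz = 0x11, decoded = 524288 KB (i.e. 512 MB) */
	printf("ap_sz = 0x%x, decoded = %lu KB\n", ap_sz, 1UL << (ap_sz + 2));
	return 0;
}
```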