Diffstat (limited to 'arch/arm/mm/context.c')
 arch/arm/mm/context.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 47 insertions(+), 17 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 2ac37372ef52..b55b1015724b 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -20,6 +20,7 @@
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
+#include <asm/proc-fns.h>
/*
* On ARMv6, we have the following structure in the Context ID:
@@ -39,33 +40,51 @@
* non 64-bit operations.
*/
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1)
-
-#define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS ASID_FIRST_VERSION
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask)
+{
+ int cpu;
+ unsigned long flags;
+ u64 context_id, asid;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ context_id = mm->context.id.counter;
+ for_each_online_cpu(cpu) {
+ if (cpu == this_cpu)
+ continue;
+ /*
+ * We only need to send an IPI if the other CPUs are
+ * running the same ASID as the one being invalidated.
+ */
+ asid = per_cpu(active_asids, cpu).counter;
+ if (asid == 0)
+ asid = per_cpu(reserved_asids, cpu);
+ if (context_id == asid)
+ cpumask_set_cpu(cpu, mask);
+ }
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
- unsigned long ttbl = __pa(swapper_pg_dir);
- unsigned long ttbh = 0;
-
/*
* Set TTBR0 to swapper_pg_dir which contains only global entries. The
* ASID is set to 0.
*/
- asm volatile(
- " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n"
- :
- : "r" (ttbl), "r" (ttbh));
+ cpu_set_ttbr(0, __pa(swapper_pg_dir));
isb();
}
#else
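
[Note] a15_erratum_get_cpumask() above only computes which CPUs still need a TLB-maintenance IPI; the broadcast itself lives with the caller. A minimal sketch of such a caller, modelled on broadcast_tlb_mm_a15_erratum() in arch/arm/kernel/smp_tlb.c (erratum_a15_798181() and ipi_flush_tlb_a15_erratum() are assumed helpers from that file, not shown in this diff):

/* Sketch of a caller; names assumed, not part of this diff. */
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
	int this_cpu;
	cpumask_t mask = { CPU_BITS_NONE };

	if (!erratum_a15_798181())
		return;

	this_cpu = get_cpu();
	/* Only IPI the CPUs actually running this mm's ASID. */
	a15_erratum_get_cpumask(this_cpu, mm, &mask);
	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
	put_cpu();
}

The cpu_set_reserved_ttbr0() rewrite just above replaces the open-coded mcrr with the cpu_set_ttbr() helper pulled in through the new <asm/proc-fns.h> include. Plausibly, the LPAE flavour of that helper wraps the same 64-bit TTBR write, roughly along these lines (a sketch, not the verbatim header):

/*
 * Sketch of an LPAE cpu_set_ttbr(); %Q0/%R0 select the low and high
 * halves of the 64-bit operand for the mcrr instruction, and the
 * stringified nr picks TTBR0 vs TTBR1.
 */
#define cpu_set_ttbr(nr, val)					\
	do {							\
		u64 ttbr = val;					\
		__asm__("mcrr	p15, " #nr ", %Q0, %R0, c2"	\
			: : "r" (ttbr));			\
	} while (0)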
@@ -128,7 +147,16 @@ static void flush_context(unsigned int cpu)
asid = 0;
} else {
asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
- __set_bit(ASID_TO_IDX(asid), asid_map);
+ /*
+ * If this CPU has already been through a
+ * rollover, but hasn't run another task in
+ * the meantime, we must preserve its reserved
+ * ASID, as this is the only trace we have of
+ * the process it is still running.
+ */
+ if (asid == 0)
+ asid = per_cpu(reserved_asids, i);
+ __set_bit(asid & ~ASID_MASK, asid_map);
}
per_cpu(reserved_asids, i) = asid;
}
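
[Note] The asid == 0 test above is the subtle part: on rollover every CPU's active_asids is xchg'd to 0, so a CPU that never context-switches between two rollovers reads back 0, and its only remaining record of the running task is reserved_asids. A stand-alone model of that bookkeeping (hypothetical names, plain user-space C, not kernel code):

/*
 * Two rollovers with no context switch in between must not lose
 * CPU 1's reserved ASID.
 */
#include <stdio.h>

#define NR_CPUS 2

static unsigned long long active[NR_CPUS];	/* models active_asids */
static unsigned long long reserved[NR_CPUS];	/* models reserved_asids */

static void model_flush_context(void)
{
	for (int i = 0; i < NR_CPUS; i++) {
		/* atomic64_xchg(&per_cpu(active_asids, i), 0) in the real code */
		unsigned long long asid = active[i];
		active[i] = 0;
		if (asid == 0)	/* already rolled over, task never rescheduled */
			asid = reserved[i];
		reserved[i] = asid;
	}
}

int main(void)
{
	active[1] = 0x10002;		/* illustrative: generation | ASID 2 */
	model_flush_context();		/* first rollover */
	model_flush_context();		/* second rollover, CPU 1 still idle */
	printf("CPU 1 reserved ASID: %#llx\n", reserved[1]);	/* 0x10002 */
	return 0;
}

Without the asid == 0 fallback, the second pass would overwrite reserved[1] with 0, and the still-live ASID could be handed out to another task.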
@@ -167,17 +195,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
/*
* Allocate a free ASID. If we can't find one, take a
* note of the currently active ASIDs and mark the TLBs
- * as requiring flushes.
+ * as requiring flushes. We always count from ASID #1,
+ * as we reserve ASID #0 to switch via TTBR0 and indicate
+ * rollover events.
*/
- asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
if (asid == NUM_USER_ASIDS) {
generation = atomic64_add_return(ASID_FIRST_VERSION,
&asid_generation);
flush_context(cpu);
- asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
}
__set_bit(asid, asid_map);
- asid = generation | IDX_TO_ASID(asid);
+ asid |= generation;
cpumask_clear(mm_cpumask(mm));
}
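
[Note] Taken together, the switch from find_first_zero_bit() to find_next_zero_bit(..., 1) and the direct asid |= generation composition mean ASID #0 is simply never allocated, so the old IDX_TO_ASID/ASID_TO_IDX translation macros can go. A compressed user-space model of the allocation path (hypothetical 4-bit ASID space, illustrative only):

#include <stdio.h>
#include <string.h>

#define ASID_BITS	4
#define NUM_ASIDS	(1u << ASID_BITS)	/* models NUM_USER_ASIDS */

static unsigned char map[NUM_ASIDS];		/* models asid_map */
static unsigned long long generation = NUM_ASIDS; /* ASID_FIRST_VERSION */

static unsigned find_next_zero(unsigned start)
{
	for (unsigned i = start; i < NUM_ASIDS; i++)
		if (!map[i])
			return i;
	return NUM_ASIDS;
}

static unsigned long long model_new_context(void)
{
	unsigned asid = find_next_zero(1);	/* always skip ASID #0 */

	if (asid == NUM_ASIDS) {		/* space exhausted: roll over */
		generation += NUM_ASIDS;	/* bump the generation */
		memset(map, 0, sizeof(map));	/* stands in for flush_context() */
		asid = find_next_zero(1);
	}
	map[asid] = 1;
	return generation | asid;		/* asid |= generation */
}

int main(void)
{
	/* NUM_ASIDS requests force exactly one rollover. */
	for (unsigned i = 0; i < NUM_ASIDS; i++)
		printf("context.id = %#llx\n", model_new_context());
	return 0;
}

Running it hands out generation-1 values 0x11..0x1f, then rolls over to 0x21: the bitmap index is now the ASID itself, and slot 0 stays reserved for the TTBR0 switch and rollover signalling.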