Diffstat (limited to 'arch/arm/mm/context.c')
 arch/arm/mm/context.c | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 84e6f772e204..6eb97b3a7481 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -36,8 +36,8 @@
* The context ID is used by debuggers and trace logic, and
* should be unique within all running processes.
*
- * In big endian operation, the two 32 bit words are swapped if accesed by
- * non 64-bit operations.
+ * In big endian operation, the two 32 bit words are swapped if accessed
+ * by non-64-bit operations.
*/
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
#define NUM_USER_ASIDS ASID_FIRST_VERSION
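For orientation, here is a minimal standalone sketch of how a 64-bit context ID decomposes under this scheme. The EX_-prefixed names are hypothetical stand-ins for this file's macros, and the 8-bit ASID width is the classic ARMv7 case:

#include <stdint.h>

/* The low ASID_BITS hold the hardware ASID; everything above is the
 * generation ("version") that is bumped on each rollover. */
#define EX_ASID_BITS          8
#define EX_ASID_FIRST_VERSION (1ULL << EX_ASID_BITS)
#define EX_ASID_MASK          (EX_ASID_FIRST_VERSION - 1)

static inline uint64_t ex_ctx_asid(uint64_t id)       { return id & EX_ASID_MASK; }
static inline uint64_t ex_ctx_generation(uint64_t id) { return id & ~EX_ASID_MASK; }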
@@ -78,20 +78,21 @@ void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
#endif
#ifdef CONFIG_ARM_LPAE
-static void cpu_set_reserved_ttbr0(void)
-{
- /*
- * Set TTBR0 to swapper_pg_dir which contains only global entries. The
- * ASID is set to 0.
- */
- cpu_set_ttbr(0, __pa(swapper_pg_dir));
- isb();
-}
+/*
+ * With LPAE, the ASID and page tables are updated atomically, so there is
+ * no need for a reserved set of tables (the active ASID tracking prevents
+ * any issues across a rollover).
+ */
+#define cpu_set_reserved_ttbr0()
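The atomicity comes from the register format: with LPAE, TTBR0 is a single 64-bit register carrying both the table base and the ASID (bits 55:48 per the ARMv7 LPAE layout), so one write switches both together. A hedged sketch of composing such a value; make_ttbr0() is hypothetical, while cpu_set_ttbr() is the helper the removed code used:

#include <stdint.h>

/* With LPAE the ASID rides in TTBR0[55:48], so the table base and ASID
 * change in a single 64-bit write and no reserved-table dance is needed. */
static inline uint64_t make_ttbr0(uint64_t table_pa, uint64_t asid)
{
	return table_pa | ((asid & 0xff) << 48);
}
/* e.g. cpu_set_ttbr(0, make_ttbr0(__pa(mm->pgd), asid)); */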
#else
static void cpu_set_reserved_ttbr0(void)
{
u32 ttb;
- /* Copy TTBR1 into TTBR0 */
+ /*
+ * Copy TTBR1 into TTBR0.
+ * This points at swapper_pg_dir, which contains only global
+ * entries so any speculative walks are perfectly safe.
+ */
asm volatile(
" mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n"
" mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n"
@@ -179,6 +180,7 @@ static int is_reserved_asid(u64 asid)
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
+ static u32 cur_idx = 1;
u64 asid = atomic64_read(&mm->context.id);
u64 generation = atomic64_read(&asid_generation);
@@ -193,10 +195,13 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
* Allocate a free ASID. If we can't find one, take a
* note of the currently active ASIDs and mark the TLBs
* as requiring flushes. We always count from ASID #1,
- * as we reserve ASID #0 to switch via TTBR0 and indicate
- * rollover events.
+ * as we reserve ASID #0 to switch via TTBR0 and to
+ * prevent speculative page table walks from hitting in
+ * any partial walk caches, which could be populated
+ * from overlapping level-1 descriptors used to map both
+ * the module area and the userspace stack.
*/
- asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
if (asid == NUM_USER_ASIDS) {
generation = atomic64_add_return(ASID_FIRST_VERSION,
&asid_generation);
@@ -204,6 +209,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
}
__set_bit(asid, asid_map);
+ cur_idx = asid;
asid |= generation;
cpumask_clear(mm_cpumask(mm));
}
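Taken together, allocation scans the bitmap from cur_idx (a round-robin hint that keeps rollover scans cheap), and a full map triggers a generation bump plus TLB flush. A compact standalone sketch of that policy, with the kernel's locking, per-CPU reserved-ASID bookkeeping, and real bitmap helpers simplified away:

#include <stdint.h>
#include <string.h>

#define NASIDS 256                        /* NUM_USER_ASIDS for 8-bit ASIDs */
static uint8_t  asid_map[NASIDS];         /* byte-per-ASID stand-in for the bitmap */
static uint64_t generation = 1ULL << 8;   /* ASID_FIRST_VERSION */
static uint32_t cur_idx = 1;

static uint64_t alloc_asid(void)
{
	uint32_t a;

	/* Scan onward from the last allocation point. */
	for (a = cur_idx; a < NASIDS && asid_map[a]; a++)
		;
	if (a == NASIDS) {
		/* Rollover: new generation, clear the map, flush TLBs
		 * (elided here), and restart from ASID #1 (0 is reserved). */
		generation += 1ULL << 8;
		memset(asid_map, 0, sizeof(asid_map));
		a = 1;
	}
	asid_map[a] = 1;
	cur_idx = a;
	return generation | a;
}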
@@ -221,8 +227,9 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
__check_vmalloc_seq(mm);
/*
- * Required during context switch to avoid speculative page table
- * walking with the wrong TTBR.
+ * We cannot update the pgd and the ASID atomically with classic
+ * MMU, so switch exclusively to global mappings to avoid
+ * speculative page table walking with the wrong TTBR.
*/
cpu_set_reserved_ttbr0();
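So on classic MMU the switch is a two-step sequence: park TTBR0 on the global-only tables first, then install the new ASID and page tables. Roughly (a sketch of the ordering, not the literal code that follows this hunk):

/* 1. Only global mappings are walkable while the switch is in flux. */
cpu_set_reserved_ttbr0();
/* 2. Now the context ID and TTBR0 can be updated; any speculative
 *    walk in between hits only global (ASID-agnostic) entries. */
atomic64_set(&mm->context.id, asid);
cpu_switch_mm(mm->pgd, mm);	/* installs TTBR0 and CONTEXTIDR for the new mm */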