Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/include/asm/mmu.h          |  2
-rw-r--r--  arch/arc/include/asm/mmu_context.h  | 97
-rw-r--r--  arch/arc/mm/tlb.c                   | 22
-rw-r--r--  arch/arc/mm/tlbex.S                 |  5
4 files changed, 40 insertions(+), 86 deletions(-)
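The patch drops the asid_mm_map[] reverse map and packs everything into one 32 bit value per mm: the low 8 bits are the hardware MMU PID, the upper 24 bits count allocation cycles (generations). Below is a minimal stand-alone sketch of that encoding using the mask values from the hunks that follow; the same_cycle() helper and the demo main() are illustrative additions, not part of the patch.

#include <stdio.h>

#define MM_CTXT_ASID_MASK   0x000000ffU          /* low 8 bits: hardware MMU PID */
#define MM_CTXT_CYCLE_MASK  (~MM_CTXT_ASID_MASK) /* upper 24 bits: generation    */
#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)

/* An ASID is usable only if its generation matches the global counter's */
static int same_cycle(unsigned int task_asid, unsigned int asid_cache)
{
	return !((task_asid ^ asid_cache) & MM_CTXT_CYCLE_MASK);
}

int main(void)
{
	unsigned int asid_cache = MM_CTXT_FIRST_CYCLE | 3; /* generation 1, PID 3 */
	unsigned int task_asid  = 3;                       /* generation 0, PID 3 */

	/* Same hardware PID, but allocated in an older cycle: must be refreshed */
	printf("same cycle: %d\n", same_cycle(task_asid, asid_cache));
	return 0;
}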
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 1639f25e47b1..c82db8bd7270 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -48,7 +48,7 @@
#ifndef __ASSEMBLY__
typedef struct {
- unsigned long asid; /* Pvt Addr-Space ID for mm */
+ unsigned long asid; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 9b09d18f01b3..43a1b51bb8cc 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -34,39 +34,22 @@
* When it reaches max 255, the allocation cycle starts afresh by flushing
* the entire TLB and wrapping ASID back to zero.
*
- * For book-keeping, Linux uses a couple of data-structures:
- * -mm_struct has an @asid field to keep a note of task's ASID (needed at the
- * time of say switch_mm( )
- * -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
- * given an ASID, finding the mm struct associated.
- *
- * The round-robin allocation algorithm allows for ASID stealing.
- * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
- * already assigned to another (switched-out) task. Obviously the prev owner
- * is marked with an invalid ASID to make it request for a new ASID when it
- * gets scheduled next time. However its TLB entries (with ASID "x") could
- * exist, which must be cleared before the same ASID is used by the new owner.
- * Flushing them would be plausible but costly solution. Instead we force a
- * allocation policy quirk, which ensures that a stolen ASID won't have any
- * TLB entries associates, alleviating the need to flush.
- * The quirk essentially is not allowing ASID allocated in prev cycle
- * to be used past a roll-over in the next cycle.
- * When this happens (i.e. task ASID > asid tracker), task needs to refresh
- * its ASID, aligning it to current value of tracker. If the task doesn't get
- * scheduled past a roll-over, hence its ASID is not yet realigned with
- * tracker, such ASID is anyways safely reusable because it is
- * gauranteed that TLB entries with that ASID wont exist.
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task, so the rule is that a task must refresh its ASID when
+ * a new cycle starts. The 32 bit @asid_cache (and mm->asid) hold the 8 bit
+ * MMU PID in the lower bits and use the remaining 24 bits as a cycle/generation
+ * indicator; natural 32 bit unsigned math automatically increments the
+ * generation when the lower 8 bits roll over.
*/
-#define FIRST_ASID 0
-#define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */
-#define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
-#define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1)
+#define MM_CTXT_ASID_MASK 0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK (~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID 0UL
-/* ASID to mm struct mapping */
-extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+#define hw_pid(mm) (mm->context.asid & MM_CTXT_ASID_MASK)
-extern int asid_cache;
+extern unsigned int asid_cache;
/*
* Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
@@ -74,59 +57,42 @@ extern int asid_cache;
*/
static inline void get_new_mmu_context(struct mm_struct *mm)
{
- struct mm_struct *prev_owner;
unsigned long flags;
local_irq_save(flags);
/*
* Move to new ASID if it was not from current alloc-cycle/generation.
+ * This is done by ensuring that the generation bits in both mm->ASID
+ * and the cpu's ASID counter are exactly the same.
*
* Note: Callers needing new ASID unconditionally, independent of
* generation, e.g. local_flush_tlb_mm() for forking parent,
* first need to destroy the context, setting it to invalid
* value.
*/
- if (mm->context.asid <= asid_cache)
+ if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
goto set_hw;
- /*
- * Relinquish the currently owned ASID (if any).
- * Doing unconditionally saves a cmp-n-branch; for already unused
- * ASID slot, the value was/remains NULL
- */
- asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+ /* move to new ASID and handle rollover */
+ if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {
- /* move to new ASID */
- if (++asid_cache > MAX_ASID) { /* ASID roll-over */
- asid_cache = FIRST_ASID;
flush_tlb_all();
- }
- /*
- * Is next ASID already owned by some-one else (we are stealing it).
- * If so, let the orig owner be aware of this, so when it runs, it
- * asks for a brand new ASID. This would only happen for a long-lived
- * task with ASID from prev allocation cycle (before ASID roll-over).
- *
- * This might look wrong - if we are re-using some other task's ASID,
- * won't we use it's stale TLB entries too. Actually the algorithm takes
- * care of such a case: it ensures that task with ASID from prev alloc
- * cycle, when scheduled will refresh it's ASID
- * The stealing scenario described here will only happen if that task
- * didn't get a chance to refresh it's ASID - implying stale entries
- * won't exist.
- */
- prev_owner = asid_mm_map[asid_cache];
- if (prev_owner)
- prev_owner->context.asid = NO_ASID;
+ /*
+ * Above check is for rollover of the 8 bit ASID in the 32 bit container.
+ * If the container itself wrapped around, set it to a non-zero
+ * "generation" to distinguish it from "no context"
+ */
+ if (!asid_cache)
+ asid_cache = MM_CTXT_FIRST_CYCLE;
+ }
/* Assign new ASID to tsk */
- asid_mm_map[asid_cache] = mm;
mm->context.asid = asid_cache;
set_hw:
- write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);
+ write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);
local_irq_restore(flags);
}
@@ -138,7 +104,7 @@ set_hw:
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
- mm->context.asid = NO_ASID;
+ mm->context.asid = MM_CTXT_NO_ASID;
return 0;
}
@@ -167,14 +133,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
static inline void destroy_context(struct mm_struct *mm)
{
- unsigned long flags;
-
- local_irq_save(flags);
-
- asid_mm_map[mm->context.asid] = NULL;
- mm->context.asid = NO_ASID;
-
- local_irq_restore(flags);
+ mm->context.asid = MM_CTXT_NO_ASID;
}
/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
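For readability, here is a rough user-space model of the get_new_mmu_context() path above, assuming the macro values from this header: no IRQ masking, no write to ARC_REG_PID, flush_tlb_all() is stubbed with a printf, and struct mm_model merely stands in for mm_struct's context.asid field.

#include <stdio.h>

#define MM_CTXT_ASID_MASK   0x000000ffU
#define MM_CTXT_CYCLE_MASK  (~MM_CTXT_ASID_MASK)
#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID     0U

struct mm_model { unsigned int asid; };

static unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;

static void flush_tlb_all(void)
{
	puts("  flush_tlb_all()");	/* stand-in for the real TLB flush */
}

static void get_new_mmu_context(struct mm_model *mm)
{
	/* Still in the current generation: keep the ASID as-is */
	if (!((mm->asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
		return;

	/* Bump the counter; a wrap of the low 8 bits starts a new cycle */
	if (!(++asid_cache & MM_CTXT_ASID_MASK)) {
		flush_tlb_all();

		/* Full 32 bit wrap: restart at a non-zero generation so the
		 * counter can never collide with MM_CTXT_NO_ASID */
		if (!asid_cache)
			asid_cache = MM_CTXT_FIRST_CYCLE;
	}

	mm->asid = asid_cache;
}

int main(void)
{
	struct mm_model mm = { MM_CTXT_NO_ASID };

	get_new_mmu_context(&mm);
	printf("asid=0x%x pid=%u\n", mm.asid, mm.asid & MM_CTXT_ASID_MASK);
	return 0;
}

A fresh context picks up generation 1, PID 1; a rollover of the low 8 bits triggers the (stubbed) full TLB flush, and a wrap of the whole 32 bit counter restarts at MM_CTXT_FIRST_CYCLE rather than 0, so an allocated ASID never looks like "no context".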
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index b5c5e0aa0aaa..71cb26df4255 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -100,13 +100,7 @@
/* A copy of the ASID from the PID reg is kept in asid_cache */
-int asid_cache = FIRST_ASID;
-
-/* ASID to mm struct mapping. We have one extra entry corresponding to
- * NO_ASID to save us a compare when clearing the mm entry for old asid
- * see get_new_mmu_context (asm-arc/mmu_context.h)
- */
-struct mm_struct *asid_mm_map[NUM_ASID + 1];
+unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;
/*
* Utility Routine to erase a J-TLB entry
@@ -281,7 +275,6 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
unsigned long flags;
- unsigned int asid;
/* If range @start to @end is more than 32 TLB entries deep,
* its better to move to a new ASID rather than searching for
@@ -303,11 +296,10 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
start &= PAGE_MASK;
local_irq_save(flags);
- asid = vma->vm_mm->context.asid;
- if (asid != NO_ASID) {
+ if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
while (start < end) {
- tlb_entry_erase(start | (asid & 0xff));
+ tlb_entry_erase(start | hw_pid(vma->vm_mm));
start += PAGE_SIZE;
}
}
@@ -361,9 +353,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
*/
local_irq_save(flags);
- if (vma->vm_mm->context.asid != NO_ASID) {
- tlb_entry_erase((page & PAGE_MASK) |
- (vma->vm_mm->context.asid & 0xff));
+ if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
+ tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm));
utlb_invalidate();
}
@@ -709,7 +700,8 @@ void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
* - SW needs to have a valid ASID
*/
if (addr < 0x70000000 &&
- ((mmu_asid != mm_asid) || (mm_asid == NO_ASID)))
+ ((mm_asid == MM_CTXT_NO_ASID) ||
+ (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif
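The flush paths in tlb.c now build each lookup key from the virtual page address OR'ed with hw_pid(mm), i.e. only the low 8 bits of the stored ASID. A small illustrative model of the local_flush_tlb_range() loop follows; PAGE_SIZE, struct mm_model and the printf stand-in for tlb_entry_erase() are assumptions for the sketch, not the kernel definitions.

#include <stdio.h>

#define PAGE_SIZE          4096UL
#define PAGE_MASK          (~(PAGE_SIZE - 1))
#define MM_CTXT_ASID_MASK  0x000000ffUL

struct mm_model { unsigned long asid; };

static unsigned long hw_pid(const struct mm_model *mm)
{
	return mm->asid & MM_CTXT_ASID_MASK;	/* hardware PID = low 8 bits */
}

/* Stand-in for the per-page loop in local_flush_tlb_range() */
static void erase_range(const struct mm_model *mm,
			unsigned long start, unsigned long end)
{
	for (start &= PAGE_MASK; start < end; start += PAGE_SIZE)
		printf("tlb_entry_erase(0x%lx)\n", start | hw_pid(mm));
}

int main(void)
{
	struct mm_model mm = { 0x205 };	/* generation 2, hardware PID 5 */

	erase_range(&mm, 0x10000, 0x13000);
	return 0;
}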
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 88897a112d55..cf7d7d9ad695 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -140,12 +140,15 @@ ex_saved_reg1:
GET_CURR_TASK_ON_CPU r3
ld r0, [r3, TASK_ACT_MM]
ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
+ breq r0, 0, 55f ; Error if no ASID allocated
lr r1, [ARC_REG_PID]
and r1, r1, 0xFF
- breq r1, r0, 5f
+ and r2, r0, 0xFF ; MMU PID bits only for comparison
+ breq r1, r2, 5f
+55:
; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
lr r2, [erstatus]
bbit0 r2, STATUS_U_BIT, 5f
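The tlbex.S hunk above does in assembly what tlb_paranoid_check() does in C: the software ASID must not be zero (MM_CTXT_NO_ASID) and its low 8 bits must match the hardware PID register. A hedged C rendering of just that comparison, with illustrative names:

#include <stdio.h>

#define MM_CTXT_ASID_MASK 0xffU

/* Returns non-zero when the SW and HW views of the ASID disagree */
static int asid_mismatch(unsigned int sw_asid, unsigned int hw_pid_reg)
{
	/* 0 means "no ASID allocated yet": always an error for a user access */
	if (sw_asid == 0)
		return 1;

	/* Only the 8 bit hardware PID portion is compared */
	return (sw_asid & MM_CTXT_ASID_MASK) != (hw_pid_reg & MM_CTXT_ASID_MASK);
}

int main(void)
{
	printf("%d\n", asid_mismatch(0x205, 0x05));	/* 0: match    */
	printf("%d\n", asid_mismatch(0x305, 0x04));	/* 1: mismatch */
	return 0;
}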