author     Martin Schwidefsky <schwidefsky@de.ibm.com>    2012-07-26 08:53:06 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>    2012-07-26 16:24:14 +0200
commit     0f6f281b731d20bfe75c13f85d33f3f05b440222 (patch)
tree       0f8c23d3fca50dd2830ddc978501cd45a0057f45
parent     8143adafd2d00b13f1db96ce06b6bf479e0bfe5b (diff)
s390/mm: downgrade page table after fork of a 31 bit process
The downgrade of the 4 level page table created by init_new_context is
currently done only in start_thread31. If a 31 bit process forks, the new
mm uses a 4 level page table, including the task size of 2<<42 that goes
along with it. This is incorrect, as the 31 bit process can now map memory
beyond 2GB. Define arch_dup_mmap to do the downgrade after fork.

Cc: stable@vger.kernel.org
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
 arch/s390/include/asm/mmu_context.h | 14
 arch/s390/include/asm/processor.h   |  2
 arch/s390/mm/mmap.c                 | 12
 arch/s390/mm/pgtable.c              |  5
 4 files changed, 25 insertions(+), 8 deletions(-)
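For context, the hook this patch adds is invoked by the generic fork code: dup_mmap() in kernel/fork.c copies the parent's VMAs into the new mm and then calls arch_dup_mmap() so the architecture can fix the result up. Until now s390 pulled in the empty stub from <asm-generic/mm_hooks.h>, so nothing ever undid the large page table that init_new_context sets up when a 31 bit process forks. A simplified sketch of the generic call site (modelled on kernel/fork.c of this era, with locking, error handling and the VMA copy loop omitted):

/*
 * Simplified model of dup_mmap() in kernel/fork.c -- not the real code.
 * The arch hook runs after the child mm has been populated, which is
 * where s390 now downgrades the page table for a 31 bit parent.
 */
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
        /* ... copy each vm_area_struct and its page tables from oldmm ... */

        arch_dup_mmap(oldmm, mm);
        return 0;
}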
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5c63615f1349..077de7efc82f 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -11,7 +11,6 @@
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/ctl_reg.h>
-#include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
                                    struct mm_struct *mm)
@@ -91,4 +90,17 @@ static inline void activate_mm(struct mm_struct *prev,
         switch_mm(prev, next, current);
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+                                 struct mm_struct *mm)
+{
+#ifdef CONFIG_64BIT
+        if (oldmm->context.asce_limit < mm->context.asce_limit)
+                crst_table_downgrade(mm, oldmm->context.asce_limit);
+#endif
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
 #endif /* __S390_MMU_CONTEXT_H */
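Why the <asm-generic/mm_hooks.h> include has to go: that header exists for architectures that need no special mm hooks and defines them as do-nothing inlines, roughly as sketched below. Now that s390 supplies a real arch_dup_mmap(), keeping the include would redefine the same functions, so the architecture also has to carry its own (still empty) arch_exit_mmap().

/*
 * Rough sketch of the stubs <asm-generic/mm_hooks.h> provides in this
 * era -- shown for comparison only, not part of the patch.
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}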
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c40fa91e38a8..11e4e3236937 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -120,7 +120,9 @@ struct stack_frame {
         regs->psw.mask = psw_user_bits | PSW_MASK_BA;           \
         regs->psw.addr = new_psw | PSW_ADDR_AMODE;              \
         regs->gprs[15] = new_stackp;                            \
+        __tlb_flush_mm(current->mm);                            \
         crst_table_downgrade(current->mm, 1UL << 31);           \
+        update_mm(current->mm, current);                        \
 } while (0)
 
 /* Forward declaration, a strange C thing */
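This hunk mirrors the pgtable.c change further down: crst_table_downgrade() no longer flushes the TLB or reloads the ASCE itself, so the 31 bit exec path has to do both around the downgrade. The fork path in arch_dup_mmap() deliberately does neither, because the freshly copied mm has never been attached to a CPU and cannot have stale TLB entries. A hedged sketch of the resulting exec-time sequence; downgrade_for_compat_exec() is a hypothetical wrapper used only for illustration, in the kernel the three calls sit directly in the start_thread31 macro above:

/* Hypothetical wrapper, for illustration only. */
static void downgrade_for_compat_exec(void)
{
        __tlb_flush_mm(current->mm);                    /* drop stale translations first */
        crst_table_downgrade(current->mm, 1UL << 31);   /* shrink the table to the 2GB limit */
        update_mm(current->mm, current);                /* reload the now smaller ASCE */
}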
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 573384256c5c..c59a5efa58b1 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -103,9 +103,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 int s390_mmap_check(unsigned long addr, unsigned long len)
 {
+        int rc;
+
         if (!is_compat_task() &&
-            len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
-                return crst_table_upgrade(current->mm, 1UL << 53);
+            len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+                rc = crst_table_upgrade(current->mm, 1UL << 53);
+                if (rc)
+                        return rc;
+                update_mm(current->mm, current);
+        }
         return 0;
 }
 
@@ -125,6 +131,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
                 rc = crst_table_upgrade(mm, 1UL << 53);
                 if (rc)
                         return (unsigned long) rc;
+                update_mm(mm, current);
                 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
         }
         return area;
@@ -147,6 +154,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
                 rc = crst_table_upgrade(mm, 1UL << 53);
                 if (rc)
                         return (unsigned long) rc;
+                update_mm(mm, current);
                 area = arch_get_unmapped_area_topdown(filp, addr, len,
                                                       pgoff, flags);
         }
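The upgrade direction follows the same split of responsibilities: crst_table_upgrade() now only rebuilds the region tables (see the pgtable.c hunks below), and every caller that grows a live address space reloads the ASCE afterwards. A hedged sketch of the pattern shared by the three mmap.c call sites above; grow_to_4_levels() is a hypothetical wrapper used only for illustration:

/* Hypothetical wrapper, for illustration only. */
static int grow_to_4_levels(struct mm_struct *mm)
{
        int rc;

        rc = crst_table_upgrade(mm, 1UL << 53); /* add region table levels */
        if (rc)
                return rc;                      /* allocation failed, keep the old layout */
        update_mm(mm, current);                 /* make the hardware use the new top-level table */
        return 0;
}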
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1cab221077cc..e9ac2d60b7e5 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -85,7 +85,6 @@ repeat:
                 crst_table_free(mm, table);
         if (mm->context.asce_limit < limit)
                 goto repeat;
-        update_mm(mm, current);
         return 0;
 }
 
@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
         pgd_t *pgd;
 
-        if (mm->context.asce_limit <= limit)
-                return;
-        __tlb_flush_mm(mm);
         while (mm->context.asce_limit > limit) {
                 pgd = mm->pgd;
                 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
                 mm->task_size = mm->context.asce_limit;
                 crst_table_free(mm, (unsigned long *) pgd);
         }
-        update_mm(mm, current);
 }
 
 #endif