author     Daniel Vetter <daniel.vetter@ffwll.ch>  2016-02-12 14:24:37 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2016-02-12 14:24:37 +0100
commit     05fd934ba585ea8c9f129f15f11327adb1f96fe3 (patch)
tree       7b678c512c43419ccf484e71c84d69caee84a6da /arch/s390/mm
parent     ebb7c78d358b2ea45c7d997423e6feb42e5ce4ef (diff)
parent     382ab95d1af85381d8a5dff09b16a80c7e492534 (diff)
Merge tag 'topic/drm-misc-2016-02-12' into drm-intel-next-queued
Backmerge to get at the new encoder_mask support in atomic helpers.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
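
For context on the merge reason, a minimal sketch of what the backmerged support enables (assumptions: the encoder_mask member that the drm-misc topic branch adds to struct drm_crtc_state, and the drm_encoder_index() helper from the same series; the wrapper name below is hypothetical, for illustration only):

#include <drm/drm_crtc.h>

/*
 * Hypothetical helper: with encoder_mask kept up to date by the atomic
 * helpers, a driver can test whether an encoder participates in a CRTC
 * state without walking the full connector list.
 */
static bool crtc_state_uses_encoder(const struct drm_crtc_state *crtc_state,
				    struct drm_encoder *encoder)
{
	return crtc_state->encoder_mask & (1 << drm_encoder_index(encoder));
}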
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/extable.c    |  8
-rw-r--r--  arch/s390/mm/extmem.c     |  4
-rw-r--r--  arch/s390/mm/fault.c      |  8
-rw-r--r--  arch/s390/mm/gup.c        | 25
-rw-r--r--  arch/s390/mm/init.c       |  2
-rw-r--r--  arch/s390/mm/maccess.c    |  4
-rw-r--r--  arch/s390/mm/mem_detect.c |  7
-rw-r--r--  arch/s390/mm/mmap.c       | 12
-rw-r--r--  arch/s390/mm/pgtable.c    | 58
9 files changed, 56 insertions(+), 72 deletions(-)
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
index 4d1ee88864e8..18c8b819b0aa 100644
--- a/arch/s390/mm/extable.c
+++ b/arch/s390/mm/extable.c
@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
 	int i;
 
 	/* Normalize entries to being relative to the start of the section */
-	for (p = start, i = 0; p < finish; p++, i += 8)
+	for (p = start, i = 0; p < finish; p++, i += 8) {
 		p->insn += i;
+		p->fixup += i + 4;
+	}
 	sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
 	/* Denormalize all entries */
-	for (p = start, i = 0; p < finish; p++, i += 8)
+	for (p = start, i = 0; p < finish; p++, i += 8) {
 		p->insn -= i;
+		p->fixup -= i + 4;
+	}
 }
 
 #ifdef CONFIG_MODULES
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 18fccc303db7..a1bf4ad8925d 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -94,7 +94,7 @@ static DEFINE_MUTEX(dcss_lock);
 static LIST_HEAD(dcss_list);
 static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
 					"EW/EN-MIXED" };
-static int loadshr_scode, loadnsr_scode, findseg_scode;
+static int loadshr_scode, loadnsr_scode;
 static int segext_scode, purgeseg_scode;
 static int scode_set;
@@ -130,7 +130,6 @@ dcss_set_subcodes(void)
 		loadshr_scode = DCSS_LOADSHRX;
 		loadnsr_scode = DCSS_LOADNSRX;
 		purgeseg_scode = DCSS_PURGESEG;
-		findseg_scode = DCSS_FINDSEGX;
 		segext_scode = DCSS_SEGEXTX;
 		return 0;
 	}
@@ -138,7 +137,6 @@ dcss_set_subcodes(void)
 	loadshr_scode = DCSS_LOADNOLY;
 	loadnsr_scode = DCSS_LOADNSR;
 	purgeseg_scode = DCSS_PURGESEG;
-	findseg_scode = DCSS_FINDSEG;
 	segext_scode = DCSS_SEGEXT;
 	return 0;
 }
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ec1a30d0d11a..791a4146052c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -228,7 +228,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
 		return;
 	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
 	       regs->int_code & 0xffff, regs->int_code >> 17);
-	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
+	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
 	printk(KERN_CONT "\n");
 	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
 	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
@@ -254,12 +254,11 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
 static noinline void do_no_context(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;
-	unsigned long address;
 
 	/* Are we prepared to handle this kernel fault? */
-	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+	fixup = search_exception_tables(regs->psw.addr);
 	if (fixup) {
-		regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
+		regs->psw.addr = extable_fixup(fixup);
 		return;
 	}
@@ -267,7 +266,6 @@ static noinline void do_no_context(struct pt_regs *regs)
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	address = regs->int_parm_long & __FAIL_ADDR_MASK;
 	if (!user_space_fault(regs))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " in virtual kernel address space\n");
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 12bbf0e8478f..13dab0c1645c 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -55,7 +55,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
 	unsigned long mask, result;
-	struct page *head, *page, *tail;
+	struct page *head, *page;
 	int refs;
 
 	result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
@@ -67,7 +67,6 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	refs = 0;
 	head = pmd_page(pmd);
 	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-	tail = page;
 	do {
 		VM_BUG_ON(compound_head(page) != head);
 		pages[*nr] = page;
@@ -88,16 +87,6 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 		return 0;
 	}
 
-	/*
-	 * Any tail page need their mapcount reference taken before we
-	 * return.
-	 */
-	while (refs--) {
-		if (PageTail(tail))
-			get_huge_page_tail(tail);
-		tail++;
-	}
-
 	return 1;
 }
 
@@ -116,16 +105,7 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 		pmd = *pmdp;
 		barrier();
 		next = pmd_addr_end(addr, end);
-		/*
-		 * The pmd_trans_splitting() check below explains why
-		 * pmdp_splitting_flush() has to serialize with
-		 * smp_call_function() against our disabled IRQs, to stop
-		 * this gup-fast code from running while we set the
-		 * splitting bit in the pmd. Returning zero will take
-		 * the slow path that will call wait_split_huge_page()
-		 * if the pmd is still in splitting state.
-		 */
-		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+		if (pmd_none(pmd))
 			return 0;
 		if (unlikely(pmd_large(pmd))) {
 			/*
@@ -233,6 +213,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	struct mm_struct *mm = current->mm;
 	int nr, ret;
 
+	might_sleep();
 	start &= PAGE_MASK;
 	nr = __get_user_pages_fast(start, nr_pages, write, pages);
 	if (nr == nr_pages)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c722400c7697..73e290337092 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -98,7 +98,7 @@ void __init paging_init(void)
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
 	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
-	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
+	__arch_local_irq_stosm(0x04);
 
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 8a993a53fcd6..fec59c067d0d 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -163,11 +163,11 @@ static int is_swapped(unsigned long addr)
 	unsigned long lc;
 	int cpu;
 
-	if (addr < sizeof(struct _lowcore))
+	if (addr < sizeof(struct lowcore))
 		return 1;
 	for_each_online_cpu(cpu) {
 		lc = (unsigned long) lowcore_ptr[cpu];
-		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
+		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
 			continue;
 		return 1;
 	}
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index e00f0d5d296d..d612cc3eec6a 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -14,8 +14,6 @@
 #include <asm/sclp.h>
 #include <asm/setup.h>
 
-#define ADDR2G (1ULL << 31)
-
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY  1
 
@@ -27,15 +25,14 @@ static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
 
 void __init detect_memory_memblock(void)
 {
-	unsigned long long memsize, rnmax, rzm;
-	unsigned long addr, size;
+	unsigned long memsize, rnmax, rzm, addr, size;
 	int type;
 
 	rzm = sclp.rzm;
 	rnmax = sclp.rnmax;
 	memsize = rzm * rnmax;
 	if (!rzm)
-		rzm = 1ULL << 17;
+		rzm = 1UL << 17;
 	max_physmem_end = memsize;
 	addr = 0;
 	/* keep memblock lists close to the kernel */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index ea01477b4aa6..45c4daa49930 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -169,12 +169,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 int
 s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
 {
-	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
+	if (is_compat_task() || TASK_SIZE >= TASK_MAX_SIZE)
 		return 0;
 	if (!(flags & MAP_FIXED))
 		addr = 0;
 	if ((addr + len) >= TASK_SIZE)
-		return crst_table_upgrade(current->mm, 1UL << 53);
+		return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
 	return 0;
 }
 
@@ -189,9 +189,9 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	if (!(area & ~PAGE_MASK))
 		return area;
-	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
 		/* Upgrade the page table to 4 levels and retry. */
-		rc = crst_table_upgrade(mm, 1UL << 53);
+		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
 		if (rc)
 			return (unsigned long) rc;
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -211,9 +211,9 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
 	if (!(area & ~PAGE_MASK))
 		return area;
-	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
 		/* Upgrade the page table to 4 levels and retry. */
-		rc = crst_table_upgrade(mm, 1UL << 53);
+		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
 		if (rc)
 			return (unsigned long) rc;
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 54ef3bc01b43..5109827883ac 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -55,7 +55,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 	unsigned long entry;
 	int flush;
 
-	BUG_ON(limit > (1UL << 53));
+	BUG_ON(limit > TASK_MAX_SIZE);
 	flush = 0;
 repeat:
 	table = crst_table_alloc(mm);
@@ -133,7 +133,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 /**
  * gmap_alloc - allocate a guest address space
  * @mm: pointer to the parent mm_struct
- * @limit: maximum size of the gmap address space
+ * @limit: maximum address of the gmap address space
  *
  * Returns a guest address space structure.
 */
@@ -402,7 +402,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 	if ((from | to | len) & (PMD_SIZE - 1))
 		return -EINVAL;
 	if (len == 0 || from + len < from || to + len < to ||
-	    from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
+	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
 		return -EINVAL;
 
 	flush = 0;
@@ -578,17 +578,29 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
 {
 	unsigned long vmaddr;
 	int rc;
+	bool unlocked;
 
 	down_read(&gmap->mm->mmap_sem);
+
+retry:
+	unlocked = false;
 	vmaddr = __gmap_translate(gmap, gaddr);
 	if (IS_ERR_VALUE(vmaddr)) {
 		rc = vmaddr;
 		goto out_up;
 	}
-	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) {
+	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
+			     &unlocked)) {
 		rc = -EFAULT;
 		goto out_up;
 	}
+	/*
+	 * In the case that fixup_user_fault unlocked the mmap_sem during
+	 * faultin redo __gmap_translate to not race with a map/unmap_segment.
+	 */
+	if (unlocked)
+		goto retry;
+
 	rc = __gmap_link(gmap, gaddr, vmaddr);
 out_up:
 	up_read(&gmap->mm->mmap_sem);
@@ -603,10 +615,7 @@ static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
 	else if (is_migration_entry(entry)) {
 		struct page *page = migration_entry_to_page(entry);
 
-		if (PageAnon(page))
-			dec_mm_counter(mm, MM_ANONPAGES);
-		else
-			dec_mm_counter(mm, MM_FILEPAGES);
+		dec_mm_counter(mm, mm_counter(page));
 	}
 	free_swap_and_cache(entry);
 }
@@ -717,12 +726,14 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
 	spinlock_t *ptl;
 	pte_t *ptep, entry;
 	pgste_t pgste;
+	bool unlocked;
 	int rc = 0;
 
 	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
 		return -EINVAL;
 	down_read(&gmap->mm->mmap_sem);
 	while (len) {
+		unlocked = false;
 		/* Convert gmap address and connect the page tables */
 		addr = __gmap_translate(gmap, gaddr);
 		if (IS_ERR_VALUE(addr)) {
@@ -730,10 +741,14 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
 			break;
 		}
 		/* Get the page mapped */
-		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
+		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
+				     &unlocked)) {
 			rc = -EFAULT;
 			break;
 		}
+		/* While trying to map mmap_sem got unlocked. Let us retry */
+		if (unlocked)
+			continue;
 		rc = __gmap_link(gmap, gaddr, addr);
 		if (rc)
 			break;
@@ -794,9 +809,11 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	spinlock_t *ptl;
 	pgste_t old, new;
 	pte_t *ptep;
+	bool unlocked;
 
 	down_read(&mm->mmap_sem);
 retry:
+	unlocked = false;
 	ptep = get_locked_pte(mm, addr, &ptl);
 	if (unlikely(!ptep)) {
 		up_read(&mm->mmap_sem);
@@ -805,7 +822,12 @@ retry:
 	if (!(pte_val(*ptep) & _PAGE_INVALID) &&
 	    (pte_val(*ptep) & _PAGE_PROTECT)) {
 		pte_unmap_unlock(ptep, ptl);
-		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
+		/*
+		 * We do not really care about unlocked. We will retry either
+		 * way. But this allows fixup_user_fault to enable userfaultfd.
+		 */
+		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE,
+				     &unlocked)) {
 			up_read(&mm->mmap_sem);
 			return -EFAULT;
 		}
@@ -1308,22 +1330,6 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
 	return 1;
 }
 
-static void pmdp_splitting_flush_sync(void *arg)
-{
-	/* Simply deliver the interrupt */
-}
-
-void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-			  pmd_t *pmdp)
-{
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
-			      (unsigned long *) pmdp)) {
-		/* need to serialize against gup-fast (IRQ disabled) */
-		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
-	}
-}
-
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pgtable)
 {
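
The recurring change across the pgtable.c hunks is the retry protocol around fixup_user_fault(): the added bool *unlocked argument reports whether mmap_sem was dropped while the page was faulted in, in which case any translation computed under the lock is stale and must be redone. A condensed sketch of that pattern, with identifiers taken from the gmap_fault() hunk above (illustration only, not a drop-in function):

int gmap_fault_sketch(struct gmap *gmap, unsigned long gaddr,
		      unsigned int fault_flags)
{
	unsigned long vmaddr;
	bool unlocked;
	int rc;

	down_read(&gmap->mm->mmap_sem);
retry:
	unlocked = false;
	/* Translation is only valid while mmap_sem stays held */
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/* mmap_sem was dropped during the fault: redo the translation */
	if (unlocked)
		goto retry;
	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}

The same idea appears in gmap_ipte_notify() (retry via continue in its while loop) and, trivially, in set_guest_storage_key(), which retries unconditionally and only passes &unlocked so fixup_user_fault() may cooperate with userfaultfd.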