From 156152f84e57bff4d9b5d70be6f3f16116bf6fb1 Mon Sep 17 00:00:00 2001
From: Gerald Schaefer
Date: Thu, 25 Oct 2012 17:24:12 +0200
Subject: s390/mm: use pmd_large() instead of pmd_huge()

Without CONFIG_HUGETLB_PAGE, pmd_huge() will always return 0. So
pmd_large() should be used instead in places where both transparent
huge pages and hugetlbfs pages can occur.

Signed-off-by: Gerald Schaefer
Signed-off-by: Martin Schwidefsky
---
 arch/s390/lib/uaccess_pt.c | 2 +-
 arch/s390/mm/gup.c         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 2d37bb861faf..9017a63dda3d 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -39,7 +39,7 @@ static __always_inline unsigned long follow_table(struct mm_struct *mm,
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd))
 		return -0x10UL;
-	if (pmd_huge(*pmd)) {
+	if (pmd_large(*pmd)) {
 		if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
 			return -0x04UL;
 		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 60acb93a4680..8b8285310b5a 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -126,7 +126,7 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 	 */
 	if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 		return 0;
-	if (unlikely(pmd_huge(pmd))) {
+	if (unlikely(pmd_large(pmd))) {
 		if (!gup_huge_pmd(pmdp, pmd, addr, next,
 				  write, pages, nr))
 			return 0;
--
cgit v1.2.1
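The distinction the patch relies on: pmd_large() tests the segment-table
"large page" bit and is therefore true for both THP and hugetlbfs
mappings, while pmd_huge() is a hugetlbfs helper that compiles to a
constant 0 when CONFIG_HUGETLB_PAGE is off. A condensed sketch of the
follow_table() logic above, using only names that appear in the diff
(an illustration, not a drop-in kernel function):

    /* Resolve a large (1 MB segment) mapping at the pmd level.
     * pmd_large() must be used here: with CONFIG_HUGETLB_PAGE=n,
     * pmd_huge() is always 0 and a THP mapping would fall through
     * to a bogus pte-level walk.
     */
    static unsigned long resolve_large_pmd(pmd_t pmd, unsigned long addr)
    {
            if (pmd_none(pmd))
                    return -0x10UL;         /* fault: nothing mapped */
            if (pmd_large(pmd))             /* segment maps the page directly */
                    return (pmd_val(pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
            return 0;                       /* caller descends to the ptes */
    }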
From d8e7a33df0976459674dece4b630b5d59796ece7 Mon Sep 17 00:00:00 2001
From: Gerald Schaefer
Date: Thu, 25 Oct 2012 17:42:50 +0200
Subject: s390/thp: respect page protection in pmd_none() and pmd_present()

Similar to pte_none() and pte_present(), the pmd functions should also
respect page protection of huge pages, especially PROT_NONE. This
patch also simplifies massage_pgprot_pmd() by adding new definitions
for huge page protection.

Signed-off-by: Gerald Schaefer
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/pgtable.h | 35 ++++++++++++++++++++++-------------
 1 file changed, 22 insertions(+), 13 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index dd647c919a66..2d3b7cb26005 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -506,12 +506,15 @@ static inline int pud_bad(pud_t pud)
 
 static inline int pmd_present(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
+	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
+	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
+	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
+	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
 }
 
 static inline int pmd_large(pmd_t pmd)
@@ -1223,6 +1226,11 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
+#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
+#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)
+
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
 
@@ -1242,16 +1250,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
-	unsigned long pgprot_pmd = 0;
-
-	if (pgprot_val(pgprot) & _PAGE_INVALID) {
-		if (pgprot_val(pgprot) & _PAGE_SWT)
-			pgprot_pmd |= _HPAGE_TYPE_NONE;
-		pgprot_pmd |= _SEGMENT_ENTRY_INV;
-	}
-	if (pgprot_val(pgprot) & _PAGE_RO)
-		pgprot_pmd |= _SEGMENT_ENTRY_RO;
-	return pgprot_pmd;
+	/*
+	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+	 * Convert to segment table entry format.
+	 */
+	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
+		return pgprot_val(SEGMENT_NONE);
+	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
+		return pgprot_val(SEGMENT_RO);
+	return pgprot_val(SEGMENT_RW);
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
@@ -1269,7 +1276,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+	/* Do not clobber _HPAGE_TYPE_NONE pages! */
+	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
 	return pmd;
 }
 
--
cgit v1.2.1
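The new predicates encode three states in two bits of the segment-table
entry. A standalone illustration with assumed bit values (the real
masks live in arch/s390/include/asm/pgtable.h; only the relationships
between them matter here):

    #include <stdio.h>

    #define INV       0x20UL        /* assumed _SEGMENT_ENTRY_INV */
    #define RO        0x200UL       /* assumed _SEGMENT_ENTRY_RO */
    #define TYPE_NONE (INV | RO)    /* assumed _HPAGE_TYPE_NONE */

    /* Mirrors the patched pmd_present(): a PROT_NONE huge page is
     * invalid to the hardware but still present to the kernel. */
    static int present(unsigned long e)
    {
            return (e & (INV | RO)) == TYPE_NONE || !(e & INV);
    }

    /* Mirrors the patched pmd_none(): invalid and not PROT_NONE. */
    static int none(unsigned long e)
    {
            return (e & INV) && !(e & RO);
    }

    int main(void)
    {
            unsigned long entries[] = { INV, TYPE_NONE, 0 };
            const char *name[] = { "empty", "PROT_NONE", "mapped" };

            for (int i = 0; i < 3; i++)
                    printf("%-9s none=%d present=%d\n",
                           name[i], none(entries[i]), present(entries[i]));
            return 0;
    }

Running it shows the point of the patch: the PROT_NONE entry reports
present=1 and none=0, a distinction the old ORIGIN/INV based tests
could not make.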
From 619506d5dabb49b7f223a4f2f6b8d697574dd799 Mon Sep 17 00:00:00 2001
From: Cornelia Huck
Date: Mon, 2 Apr 2012 18:22:45 +0200
Subject: s390: Move css limits from drivers/s390/cio/ to include/asm/.

There's no need to keep __MAX_SUBCHANNEL and __MAX_SSID private to
the common I/O layer when __MAX_CSSID is usable by everybody.

Signed-off-by: Cornelia Huck
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/cio.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 55bde6035216..ad2b924167d7 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -9,6 +9,8 @@
 #define LPM_ANYPATH 0xff
 #define __MAX_CSSID 0
+#define __MAX_SUBCHANNEL 65535
+#define __MAX_SSID 3
 
 #include
--
cgit v1.2.1

From 99e639b791f5cfae0b8d42f5fe6c1e8839932bea Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Wed, 31 Oct 2012 17:14:39 +0100
Subject: s390/sclp: fix addressing mode clobber

The early mini sclp driver may be called in zArch mode either in 31 or
64 bit addressing mode. If called in 31 bit addressing mode, however,
the new external interrupt psw would switch to 64 bit addressing mode.
This would cause an addressing exception within the interrupt handler,
since the code didn't expect the zArch/31 bit addressing mode
combination.

Fix this by setting the new psw addressing mode bits so they fit the
current addressing mode.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/sclp.S | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'arch/s390')

diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index bf053898630d..b6506ee32a36 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -44,6 +44,12 @@ _sclp_wait_int:
 #endif
 	mvc	.LoldpswS1-.LbaseS1(16,%r13),0(%r8)
 	mvc	0(16,%r8),0(%r9)
+#ifdef CONFIG_64BIT
+	epsw	%r6,%r7			# set current addressing mode
+	nill	%r6,0x1			# in new psw (31 or 64 bit mode)
+	nilh	%r7,0x8000
+	stm	%r6,%r7,0(%r8)
+#endif
 	lhi	%r6,0x0200		# cr mask for ext int (cr0.54)
 	ltr	%r2,%r2
 	jz	.LsetctS1
@@ -87,7 +93,7 @@ _sclp_wait_int:
 	.long	0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
 #ifdef CONFIG_64BIT
 .LextpswS1_64:
-	.quad	0x0000000180000000, .LwaitS1	# PSW to handle ext int, 64 bit
+	.quad	0, .LwaitS1		# PSW to handle ext int, 64 bit
 #endif
 .LwaitpswS1:
 	.long	0x010a0000, 0x00000000+.LloopS1	# PSW to wait for ext int
--
cgit v1.2.1
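What the new epsw/nill/nilh/stm sequence computes, restated in C (mask
values as in the 64 bit section of arch/s390/include/uapi/asm/ptrace.h;
a sketch of the idea, not kernel code):

    #define PSW_MASK_EA 0x0000000100000000UL   /* extended addressing bit */
    #define PSW_MASK_BA 0x0000000080000000UL   /* basic addressing bit */

    /* Build the external-interrupt PSW mask from a template, but take
     * the EA/BA addressing-mode bits from the PSW currently in effect,
     * so the handler runs in the caller's mode instead of a hard-coded
     * 64 bit mode.
     */
    static unsigned long ext_int_psw_mask(unsigned long current_mask,
                                          unsigned long template_mask)
    {
            unsigned long mode = current_mask & (PSW_MASK_EA | PSW_MASK_BA);

            return (template_mask & ~(PSW_MASK_EA | PSW_MASK_BA)) | mode;
    }

This is also why the 64 bit PSW template's mask word can become
".quad 0": the addressing-mode bits are now filled in at run time.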
From fa968ee215c0ca91e4a9c3a69ac2405aae6e5d2f Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Wed, 7 Nov 2012 10:44:08 +0100
Subject: s390/signal: set correct address space control

If user space is running in primary mode it can switch to secondary or
access register mode; this is used e.g. in the clock_gettime code of
the vdso. If a signal is delivered to the user space process while it
has been running in access register mode, the signal handler is
executed in access register mode as well, which will result in a crash
most of the time.

Set the address space control bits in the PSW to the default for the
execution of the signal handler and make sure that the previous
address space control is restored on signal return. Take care that
user space can not switch to the kernel address space by modifying
the registers in the signal frame.

Cc: stable@vger.kernel.org
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/compat.h      |  2 +-
 arch/s390/include/uapi/asm/ptrace.h |  4 ++--
 arch/s390/kernel/compat_signal.c    | 14 ++++++++++++--
 arch/s390/kernel/signal.c           | 14 ++++++++++++--
 4 files changed, 27 insertions(+), 7 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index a34a9d612fc0..18cd6b592650 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -20,7 +20,7 @@
 #define PSW32_MASK_CC		0x00003000UL
 #define PSW32_MASK_PM		0x00000f00UL
 
-#define PSW32_MASK_USER		0x00003F00UL
+#define PSW32_MASK_USER		0x0000FF00UL
 
 #define PSW32_ADDR_AMODE	0x80000000UL
 #define PSW32_ADDR_INSN		0x7FFFFFFFUL
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 705588a16d70..a5ca214b34fd 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -239,7 +239,7 @@ typedef struct
 #define PSW_MASK_EA		0x00000000UL
 #define PSW_MASK_BA		0x00000000UL
 
-#define PSW_MASK_USER		0x00003F00UL
+#define PSW_MASK_USER		0x0000FF00UL
 
 #define PSW_ADDR_AMODE		0x80000000UL
 #define PSW_ADDR_INSN		0x7FFFFFFFUL
@@ -269,7 +269,7 @@ typedef struct
 #define PSW_MASK_EA		0x0000000100000000UL
 #define PSW_MASK_BA		0x0000000080000000UL
 
-#define PSW_MASK_USER		0x00003F8180000000UL
+#define PSW_MASK_USER		0x0000FF8180000000UL
 
 #define PSW_ADDR_AMODE		0x0000000000000000UL
 #define PSW_ADDR_INSN		0xFFFFFFFFFFFFFFFFUL
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index a1e8a8694bb7..593fcc9253fc 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -309,6 +309,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
 		(__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
 		(__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
+	/* Check for invalid user address space control. */
+	if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
+		regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+			(regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
 	for (i = 0; i < NUM_GPRS; i++)
 		regs->gprs[i] = (__u64) regs32.gprs[i];
@@ -481,7 +485,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 
 	/* Set up registers for signal handler */
 	regs->gprs[15] = (__force __u64) frame;
-	regs->psw.mask |= PSW_MASK_BA;		/* force amode 31 */
+	/* Force 31 bit amode and default user address space control. */
+	regs->psw.mask = PSW_MASK_BA |
+		(psw_user_bits & PSW_MASK_ASC) |
+		(regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (__force __u64) ka->sa.sa_handler;
 
 	regs->gprs[2] = map_signal(sig);
@@ -549,7 +556,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
 
 	/* Set up registers for signal handler */
 	regs->gprs[15] = (__force __u64) frame;
-	regs->psw.mask |= PSW_MASK_BA;		/* force amode 31 */
+	/* Force 31 bit amode and default user address space control. */
+	regs->psw.mask = PSW_MASK_BA |
+		(psw_user_bits & PSW_MASK_ASC) |
+		(regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (__u64) ka->sa.sa_handler;
 
 	regs->gprs[2] = map_signal(sig);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index c13a2a37ef00..d1259d875074 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -136,6 +136,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 	/* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
 	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
 		(user_sregs.regs.psw.mask & PSW_MASK_USER);
+	/* Check for invalid user address space control. */
+	if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
+		regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+			(regs->psw.mask & ~PSW_MASK_ASC);
 	/* Check for invalid amode */
 	if (regs->psw.mask & PSW_MASK_EA)
 		regs->psw.mask |= PSW_MASK_BA;
@@ -273,7 +277,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 
 	/* Set up registers for signal handler */
 	regs->gprs[15] = (unsigned long) frame;
-	regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
+	/* Force default amode and default user address space control. */
+	regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
+		(psw_user_bits & PSW_MASK_ASC) |
+		(regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
 
 	regs->gprs[2] = map_signal(sig);
@@ -346,7 +353,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 
 	/* Set up registers for signal handler */
 	regs->gprs[15] = (unsigned long) frame;
-	regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
+	/* Force default amode and default user address space control. */
+	regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
+		(psw_user_bits & PSW_MASK_ASC) |
+		(regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
 
 	regs->gprs[2] = map_signal(sig);
--
cgit v1.2.1
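The guard added in four places above has the same shape each time. As a
standalone sketch (the PSW_MASK_ASC value and the psw_*_bits parameters
are assumptions standing in for the kernel definitions of the time):

    /* Reject an address space control restored from the signal frame
     * if it selects (or numerically reaches) the kernel's own ASC;
     * fall back to the default user ASC in that case.
     */
    static unsigned long sanitize_asc(unsigned long mask,
                                      unsigned long psw_user_bits,
                                      unsigned long psw_kernel_bits)
    {
            const unsigned long PSW_MASK_ASC = 0x0000C00000000000UL; /* assumed */

            if ((mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
                    mask = (psw_user_bits & PSW_MASK_ASC) |
                           (mask & ~PSW_MASK_ASC);
            return mask;
    }

The widened PSW_MASK_USER constants make the ASC bits part of what
sigreturn restores in the first place; the check above is what keeps
that from becoming a way into the kernel address space.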
From 658e5ce705f2a09ab681eb61ca7c8619bb7a783d Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Sat, 10 Nov 2012 11:04:27 +0100
Subject: s390/topology: fix core id vs physical package id mix-up

The current topology code confuses the core id with the physical
package id. In other words, /sys/devices/system/cpu/cpuX/topology/core_id
displays the physical_package_id (aka socket id) instead of the core
id. The physical_package_id sysfs attribute always displays "-1"
instead of the socket id.

Fix this mix-up with a small patch which defines and initializes
topology_physical_package_id correctly and fixes the broken core id
handling.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/topology.h | 3 +++
 arch/s390/kernel/topology.c      | 6 ++++--
 2 files changed, 7 insertions(+), 2 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 9ca305383760..9935cbd6a46f 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -8,6 +8,9 @@ struct cpu;
 
 #ifdef CONFIG_SCHED_BOOK
 
+extern unsigned char cpu_socket_id[NR_CPUS];
+#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
+
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 54d93f4b6818..dd55f7c20104 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -40,6 +40,7 @@ static DEFINE_SPINLOCK(topology_lock);
 static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
+unsigned char cpu_socket_id[NR_CPUS];
 
 static struct mask_info book_info;
 cpumask_t cpu_book_map[NR_CPUS];
@@ -83,11 +84,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 			cpumask_set_cpu(lcpu, &book->mask);
 			cpu_book_id[lcpu] = book->id;
 			cpumask_set_cpu(lcpu, &core->mask);
+			cpu_core_id[lcpu] = rcpu;
 			if (one_core_per_cpu) {
-				cpu_core_id[lcpu] = rcpu;
+				cpu_socket_id[lcpu] = rcpu;
 				core = core->next;
 			} else {
-				cpu_core_id[lcpu] = core->id;
+				cpu_socket_id[lcpu] = core->id;
 			}
 			smp_cpu_set_polarization(lcpu, tl_cpu->pp);
 		}
--
cgit v1.2.1
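The effect is directly observable from user space through the two sysfs
attributes named in the commit message. A small probe (ordinary
userspace C, no kernel internals):

    #include <stdio.h>

    /* Read one integer attribute from sysfs; -1 on failure. */
    static int read_attr(const char *path)
    {
            FILE *f = fopen(path, "r");
            int val = -1;

            if (f) {
                    if (fscanf(f, "%d", &val) != 1)
                            val = -1;
                    fclose(f);
            }
            return val;
    }

    int main(void)
    {
            const char *base = "/sys/devices/system/cpu/cpu0/topology/";
            char path[128];

            snprintf(path, sizeof(path), "%score_id", base);
            printf("core_id: %d\n", read_attr(path));
            snprintf(path, sizeof(path), "%sphysical_package_id", base);
            printf("physical_package_id: %d\n", read_attr(path));
            return 0;
    }

Before the fix the first value was really the socket id and the second
was always -1; afterwards each attribute reports what its name promises.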
From d55c4c613fc4d4ad2ba0fc6fa2b57176d420f7e4 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 22 Oct 2012 15:49:02 +0200
Subject: s390/gup: add missing TASK_SIZE check to get_user_pages_fast()

When walking page tables we need to make sure that everything is
within bounds of the ASCE limit of the task's address space. Otherwise
we might calculate e.g. a pud pointer which is not within a pud and
dereference it.
So check against TASK_SIZE (which is the ASCE limit) before walking
page tables.

Reviewed-by: Gerald Schaefer
Cc: stable@vger.kernel.org
Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/mm/gup.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/s390')

diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 8b8285310b5a..16fb3c1615dc 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -229,7 +229,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
 	end = start + len;
-	if (end < start)
+	if ((end < start) || (end > TASK_SIZE))
 		goto slow_irqon;
 
 	/*
--
cgit v1.2.1

From 516bad44b9f3bdcb0be6be0252b7557bf7a149e4 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 22 Oct 2012 15:58:26 +0200
Subject: s390/gup: fix access_ok() usage in __get_user_pages_fast()

access_ok() returns always "true" on s390. Therefore all access_ok()
invocations are rather pointless. However when walking page tables we
need to make sure that everything is within bounds of the ASCE limit
of the task's address space. So remove the access_ok() call and add
the same check we have in get_user_pages_fast().

Reviewed-by: Gerald Schaefer
Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/mm/gup.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 16fb3c1615dc..1f5315d1215c 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -180,8 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
 	end = start + len;
-	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-				(void __user *)start, len)))
+	if ((end < start) || (end > TASK_SIZE))
 		return 0;
 
 	local_irq_save(flags);
--
cgit v1.2.1
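Both gup fixes converge on one range check: an unsigned-overflow test
plus an upper bound at the ASCE limit (which on s390 is TASK_SIZE, per
the commit messages). Condensed into a helper (a sketch, not the
kernel's code):

    /* Validate a user address range before a lockless page table walk.
     * "end < start" catches wrap-around of start + len in unsigned
     * arithmetic; "end > limit" keeps the walk inside the address
     * space described by the task's ASCE.
     */
    static int gup_range_ok(unsigned long start, unsigned long len,
                            unsigned long limit)
    {
            unsigned long end = start + len;

            return end >= start && end <= limit;
    }

Without the upper bound, a pointer computed for an address beyond the
ASCE limit can land outside the actual pud/pmd tables and be
dereferenced, exactly the failure mode the first commit describes.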
From 4bffbb3455372a26816e364fb4448810f7014452 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Thu, 8 Nov 2012 14:18:47 +0100
Subject: s390/mm: have 16 byte aligned struct pages

Select HAVE_ALIGNED_STRUCT_PAGE on s390, so that the slub allocator
can make use of compare and swap double for lockless updates. This
increases the size of struct page to 64 bytes (instead of 56 bytes),
however the performance gain justifies the increased size:

- now exactly four struct pages fit into a single cache line; the
  case that accessing a struct page causes two cache line loads does
  not exist anymore.

- calculating the offset of a struct page within the memmap array is
  only a simple shift instead of a more expensive multiplication.

A "hackbench 200 process 200" run on a 32 cpu system showed an 8%
runtime improvement.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/Kconfig | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/s390')

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 5dba755a43e6..d385f396dfee 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -96,6 +96,7 @@ config S390
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_CMPXCHG_DOUBLE
+	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select VIRT_CPU_ACCOUNTING
 	select ARCH_DISCARD_MEMBLOCK
--
cgit v1.2.1

From 9d73fc2d641f8831c9dc803177fe47c02120cc36 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sun, 2 Dec 2012 17:55:07 +0000
Subject: open*(2) compat fixes (s390, arm64)

The usual rules for open()/openat()/open_by_handle_at() are

1) native 32bit - don't force O_LARGEFILE in flags
2) native 64bit - force O_LARGEFILE in flags
3) compat on 64bit host - as for native 32bit
4) native 32bit ABI for 64bit system (mips/n32, x86/x32) - as for
   native 64bit

There are only two exceptions - s390 compat has open() forcing
O_LARGEFILE and arm64 compat has open_by_handle_at() doing the same
thing. The same binaries on native host (s390/31 and arm resp.) will
*not* force O_LARGEFILE, so IMO both are emulation bugs.

Objections? The fix is obvious...

Signed-off-by: Al Viro
Signed-off-by: Linus Torvalds
---
 arch/s390/kernel/compat_wrapper.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/s390')

diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index ad79b846535c..827e094a2f49 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -28,7 +28,7 @@ ENTRY(sys32_open_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	lgfr	%r4,%r4			# int
-	jg	sys_open		# branch to system call
+	jg	compat_sys_open		# branch to system call
 
 ENTRY(sys32_close_wrapper)
 	llgfr	%r2,%r2			# unsigned int
--
cgit v1.2.1
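The rule Al Viro spells out can be checked from user space, since
fcntl(F_GETFL) returns the open flags as the kernel holds them,
including a forced O_LARGEFILE. A probe (the raw flag value below is
an assumption for builds where glibc defines O_LARGEFILE as 0):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Kernel value of O_LARGEFILE on most architectures; glibc may
     * define the macro as 0 in 64 bit builds, so spell it out here. */
    #define KERNEL_O_LARGEFILE 0100000

    int main(void)
    {
            int fd = open("/dev/null", O_RDONLY);
            int flags;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            flags = fcntl(fd, F_GETFL);
            printf("O_LARGEFILE %s by the kernel\n",
                   (flags & KERNEL_O_LARGEFILE) ? "forced" : "not forced");
            close(fd);
            return 0;
    }

Built as a native 64 bit binary this should print "forced"; the same
source built as a 31 bit s390 binary should print "not forced" once
the wrapper branches to compat_sys_open instead of sys_open.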