Diffstat (limited to 'arch/parisc/include')
-rw-r--r--  arch/parisc/include/asm/cacheflush.h   |  23
-rw-r--r--  arch/parisc/include/asm/uaccess.h      | 126
-rw-r--r--  arch/parisc/include/uapi/asm/unistd.h  |   3
3 files changed, 48 insertions, 104 deletions
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 19c9c3c5f267..c7e15cc5c668 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates. Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
- unsigned long start = (unsigned long)vaddr;
-
- flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
- unsigned long start = (unsigned long)vaddr;
- void *cursor = vaddr;
- for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
- struct page *page = vmalloc_to_page(cursor);
-
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
- flush_kernel_dcache_page(page);
- }
- flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
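The two vmap helpers above are reduced to prototypes; their bodies move out of line (in the kernel tree they would land in arch/parisc/kernel/cache.c). A minimal sketch of what the out-of-line definitions could look like, assuming the existing parisc_cache_flush_threshold heuristic and the flush_data_cache() whole-cache fallback; this is an illustration, not the exact committed code:

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	/* For large ranges, one whole-cache flush is cheaper than
	 * walking the range line by line. */
	if ((unsigned long)size > parisc_cache_flush_threshold)
		flush_data_cache();
	else
		flush_kernel_dcache_range_asm(start, start + size);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	/* Per the removed comment above, a PA-RISC flush also
	 * invalidates, so the same policy serves the invalidate side. */
	if ((unsigned long)size > parisc_cache_flush_threshold)
		flush_data_cache();
	else
		flush_kernel_dcache_range_asm(start, start + size);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

Taking the bodies out of the header also means later tuning (such as the threshold check) no longer recompiles every user of cacheflush.h.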
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index a0b461336b6a..852322f2d1f9 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -27,7 +27,8 @@
* that put_user is the same as __put_user, etc.
*/
-#define access_ok(type, uaddr, size) (1)
+#define access_ok(type, uaddr, size) \
+ ( (uaddr) == (uaddr) )
#define put_user __put_user
#define get_user __get_user
@@ -59,6 +60,15 @@ struct exception_table_entry {
".previous\n"
/*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT into %r8 for a read or write fault, and zeroes the target
+ * register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+ ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+
+/*
* The page fault handler stores, in a per-cpu area, the following information
* if a fixup routine is available.
*/
@@ -85,7 +95,7 @@ struct exception_data {
#define __get_user(x, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
- register long __gu_val __asm__ ("r9") = 0; \
+ register long __gu_val; \
\
load_sr2(); \
switch (sizeof(*(ptr))) { \
@@ -101,22 +111,23 @@ struct exception_data {
})
#define __get_user_asm(ldx, ptr) \
- __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+ __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err) \
- : "r1");
+ : "r"(ptr), "1"(__gu_err));
#if !defined(CONFIG_64BIT)
#define __get_user_asm64(ptr) \
- __asm__("\n1:\tldw 0(%%sr2,%2),%0" \
- "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+ __asm__(" copy %%r0,%R0\n" \
+ "1: ldw 0(%%sr2,%2),%0\n" \
+ "2: ldw 4(%%sr2,%2),%R0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err) \
- : "r1");
+ : "r"(ptr), "1"(__gu_err));
#endif /* !defined(CONFIG_64BIT) */
@@ -142,32 +153,31 @@ struct exception_data {
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
* instead of writing. This is because they do not write to any memory
* gcc knows about, so there are no aliasing issues. These macros must
- * also be aware that "fixup_put_user_skip_[12]" are executed in the
- * context of the fault, and any registers used there must be listed
- * as clobbers. In this case only "r1" is used by the current routines.
- * r8/r9 are already listed as err/val.
+ * also be aware that fixups are executed in the context of the fault,
+ * and any registers used there must be listed as clobbers.
+ * r8 is already listed as err.
*/
#define __put_user_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
- "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
+ "1: " stx " %2,0(%%sr2,%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__pu_err) \
- : "r"(ptr), "r"(x), "0"(__pu_err) \
- : "r1")
+ : "r"(ptr), "r"(x), "0"(__pu_err))
#if !defined(CONFIG_64BIT)
#define __put_user_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
- "\n1:\tstw %2,0(%%sr2,%1)" \
- "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
+ "1: stw %2,0(%%sr2,%1)\n" \
+ "2: stw %R2,4(%%sr2,%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err) \
- : "r1"); \
+ : "r"(ptr), "r"(__val), "0"(__pu_err)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
@@ -177,9 +187,6 @@ struct exception_data {
* Complex access routines -- external declarations
*/
-extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
-extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
-extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
@@ -193,59 +200,14 @@ extern long lstrnlen_user(const char __user *, long);
#define clear_user lclear_user
#define __clear_user lclear_user
-unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
- unsigned long len);
-unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
- unsigned long len);
-unsigned long copy_in_user(void __user *dst, const void __user *src,
- unsigned long len);
-#define __copy_in_user copy_in_user
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
- WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static __always_inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- int sz = __compiletime_object_size(to);
- unsigned long ret = n;
-
- if (likely(sz < 0 || sz >= n)) {
- check_object_size(to, n, false);
- ret = __copy_from_user(to, from, n);
- } else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
- if (unlikely(ret))
- memset(to + (n - ret), 0, ret);
-
- return ret;
-}
-
-static __always_inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- int sz = __compiletime_object_size(from);
-
- if (likely(sz < 0 || sz >= n)) {
- check_object_size(from, n, true);
- n = __copy_to_user(to, from, n);
- } else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
- return n;
-}
+unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
+ unsigned long len);
+unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
+ unsigned long len);
+unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
+ unsigned long len);
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
struct pt_regs;
int fixup_exception(struct pt_regs *regs);
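For context, here is how the tagged entries created by ASM_EXCEPTIONTABLE_ENTRY_EFAULT() can be consumed on the fault side. fixup_exception() tests the low bit of the fixup address: if set, it loads -EFAULT into %r8 (the __gu_err/__pu_err register) and, for a faulting read, zeroes the load's target register so get_user() never hands back stale data. A rough sketch modeled on arch/parisc/mm/fault.c; parisc_acctyp() is the existing parisc read/write classifier, insn_target_reg() is a hypothetical stand-in for decoding the target register of the faulting instruction, and the entry/address arithmetic is illustrative:

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fix;

	fix = search_exception_tables(regs->iaoq[0]);
	if (!fix)
		return 0;

	/* Low bit set: an ASM_EXCEPTIONTABLE_ENTRY_EFAULT() entry.
	 * Report the error in %r8 instead of branching to a stub. */
	if (fix->fixup & 1) {
		regs->gr[8] = -EFAULT;

		/* On a read fault, zero the target register so
		 * get_user() cannot leak uninitialized data.
		 * (insn_target_reg() is a hypothetical decoder.) */
		if (parisc_acctyp(0, regs->iir) == VM_READ)
			regs->gr[insn_target_reg(regs->iir)] = 0;
	}

	/* Resume at the fixup address (assumed stored relative to
	 * the entry), with the low tag bits stripped. */
	regs->iaoq[0] = ((unsigned long)&fix->fixup + fix->fixup) & ~3;
	regs->iaoq[1] = regs->iaoq[0] + 4;
	return 1;
}

One generic path like this is what makes the per-call-site fixup_get_user_skip_1/2 stubs, and their r1 clobber, removable.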
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 6b0741e7a7ed..667c99421003 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -362,8 +362,9 @@
#define __NR_copy_file_range (__NR_Linux + 346)
#define __NR_preadv2 (__NR_Linux + 347)
#define __NR_pwritev2 (__NR_Linux + 348)
+#define __NR_statx (__NR_Linux + 349)
-#define __NR_Linux_syscalls (__NR_pwritev2 + 1)
+#define __NR_Linux_syscalls (__NR_statx + 1)
#define __IGNORE_select /* newselect */
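With the number wired up, the new call can be exercised from userspace by number, which is handy before libc grows a statx() wrapper. A small, hypothetical test program; struct statx and the STATX_* constants come from linux/stat.h:

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <linux/stat.h>

int main(void)
{
	struct statx stx;

	/* statx(dirfd, path, flags, mask, buf), invoked by number */
	long ret = syscall(__NR_statx, AT_FDCWD, "/etc/hostname", 0,
			   STATX_BASIC_STATS, &stx);
	if (ret < 0) {
		perror("statx");
		return 1;
	}
	printf("size=%llu mode=%o\n",
	       (unsigned long long)stx.stx_size, stx.stx_mode);
	return 0;
}

Bumping __NR_Linux_syscalls to (__NR_statx + 1) at the same time keeps the syscall-table bounds check in step with the highest assigned number.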