author     Paul Mundt <lethal@linux-sh.org>        2007-05-14 12:52:56 +0900
committer  Paul Mundt <lethal@hera.kernel.org>     2007-06-08 02:43:36 +0000
commit     e08f457c7c0cc7720f28349f8780ea752c063441 (patch)
tree       7b82666f2002d57dc57d022daf90c778265159e9 /include/asm-sh
parent     7a302a9674593259866de4a9d5ae8edc03dc1934 (diff)
sh: __user annotations for __get/__put_user().
This adds in some more __user annotations. These weren't being handled
properly in some of the __get_user and __put_user paths, so tidy those up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
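For context, __user is a sparse address-space annotation: it generates no code, but it lets "make C=1" flag places where a userspace pointer is dereferenced directly instead of going through the uaccess helpers, which is what the __chk_user_ptr() calls added below enforce at the __get_user()/__put_user() level. A minimal sketch of the idea follows; the function and variable names are invented for illustration and are not part of this patch:

#include <linux/errno.h>
#include <asm/uaccess.h>

/*
 * Illustrative only: shows the class of bug the __user annotation catches.
 * The caller is assumed to have already validated uptr with access_ok().
 */
static long example_read_flag(int __user *uptr, int *kval)
{
	/* WRONG: *kval = *uptr; -- sparse flags this as a dereference of a
	 * __user (noderef) pointer. */

	/* RIGHT: fetch through __get_user(), which expects a __user pointer
	 * and, with this patch, also runs it through __chk_user_ptr(). */
	if (__get_user(*kval, uptr))
		return -EFAULT;

	return 0;
}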
Diffstat (limited to 'include/asm-sh')
-rw-r--r--  include/asm-sh/page.h      1
-rw-r--r--  include/asm-sh/sections.h  2
-rw-r--r--  include/asm-sh/system.h   14
-rw-r--r--  include/asm-sh/uaccess.h  40
4 files changed, 37 insertions(+), 20 deletions(-)
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 7464de4ba07d..011dfbe14a6b 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -60,6 +60,7 @@ extern void (*copy_page)(void *to, void *from);
extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
+extern unsigned long memory_start, memory_end;
#ifdef CONFIG_MMU
extern void clear_page_slow(void *to);
diff --git a/include/asm-sh/sections.h b/include/asm-sh/sections.h
index 57abd708b236..44c06c09e208 100644
--- a/include/asm-sh/sections.h
+++ b/include/asm-sh/sections.h
@@ -3,7 +3,5 @@
#include <asm-generic/sections.h>
-extern char _end[];
-
#endif /* __ASM_SH_SECTIONS_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 82f3e229e621..fb22fc3f87ad 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -8,9 +8,13 @@
#include <linux/irqflags.h>
#include <linux/compiler.h>
+#include <linux/linkage.h>
#include <asm/types.h>
#include <asm/ptrace.h>
+struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next);
+
/*
* switch_to() should switch tasks to task nr n, first
*/
@@ -271,6 +275,16 @@ extern unsigned int instruction_size(unsigned int insn);
void disable_hlt(void);
void enable_hlt(void);
+void default_idle(void);
+
+asmlinkage void break_point_trap(void);
+asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+
#define arch_align_stack(x) (x)
#endif
diff --git a/include/asm-sh/uaccess.h b/include/asm-sh/uaccess.h
index 5c49ed6715f2..f18a1a5c95c0 100644
--- a/include/asm-sh/uaccess.h
+++ b/include/asm-sh/uaccess.h
@@ -61,8 +61,6 @@ static inline void set_fs(mm_segment_t s)
*/
static inline int __access_ok(unsigned long addr, unsigned long size)
{
- extern unsigned long memory_start, memory_end;
-
return ((addr >= memory_start) && ((addr + size) < memory_end));
}
#else /* CONFIG_MMU */
@@ -76,7 +74,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
* __access_ok: Check if address with size is OK or not.
*
* We do three checks:
- * (1) is it user space?
+ * (1) is it user space?
* (2) addr + size --> carry?
* (3) addr + size >= 0x80000000 (PAGE_OFFSET)
*
@@ -142,11 +140,12 @@ static inline int access_ok(int type, const void __user *p, unsigned long size)
__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct *)(x))
+#define __m(x) (*(struct __large_struct __user *)(x))
#define __get_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
+ __chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_asm(x, ptr, retval, "b"); \
@@ -175,6 +174,7 @@ do { \
#define __get_user_check(x,ptr,size) \
({ \
long __gu_err, __gu_val; \
+ __chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_1(__gu_val, (ptr), __gu_err); \
@@ -300,6 +300,7 @@ extern void __get_user_unknown(void);
#define __put_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
+ __chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__put_user_asm(x, ptr, retval, "b"); \
@@ -328,7 +329,7 @@ do { \
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
\
if (__access_ok((unsigned long)__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
@@ -406,10 +407,10 @@ __asm__ __volatile__( \
#endif
extern void __put_user_unknown(void);
-
+
/* Generic arbitrary sized copy. */
/* Return the number of bytes NOT copied */
-extern __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
+__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
#define copy_to_user(to,from,n) ({ \
void *__copy_to = (void *) (to); \
@@ -420,14 +421,6 @@ __copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
} else __copy_res = __copy_size; \
__copy_res; })
-#define __copy_to_user(to,from,n) \
- __copy_user((void *)(to), \
- (void *)(from), n)
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-
#define copy_from_user(to,from,n) ({ \
void *__copy_to = (void *) (to); \
void *__copy_from = (void *) (from); \
@@ -438,9 +431,20 @@ __copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
} else __copy_res = __copy_size; \
__copy_res; })
-#define __copy_from_user(to,from,n) \
- __copy_user((void *)(to), \
- (void *)(from), n)
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return __copy_user(to, (__force void *)from, n);
+}
+
+static __always_inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return __copy_user((__force void *)to, from, n);
+}
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
/*
* Clear the area and return remaining number of bytes
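The final uaccess.h hunks turn the old untyped __copy_to_user()/__copy_from_user() macros into properly annotated static inlines. As a usage sketch (the function below is invented for illustration, not part of this patch), callers of the __-prefixed variants are expected to perform the access_ok() check themselves, since __copy_user() only reports how many bytes could not be copied:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/uaccess.h>

/* Illustrative caller: validate the user buffer once, then use the
 * unchecked __copy_to_user() (e.g. repeatedly inside a loop). */
static long example_fill_user_buf(void __user *ubuf, const void *kbuf, size_t len)
{
	if (!access_ok(VERIFY_WRITE, ubuf, len))
		return -EFAULT;

	/* __copy_to_user() returns the number of bytes NOT copied. */
	if (__copy_to_user(ubuf, kbuf, len))
		return -EFAULT;

	return 0;
}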