Diffstat (limited to 'include/asm-s390')
 include/asm-s390/atomic.h            |   8
 include/asm-s390/bitops.h            | 440
 include/asm-s390/cpcmd.h             |  18
 include/asm-s390/debug.h             |  48
 include/asm-s390/emergency-restart.h |   6
 include/asm-s390/kexec.h             |  42
 include/asm-s390/lowcore.h           |  11
 include/asm-s390/processor.h         |  57
 include/asm-s390/ptrace.h            |   2
 include/asm-s390/spinlock.h          | 252
 include/asm-s390/system.h            |  38
 include/asm-s390/thread_info.h       |   4
 include/asm-s390/uaccess.h           |  21
 include/asm-s390/unistd.h            |   9
14 files changed, 376 insertions(+), 580 deletions(-)
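
The bitops.h hunks below drop the open-coded inline-assembly find_next_zero_bit()/find_next_bit() routines in favour of generic helpers built on ffz()/__ffs() plus the existing find_first_*() scans. The following stand-alone C sketch shows only that word-scan pattern (narrow by halves, finish with a per-byte lookup); zb_findmap, build_table() and ffz_sketch() are illustrative names, and the run-time table stands in for the kernel's precomputed _zb_findmap[] array, so this is not the patched code itself.

/*
 * Word-scan pattern behind the new ffz()/__ffs() helpers: halve the
 * search range, then finish with a per-byte lookup table.
 */
#include <limits.h>
#include <stdio.h>

static unsigned char zb_findmap[256];	/* lowest zero bit of each byte value */

static void build_table(void)
{
	int value, bit;

	for (value = 0; value < 256; value++) {
		for (bit = 0; bit < 8 && (value & (1 << bit)); bit++)
			;
		zb_findmap[value] = bit;	/* 8 means "byte has no zero bit" */
	}
}

/* Find first zero bit; as in the kernel helper, undefined for word == ~0UL. */
static unsigned long ffz_sketch(unsigned long word)
{
	unsigned long bit = 0;

#if ULONG_MAX > 0xffffffffUL		/* 64-bit words, like the __s390x__ case */
	if ((word & 0xffffffffUL) == 0xffffffffUL) {
		word >>= 32;
		bit += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		bit += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		bit += 8;
	}
	return bit + zb_findmap[word & 0xff];
}

int main(void)
{
	build_table();
	printf("%lu\n", ffz_sketch(0x0000ffffUL));	/* prints 16 */
	printf("%lu\n", ffz_sketch(0UL));		/* prints 0  */
	return 0;
}

The new find_next_zero_bit()/find_next_bit() then only have to apply this helper to the partial first word and fall back to find_first_*() for the rest, which is why the patch can delete both 31-bit and 64-bit assembly variants.
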
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h index d5a05cf47168..9d86ba6f12d0 100644 --- a/include/asm-s390/atomic.h +++ b/include/asm-s390/atomic.h @@ -123,19 +123,19 @@ typedef struct { #define atomic64_read(v) ((v)->counter) #define atomic64_set(v,i) (((v)->counter) = (i)) -static __inline__ void atomic64_add(int i, atomic64_t * v) +static __inline__ void atomic64_add(long long i, atomic64_t * v) { __CSG_LOOP(v, i, "agr"); } -static __inline__ long long atomic64_add_return(int i, atomic64_t * v) +static __inline__ long long atomic64_add_return(long long i, atomic64_t * v) { return __CSG_LOOP(v, i, "agr"); } -static __inline__ long long atomic64_add_negative(int i, atomic64_t * v) +static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v) { return __CSG_LOOP(v, i, "agr") < 0; } -static __inline__ void atomic64_sub(int i, atomic64_t * v) +static __inline__ void atomic64_sub(long long i, atomic64_t * v) { __CSG_LOOP(v, i, "sgr"); } diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h index 16bb08499c7f..8651524217fd 100644 --- a/include/asm-s390/bitops.h +++ b/include/asm-s390/bitops.h @@ -527,13 +527,64 @@ __constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { __constant_test_bit((nr),(addr)) : \ __test_bit((nr),(addr)) ) -#ifndef __s390x__ +/* + * ffz = Find First Zero in word. Undefined if no zero exists, + * so code should check against ~0UL first.. + */ +static inline unsigned long ffz(unsigned long word) +{ + unsigned long bit = 0; + +#ifdef __s390x__ + if (likely((word & 0xffffffff) == 0xffffffff)) { + word >>= 32; + bit += 32; + } +#endif + if (likely((word & 0xffff) == 0xffff)) { + word >>= 16; + bit += 16; + } + if (likely((word & 0xff) == 0xff)) { + word >>= 8; + bit += 8; + } + return bit + _zb_findmap[word & 0xff]; +} + +/* + * __ffs = find first bit in word. Undefined if no bit exists, + * so code should check against 0UL first.. + */ +static inline unsigned long __ffs (unsigned long word) +{ + unsigned long bit = 0; + +#ifdef __s390x__ + if (likely((word & 0xffffffff) == 0)) { + word >>= 32; + bit += 32; + } +#endif + if (likely((word & 0xffff) == 0)) { + word >>= 16; + bit += 16; + } + if (likely((word & 0xff) == 0)) { + word >>= 8; + bit += 8; + } + return bit + _sb_findmap[word & 0xff]; +} /* * Find-bit routines.. */ + +#ifndef __s390x__ + static inline int -find_first_zero_bit(const unsigned long * addr, unsigned int size) +find_first_zero_bit(const unsigned long * addr, unsigned long size) { typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; unsigned long cmp, count; @@ -548,7 +599,7 @@ find_first_zero_bit(const unsigned long * addr, unsigned int size) " srl %2,5\n" "0: c %1,0(%0,%4)\n" " jne 1f\n" - " ahi %0,4\n" + " la %0,4(%0)\n" " brct %2,0b\n" " lr %0,%3\n" " j 4f\n" @@ -574,7 +625,7 @@ find_first_zero_bit(const unsigned long * addr, unsigned int size) } static inline int -find_first_bit(const unsigned long * addr, unsigned int size) +find_first_bit(const unsigned long * addr, unsigned long size) { typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; unsigned long cmp, count; @@ -589,7 +640,7 @@ find_first_bit(const unsigned long * addr, unsigned int size) " srl %2,5\n" "0: c %1,0(%0,%4)\n" " jne 1f\n" - " ahi %0,4\n" + " la %0,4(%0)\n" " brct %2,0b\n" " lr %0,%3\n" " j 4f\n" @@ -614,89 +665,8 @@ find_first_bit(const unsigned long * addr, unsigned int size) return (res < size) ? 
res : size; } -static inline int -find_next_zero_bit (const unsigned long * addr, int size, int offset) -{ - unsigned long * p = ((unsigned long *) addr) + (offset >> 5); - unsigned long bitvec, reg; - int set, bit = offset & 31, res; - - if (bit) { - /* - * Look for zero in first word - */ - bitvec = (*p) >> bit; - __asm__(" slr %0,%0\n" - " lhi %2,0xff\n" - " tml %1,0xffff\n" - " jno 0f\n" - " ahi %0,16\n" - " srl %1,16\n" - "0: tml %1,0x00ff\n" - " jno 1f\n" - " ahi %0,8\n" - " srl %1,8\n" - "1: nr %1,%2\n" - " ic %1,0(%1,%3)\n" - " alr %0,%1" - : "=&d" (set), "+a" (bitvec), "=&d" (reg) - : "a" (&_zb_findmap) : "cc" ); - if (set < (32 - bit)) - return set + offset; - offset += 32 - bit; - p++; - } - /* - * No zero yet, search remaining full words for a zero - */ - res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr)); - return (offset + res); -} - -static inline int -find_next_bit (const unsigned long * addr, int size, int offset) -{ - unsigned long * p = ((unsigned long *) addr) + (offset >> 5); - unsigned long bitvec, reg; - int set, bit = offset & 31, res; - - if (bit) { - /* - * Look for set bit in first word - */ - bitvec = (*p) >> bit; - __asm__(" slr %0,%0\n" - " lhi %2,0xff\n" - " tml %1,0xffff\n" - " jnz 0f\n" - " ahi %0,16\n" - " srl %1,16\n" - "0: tml %1,0x00ff\n" - " jnz 1f\n" - " ahi %0,8\n" - " srl %1,8\n" - "1: nr %1,%2\n" - " ic %1,0(%1,%3)\n" - " alr %0,%1" - : "=&d" (set), "+a" (bitvec), "=&d" (reg) - : "a" (&_sb_findmap) : "cc" ); - if (set < (32 - bit)) - return set + offset; - offset += 32 - bit; - p++; - } - /* - * No set bit yet, search remaining full words for a bit - */ - res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr)); - return (offset + res); -} - #else /* __s390x__ */ -/* - * Find-bit routines.. - */ static inline unsigned long find_first_zero_bit(const unsigned long * addr, unsigned long size) { @@ -712,7 +682,7 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size) " srlg %2,%2,6\n" "0: cg %1,0(%0,%4)\n" " jne 1f\n" - " aghi %0,8\n" + " la %0,8(%0)\n" " brct %2,0b\n" " lgr %0,%3\n" " j 5f\n" @@ -785,143 +755,66 @@ find_first_bit(const unsigned long * addr, unsigned long size) return (res < size) ? 
res : size; } -static inline unsigned long -find_next_zero_bit (const unsigned long * addr, unsigned long size, unsigned long offset) -{ - unsigned long * p = ((unsigned long *) addr) + (offset >> 6); - unsigned long bitvec, reg; - unsigned long set, bit = offset & 63, res; - - if (bit) { - /* - * Look for zero in first word - */ - bitvec = (*p) >> bit; - __asm__(" lhi %2,-1\n" - " slgr %0,%0\n" - " clr %1,%2\n" - " jne 0f\n" - " aghi %0,32\n" - " srlg %1,%1,32\n" - "0: lghi %2,0xff\n" - " tmll %1,0xffff\n" - " jno 1f\n" - " aghi %0,16\n" - " srlg %1,%1,16\n" - "1: tmll %1,0x00ff\n" - " jno 2f\n" - " aghi %0,8\n" - " srlg %1,%1,8\n" - "2: ngr %1,%2\n" - " ic %1,0(%1,%3)\n" - " algr %0,%1" - : "=&d" (set), "+a" (bitvec), "=&d" (reg) - : "a" (&_zb_findmap) : "cc" ); - if (set < (64 - bit)) - return set + offset; - offset += 64 - bit; - p++; - } - /* - * No zero yet, search remaining full words for a zero - */ - res = find_first_zero_bit (p, size - 64 * (p - (unsigned long *) addr)); - return (offset + res); -} - -static inline unsigned long -find_next_bit (const unsigned long * addr, unsigned long size, unsigned long offset) -{ - unsigned long * p = ((unsigned long *) addr) + (offset >> 6); - unsigned long bitvec, reg; - unsigned long set, bit = offset & 63, res; - - if (bit) { - /* - * Look for zero in first word - */ - bitvec = (*p) >> bit; - __asm__(" slgr %0,%0\n" - " ltr %1,%1\n" - " jnz 0f\n" - " aghi %0,32\n" - " srlg %1,%1,32\n" - "0: lghi %2,0xff\n" - " tmll %1,0xffff\n" - " jnz 1f\n" - " aghi %0,16\n" - " srlg %1,%1,16\n" - "1: tmll %1,0x00ff\n" - " jnz 2f\n" - " aghi %0,8\n" - " srlg %1,%1,8\n" - "2: ngr %1,%2\n" - " ic %1,0(%1,%3)\n" - " algr %0,%1" - : "=&d" (set), "+a" (bitvec), "=&d" (reg) - : "a" (&_sb_findmap) : "cc" ); - if (set < (64 - bit)) - return set + offset; - offset += 64 - bit; - p++; - } - /* - * No set bit yet, search remaining full words for a bit - */ - res = find_first_bit (p, size - 64 * (p - (unsigned long *) addr)); - return (offset + res); -} - #endif /* __s390x__ */ -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. - */ -static inline unsigned long ffz(unsigned long word) +static inline int +find_next_zero_bit (const unsigned long * addr, unsigned long size, + unsigned long offset) { - unsigned long bit = 0; - -#ifdef __s390x__ - if (likely((word & 0xffffffff) == 0xffffffff)) { - word >>= 32; - bit += 32; - } -#endif - if (likely((word & 0xffff) == 0xffff)) { - word >>= 16; - bit += 16; + const unsigned long *p; + unsigned long bit, set; + + if (offset >= size) + return size; + bit = offset & (__BITOPS_WORDSIZE - 1); + offset -= bit; + size -= offset; + p = addr + offset / __BITOPS_WORDSIZE; + if (bit) { + /* + * s390 version of ffz returns __BITOPS_WORDSIZE + * if no zero bit is present in the word. + */ + set = ffz(*p >> bit) + bit; + if (set >= size) + return size + offset; + if (set < __BITOPS_WORDSIZE) + return set + offset; + offset += __BITOPS_WORDSIZE; + size -= __BITOPS_WORDSIZE; + p++; } - if (likely((word & 0xff) == 0xff)) { - word >>= 8; - bit += 8; - } - return bit + _zb_findmap[word & 0xff]; + return offset + find_first_zero_bit(p, size); } -/* - * __ffs = find first bit in word. Undefined if no bit exists, - * so code should check against 0UL first.. 
- */ -static inline unsigned long __ffs (unsigned long word) +static inline int +find_next_bit (const unsigned long * addr, unsigned long size, + unsigned long offset) { - unsigned long bit = 0; - -#ifdef __s390x__ - if (likely((word & 0xffffffff) == 0)) { - word >>= 32; - bit += 32; + const unsigned long *p; + unsigned long bit, set; + + if (offset >= size) + return size; + bit = offset & (__BITOPS_WORDSIZE - 1); + offset -= bit; + size -= offset; + p = addr + offset / __BITOPS_WORDSIZE; + if (bit) { + /* + * s390 version of __ffs returns __BITOPS_WORDSIZE + * if no one bit is present in the word. + */ + set = __ffs(*p & (~0UL << bit)); + if (set >= size) + return size + offset; + if (set < __BITOPS_WORDSIZE) + return set + offset; + offset += __BITOPS_WORDSIZE; + size -= __BITOPS_WORDSIZE; + p++; } -#endif - if (likely((word & 0xffff) == 0)) { - word >>= 16; - bit += 16; - } - if (likely((word & 0xff) == 0)) { - word >>= 8; - bit += 8; - } - return bit + _sb_findmap[word & 0xff]; + return offset + find_first_bit(p, size); } /* @@ -1031,49 +924,6 @@ ext2_find_first_zero_bit(void *vaddr, unsigned int size) return (res < size) ? res : size; } -static inline int -ext2_find_next_zero_bit(void *vaddr, unsigned int size, unsigned offset) -{ - unsigned long *addr = vaddr; - unsigned long *p = addr + (offset >> 5); - unsigned long word, reg; - unsigned int bit = offset & 31UL, res; - - if (offset >= size) - return size; - - if (bit) { - __asm__(" ic %0,0(%1)\n" - " icm %0,2,1(%1)\n" - " icm %0,4,2(%1)\n" - " icm %0,8,3(%1)" - : "=&a" (word) : "a" (p) : "cc" ); - word >>= bit; - res = bit; - /* Look for zero in first longword */ - __asm__(" lhi %2,0xff\n" - " tml %1,0xffff\n" - " jno 0f\n" - " ahi %0,16\n" - " srl %1,16\n" - "0: tml %1,0x00ff\n" - " jno 1f\n" - " ahi %0,8\n" - " srl %1,8\n" - "1: nr %1,%2\n" - " ic %1,0(%1,%3)\n" - " alr %0,%1" - : "+&d" (res), "+&a" (word), "=&d" (reg) - : "a" (&_zb_findmap) : "cc" ); - if (res < 32) - return (p - addr)*32 + res; - p++; - } - /* No zero yet, search remaining full bytes for a zero */ - res = ext2_find_first_zero_bit (p, size - 32 * (p - addr)); - return (p - addr) * 32 + res; -} - #else /* __s390x__ */ static inline unsigned long @@ -1120,56 +970,46 @@ ext2_find_first_zero_bit(void *vaddr, unsigned long size) return (res < size) ? 
res : size; } -static inline unsigned long +#endif /* __s390x__ */ + +static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset) { - unsigned long *addr = vaddr; - unsigned long *p = addr + (offset >> 6); - unsigned long word, reg; - unsigned long bit = offset & 63UL, res; + unsigned long *addr = vaddr, *p; + unsigned long word, bit, set; if (offset >= size) return size; - + bit = offset & (__BITOPS_WORDSIZE - 1); + offset -= bit; + size -= offset; + p = addr + offset / __BITOPS_WORDSIZE; if (bit) { - __asm__(" lrvg %0,%1" /* load reversed, neat instruction */ - : "=a" (word) : "m" (*p) ); - word >>= bit; - res = bit; - /* Look for zero in first 8 byte word */ - __asm__(" lghi %2,0xff\n" - " tmll %1,0xffff\n" - " jno 2f\n" - " ahi %0,16\n" - " srlg %1,%1,16\n" - "0: tmll %1,0xffff\n" - " jno 2f\n" - " ahi %0,16\n" - " srlg %1,%1,16\n" - "1: tmll %1,0xffff\n" - " jno 2f\n" - " ahi %0,16\n" - " srl %1,16\n" - "2: tmll %1,0x00ff\n" - " jno 3f\n" - " ahi %0,8\n" - " srl %1,8\n" - "3: ngr %1,%2\n" - " ic %1,0(%1,%3)\n" - " alr %0,%1" - : "+&d" (res), "+a" (word), "=&d" (reg) - : "a" (&_zb_findmap) : "cc" ); - if (res < 64) - return (p - addr)*64 + res; - p++; +#ifndef __s390x__ + asm(" ic %0,0(%1)\n" + " icm %0,2,1(%1)\n" + " icm %0,4,2(%1)\n" + " icm %0,8,3(%1)" + : "=&a" (word) : "a" (p), "m" (*p) : "cc" ); +#else + asm(" lrvg %0,%1" : "=a" (word) : "m" (*p) ); +#endif + /* + * s390 version of ffz returns __BITOPS_WORDSIZE + * if no zero bit is present in the word. + */ + set = ffz(word >> bit) + bit; + if (set >= size) + return size + offset; + if (set < __BITOPS_WORDSIZE) + return set + offset; + offset += __BITOPS_WORDSIZE; + size -= __BITOPS_WORDSIZE; + p++; } - /* No zero yet, search remaining full bytes for a zero */ - res = ext2_find_first_zero_bit (p, size - 64 * (p - addr)); - return (p - addr) * 64 + res; + return offset + ext2_find_first_zero_bit(p, size); } -#endif /* __s390x__ */ - /* Bitmap functions for the minix filesystem. */ /* FIXME !!! */ #define minix_test_and_set_bit(nr,addr) \ diff --git a/include/asm-s390/cpcmd.h b/include/asm-s390/cpcmd.h index 1d33c5da083e..1fcf65be7a23 100644 --- a/include/asm-s390/cpcmd.h +++ b/include/asm-s390/cpcmd.h @@ -11,14 +11,28 @@ #define __CPCMD__ /* + * the lowlevel function for cpcmd * the caller of __cpcmd has to ensure that the response buffer is below 2 GB */ -extern void __cpcmd(char *cmd, char *response, int rlen); +extern int __cpcmd(const char *cmd, char *response, int rlen, int *response_code); #ifndef __s390x__ #define cpcmd __cpcmd #else -extern void cpcmd(char *cmd, char *response, int rlen); +/* + * cpcmd is the in-kernel interface for issuing CP commands + * + * cmd: null-terminated command string, max 240 characters + * response: response buffer for VM's textual response + * rlen: size of the response buffer, cpcmd will not exceed this size + * but will cap the output, if its too large. Everything that + * did not fit into the buffer will be silently dropped + * response_code: return pointer for VM's error code + * return value: the size of the response. 
The caller can check if the buffer + * was large enough by comparing the return value and rlen + * NOTE: If the response buffer is not below 2 GB, cpcmd can sleep + */ +extern int cpcmd(const char *cmd, char *response, int rlen, int *response_code); #endif /*__s390x__*/ #endif diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h index 6bbcdea42a86..92360d90144b 100644 --- a/include/asm-s390/debug.h +++ b/include/asm-s390/debug.h @@ -9,6 +9,8 @@ #ifndef DEBUG_H #define DEBUG_H +#include <linux/config.h> +#include <linux/fs.h> #include <linux/string.h> /* Note: @@ -31,19 +33,18 @@ struct __debug_entry{ } __attribute__((packed)); -#define __DEBUG_FEATURE_VERSION 1 /* version of debug feature */ +#define __DEBUG_FEATURE_VERSION 2 /* version of debug feature */ #ifdef __KERNEL__ #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/time.h> -#include <linux/proc_fs.h> #define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */ #define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */ #define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */ #define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */ -#define DEBUG_MAX_PROCF_LEN 64 /* max length for a proc file name */ +#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */ #define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */ #define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */ @@ -64,16 +65,17 @@ typedef struct debug_info { spinlock_t lock; int level; int nr_areas; - int page_order; + int pages_per_area; int buf_size; int entry_size; - debug_entry_t** areas; + debug_entry_t*** areas; int active_area; - int *active_entry; - struct proc_dir_entry* proc_root_entry; - struct proc_dir_entry* proc_entries[DEBUG_MAX_VIEWS]; + int *active_pages; + int *active_entries; + struct dentry* debugfs_root_entry; + struct dentry* debugfs_entries[DEBUG_MAX_VIEWS]; struct debug_view* views[DEBUG_MAX_VIEWS]; - char name[DEBUG_MAX_PROCF_LEN]; + char name[DEBUG_MAX_NAME_LEN]; } debug_info_t; typedef int (debug_header_proc_t) (debug_info_t* id, @@ -98,7 +100,7 @@ int debug_dflt_header_fn(debug_info_t* id, struct debug_view* view, int area, debug_entry_t* entry, char* out_buf); struct debug_view { - char name[DEBUG_MAX_PROCF_LEN]; + char name[DEBUG_MAX_NAME_LEN]; debug_prolog_proc_t* prolog_proc; debug_header_proc_t* header_proc; debug_format_proc_t* format_proc; @@ -120,7 +122,7 @@ debug_entry_t* debug_exception_common(debug_info_t* id, int level, /* Debug Feature API: */ -debug_info_t* debug_register(char* name, int pages_index, int nr_areas, +debug_info_t* debug_register(char* name, int pages, int nr_areas, int buf_size); void debug_unregister(debug_info_t* id); @@ -132,7 +134,8 @@ void debug_stop_all(void); extern inline debug_entry_t* debug_event(debug_info_t* id, int level, void* data, int length) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,data,length); } @@ -140,7 +143,8 @@ extern inline debug_entry_t* debug_int_event(debug_info_t* id, int level, unsigned int tag) { unsigned int t=tag; - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,&t,sizeof(unsigned int)); } @@ -148,14 +152,16 @@ extern inline debug_entry_t * debug_long_event (debug_info_t* id, int level, unsigned long tag) { unsigned long t=tag; - if ((!id) || (level > id->level)) 
return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,&t,sizeof(unsigned long)); } extern inline debug_entry_t* debug_text_event(debug_info_t* id, int level, const char* txt) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,txt,strlen(txt)); } @@ -167,7 +173,8 @@ debug_sprintf_event(debug_info_t* id,int level,char *string,...) extern inline debug_entry_t* debug_exception(debug_info_t* id, int level, void* data, int length) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,data,length); } @@ -175,7 +182,8 @@ extern inline debug_entry_t* debug_int_exception(debug_info_t* id, int level, unsigned int tag) { unsigned int t=tag; - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,&t,sizeof(unsigned int)); } @@ -183,14 +191,16 @@ extern inline debug_entry_t * debug_long_exception (debug_info_t* id, int level, unsigned long tag) { unsigned long t=tag; - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,&t,sizeof(unsigned long)); } extern inline debug_entry_t* debug_text_exception(debug_info_t* id, int level, const char* txt) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,txt,strlen(txt)); } diff --git a/include/asm-s390/emergency-restart.h b/include/asm-s390/emergency-restart.h new file mode 100644 index 000000000000..108d8c48e42e --- /dev/null +++ b/include/asm-s390/emergency-restart.h @@ -0,0 +1,6 @@ +#ifndef _ASM_EMERGENCY_RESTART_H +#define _ASM_EMERGENCY_RESTART_H + +#include <asm-generic/emergency-restart.h> + +#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h new file mode 100644 index 000000000000..54cf7d9f251c --- /dev/null +++ b/include/asm-s390/kexec.h @@ -0,0 +1,42 @@ +/* + * include/asm-s390/kexec.h + * + * (C) Copyright IBM Corp. 2005 + * + * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com> + * + */ + +#ifndef _S390_KEXEC_H +#define _S390_KEXEC_H + +#include <asm/page.h> +#include <asm/processor.h> +/* + * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. + * I.e. Maximum page that is mapped directly into kernel memory, + * and kmap is not required. 
+ */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control pages */ +/* Not more than 2GB */ +#define KEXEC_CONTROL_MEMORY_LIMIT (1<<31) + +/* Allocate one page for the pdp and the second for the code */ +#define KEXEC_CONTROL_CODE_SIZE 4096 + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_S390 + +#define MAX_NOTE_BYTES 1024 +typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; + +extern note_buf_t crash_notes[]; + +#endif /*_S390_KEXEC_H */ diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index df5172fc589d..afe6a9f9b0ae 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h @@ -90,7 +90,6 @@ #define __LC_SYSTEM_TIMER 0x278 #define __LC_LAST_UPDATE_CLOCK 0x280 #define __LC_STEAL_CLOCK 0x288 -#define __LC_DIAG44_OPCODE 0x290 #define __LC_KERNEL_STACK 0xD40 #define __LC_THREAD_INFO 0xD48 #define __LC_ASYNC_STACK 0xD50 @@ -109,10 +108,14 @@ #ifndef __s390x__ #define __LC_PFAULT_INTPARM 0x080 +#define __LC_CPU_TIMER_SAVE_AREA 0x0D8 #define __LC_AREGS_SAVE_AREA 0x120 +#define __LC_GPREGS_SAVE_AREA 0x180 #define __LC_CREGS_SAVE_AREA 0x1C0 #else /* __s390x__ */ #define __LC_PFAULT_INTPARM 0x11B8 +#define __LC_GPREGS_SAVE_AREA 0x1280 +#define __LC_CPU_TIMER_SAVE_AREA 0x1328 #define __LC_AREGS_SAVE_AREA 0x1340 #define __LC_CREGS_SAVE_AREA 0x1380 #endif /* __s390x__ */ @@ -167,7 +170,8 @@ struct _lowcore __u16 subchannel_nr; /* 0x0ba */ __u32 io_int_parm; /* 0x0bc */ __u32 io_int_word; /* 0x0c0 */ - __u8 pad3[0xD8-0xC4]; /* 0x0c4 */ + __u8 pad3[0xD4-0xC4]; /* 0x0c4 */ + __u32 extended_save_area_addr; /* 0x0d4 */ __u32 cpu_timer_save_area[2]; /* 0x0d8 */ __u32 clock_comp_save_area[2]; /* 0x0e0 */ __u32 mcck_interruption_code[2]; /* 0x0e8 */ @@ -281,8 +285,7 @@ struct _lowcore __u64 system_timer; /* 0x278 */ __u64 last_update_clock; /* 0x280 */ __u64 steal_clock; /* 0x288 */ - __u32 diag44_opcode; /* 0x290 */ - __u8 pad8[0xc00-0x294]; /* 0x294 */ + __u8 pad8[0xc00-0x290]; /* 0x290 */ /* System info area */ __u64 save_area[16]; /* 0xc00 */ __u8 pad9[0xd40-0xc80]; /* 0xc80 */ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index fb46e9090b50..4ec652ebb3b1 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -203,10 +203,25 @@ unsigned long get_wchan(struct task_struct *p); # define cpu_relax() asm volatile ("diag 0,0,68" : : : "memory") #else /* __s390x__ */ # define cpu_relax() \ - asm volatile ("ex 0,%0" : : "i" (__LC_DIAG44_OPCODE) : "memory") + do { \ + if (MACHINE_HAS_DIAG44) \ + asm volatile ("diag 0,0,68" : : : "memory"); \ + } while (0) #endif /* __s390x__ */ /* + * Set PSW to specified value. + */ +static inline void __load_psw(psw_t psw) +{ +#ifndef __s390x__ + asm volatile ("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); +#else + asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); +#endif +} + +/* * Set PSW mask to specified value, while leaving the * PSW addr pointing to the next instruction. 
*/ @@ -214,8 +229,8 @@ unsigned long get_wchan(struct task_struct *p); static inline void __load_psw_mask (unsigned long mask) { unsigned long addr; - psw_t psw; + psw.mask = mask; #ifndef __s390x__ @@ -241,30 +256,8 @@ static inline void __load_psw_mask (unsigned long mask) */ static inline void enabled_wait(void) { - unsigned long reg; - psw_t wait_psw; - - wait_psw.mask = PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT | - PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY; -#ifndef __s390x__ - asm volatile ( - " basr %0,0\n" - "0: la %0,1f-0b(%0)\n" - " st %0,4(%1)\n" - " oi 4(%1),0x80\n" - " lpsw 0(%1)\n" - "1:" - : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw) - : "memory", "cc" ); -#else /* __s390x__ */ - asm volatile ( - " larl %0,0f\n" - " stg %0,8(%1)\n" - " lpswe 0(%1)\n" - "0:" - : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw) - : "memory", "cc" ); -#endif /* __s390x__ */ + __load_psw_mask(PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT | + PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY); } /* @@ -273,13 +266,11 @@ static inline void enabled_wait(void) static inline void disabled_wait(unsigned long code) { - char psw_buffer[2*sizeof(psw_t)]; unsigned long ctl_buf; - psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1) - & -sizeof(psw_t)); + psw_t dw_psw; - dw_psw->mask = PSW_BASE_BITS | PSW_MASK_WAIT; - dw_psw->addr = code; + dw_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT; + dw_psw.addr = code; /* * Store status and then load disabled wait psw, * the processor is dead afterwards @@ -301,7 +292,7 @@ static inline void disabled_wait(unsigned long code) " oi 0x1c0,0x10\n" /* fake protection bit */ " lpsw 0(%1)" : "=m" (ctl_buf) - : "a" (dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); + : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); #else /* __s390x__ */ asm volatile (" stctg 0,0,0(%2)\n" " ni 4(%2),0xef\n" /* switch off protection */ @@ -333,7 +324,7 @@ static inline void disabled_wait(unsigned long code) " oi 0x384(1),0x10\n" /* fake protection bit */ " lpswe 0(%1)" : "=m" (ctl_buf) - : "a" (dw_psw), "a" (&ctl_buf), + : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); #endif /* __s390x__ */ } diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h index 4eff8f2e3bf1..fc7c96edc697 100644 --- a/include/asm-s390/ptrace.h +++ b/include/asm-s390/ptrace.h @@ -276,7 +276,7 @@ typedef struct #endif /* __s390x__ */ #define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \ - PSW_DEFAULT_KEY) + PSW_MASK_MCHECK | PSW_DEFAULT_KEY) #define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \ PSW_MASK_PSTATE | PSW_DEFAULT_KEY) diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h index 53cc736b9820..8ff10300f7ee 100644 --- a/include/asm-s390/spinlock.h +++ b/include/asm-s390/spinlock.h @@ -11,21 +11,16 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H -#ifdef __s390x__ -/* - * Grmph, take care of %&#! user space programs that include - * asm/spinlock.h. The diagnose is only available in kernel - * context. 
- */ -#ifdef __KERNEL__ -#include <asm/lowcore.h> -#define __DIAG44_INSN "ex" -#define __DIAG44_OPERAND __LC_DIAG44_OPCODE -#else -#define __DIAG44_INSN "#" -#define __DIAG44_OPERAND 0 -#endif -#endif /* __s390x__ */ +static inline int +_raw_compare_and_swap(volatile unsigned int *lock, + unsigned int old, unsigned int new) +{ + asm volatile ("cs %0,%3,0(%4)" + : "=d" (old), "=m" (*lock) + : "0" (old), "d" (new), "a" (lock), "m" (*lock) + : "cc", "memory" ); + return old; +} /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -41,58 +36,35 @@ typedef struct { #endif } __attribute__ ((aligned (4))) spinlock_t; -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } -#define spin_lock_init(lp) do { (lp)->lock = 0; } while(0) +#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } +#define spin_lock_init(lp) do { (lp)->lock = 0; } while(0) #define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock) -#define spin_is_locked(x) ((x)->lock != 0) +#define spin_is_locked(x) ((x)->lock != 0) #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) -extern inline void _raw_spin_lock(spinlock_t *lp) +extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc); +extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc); + +static inline void _raw_spin_lock(spinlock_t *lp) { -#ifndef __s390x__ - unsigned int reg1, reg2; - __asm__ __volatile__(" bras %0,1f\n" - "0: diag 0,0,68\n" - "1: slr %1,%1\n" - " cs %1,%0,0(%3)\n" - " jl 0b\n" - : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock) - : "a" (&lp->lock), "m" (lp->lock) - : "cc", "memory" ); -#else /* __s390x__ */ - unsigned long reg1, reg2; - __asm__ __volatile__(" bras %1,1f\n" - "0: " __DIAG44_INSN " 0,%4\n" - "1: slr %0,%0\n" - " cs %0,%1,0(%3)\n" - " jl 0b\n" - : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock) - : "a" (&lp->lock), "i" (__DIAG44_OPERAND), - "m" (lp->lock) : "cc", "memory" ); -#endif /* __s390x__ */ + unsigned long pc = (unsigned long) __builtin_return_address(0); + + if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0)) + _raw_spin_lock_wait(lp, pc); } -extern inline int _raw_spin_trylock(spinlock_t *lp) +static inline int _raw_spin_trylock(spinlock_t *lp) { - unsigned long reg; - unsigned int result; - - __asm__ __volatile__(" basr %1,0\n" - "0: cs %0,%1,0(%3)" - : "=d" (result), "=&d" (reg), "=m" (lp->lock) - : "a" (&lp->lock), "m" (lp->lock), "0" (0) - : "cc", "memory" ); - return !result; + unsigned long pc = (unsigned long) __builtin_return_address(0); + + if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0)) + return 1; + return _raw_spin_trylock_retry(lp, pc); } -extern inline void _raw_spin_unlock(spinlock_t *lp) +static inline void _raw_spin_unlock(spinlock_t *lp) { - unsigned int old; - - __asm__ __volatile__("cs %0,%3,0(%4)" - : "=d" (old), "=m" (lp->lock) - : "0" (lp->lock), "d" (0), "a" (lp) - : "cc", "memory" ); + _raw_compare_and_swap(&lp->lock, lp->lock, 0); } /* @@ -106,7 +78,7 @@ extern inline void _raw_spin_unlock(spinlock_t *lp) * read-locks. 
*/ typedef struct { - volatile unsigned long lock; + volatile unsigned int lock; volatile unsigned long owner_pc; #ifdef CONFIG_PREEMPT unsigned int break_lock; @@ -129,123 +101,55 @@ typedef struct { */ #define write_can_lock(x) ((x)->lock == 0) -#ifndef __s390x__ -#define _raw_read_lock(rw) \ - asm volatile(" l 2,0(%1)\n" \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: la 2,0(2)\n" /* clear high (=write) bit */ \ - " la 3,1(2)\n" /* one more reader */ \ - " cs 2,3,0(%1)\n" /* try to write new value */ \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_read_lock(rw) \ - asm volatile(" lg 2,0(%1)\n" \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: nihh 2,0x7fff\n" /* clear high (=write) bit */ \ - " la 3,1(2)\n" /* one more reader */ \ - " csg 2,3,0(%1)\n" /* try to write new value */ \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#ifndef __s390x__ -#define _raw_read_unlock(rw) \ - asm volatile(" l 2,0(%1)\n" \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: lr 3,2\n" \ - " ahi 3,-1\n" /* one less reader */ \ - " cs 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_read_unlock(rw) \ - asm volatile(" lg 2,0(%1)\n" \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: lgr 3,2\n" \ - " bctgr 3,0\n" /* one less reader */ \ - " csg 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#ifndef __s390x__ -#define _raw_write_lock(rw) \ - asm volatile(" lhi 3,1\n" \ - " sll 3,31\n" /* new lock value = 0x80000000 */ \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: slr 2,2\n" /* old lock value must be 0 */ \ - " cs 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_write_lock(rw) \ - asm volatile(" llihh 3,0x8000\n" /* new lock value = 0x80...0 */ \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: slgr 2,2\n" /* old lock value must be 0 */ \ - " csg 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#ifndef __s390x__ -#define _raw_write_unlock(rw) \ - asm volatile(" slr 3,3\n" /* new lock value = 0 */ \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: lhi 2,1\n" \ - " sll 2,31\n" /* old lock value must be 0x80000000 */ \ - " cs 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_write_unlock(rw) \ - asm volatile(" slgr 3,3\n" /* new lock value = 0 */ \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: llihh 2,0x8000\n" /* old lock value must be 0x8..0 */\ - " csg 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#define _raw_read_trylock(lock) generic_raw_read_trylock(lock) - -extern inline int _raw_write_trylock(rwlock_t *rw) +extern void _raw_read_lock_wait(rwlock_t *lp); +extern int _raw_read_trylock_retry(rwlock_t *lp); +extern void _raw_write_lock_wait(rwlock_t *lp); +extern int 
_raw_write_trylock_retry(rwlock_t *lp); + +static inline void _raw_read_lock(rwlock_t *rw) +{ + unsigned int old; + old = rw->lock & 0x7fffffffU; + if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old) + _raw_read_lock_wait(rw); +} + +static inline void _raw_read_unlock(rwlock_t *rw) +{ + unsigned int old, cmp; + + old = rw->lock; + do { + cmp = old; + old = _raw_compare_and_swap(&rw->lock, old, old - 1); + } while (cmp != old); +} + +static inline void _raw_write_lock(rwlock_t *rw) +{ + if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) + _raw_write_lock_wait(rw); +} + +static inline void _raw_write_unlock(rwlock_t *rw) +{ + _raw_compare_and_swap(&rw->lock, 0x80000000, 0); +} + +static inline int _raw_read_trylock(rwlock_t *rw) +{ + unsigned int old; + old = rw->lock & 0x7fffffffU; + if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old)) + return 1; + return _raw_read_trylock_retry(rw); +} + +static inline int _raw_write_trylock(rwlock_t *rw) { - unsigned long result, reg; - - __asm__ __volatile__( -#ifndef __s390x__ - " lhi %1,1\n" - " sll %1,31\n" - " cs %0,%1,0(%3)" -#else /* __s390x__ */ - " llihh %1,0x8000\n" - "0: csg %0,%1,0(%3)\n" -#endif /* __s390x__ */ - : "=d" (result), "=&d" (reg), "=m" (rw->lock) - : "a" (&rw->lock), "m" (rw->lock), "0" (0UL) - : "cc", "memory" ); - return result == 0; + if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) + return 1; + return _raw_write_trylock_retry(rw); } #endif /* __ASM_SPINLOCK_H */ diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index 81514d76edcf..864cae7e1fd6 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h @@ -16,6 +16,7 @@ #include <asm/types.h> #include <asm/ptrace.h> #include <asm/setup.h> +#include <asm/processor.h> #ifdef __KERNEL__ @@ -103,29 +104,16 @@ static inline void restore_access_regs(unsigned int *acrs) prev = __switch_to(prev,next); \ } while (0) -#define prepare_arch_switch(rq, next) do { } while(0) -#define task_running(rq, p) ((rq)->curr == (p)) - #ifdef CONFIG_VIRT_CPU_ACCOUNTING extern void account_user_vtime(struct task_struct *); extern void account_system_vtime(struct task_struct *); +#endif -#define finish_arch_switch(rq, prev) do { \ +#define finish_arch_switch(prev) do { \ set_fs(current->thread.mm_segment); \ - spin_unlock(&(rq)->lock); \ account_system_vtime(prev); \ - local_irq_enable(); \ } while (0) -#else - -#define finish_arch_switch(rq, prev) do { \ - set_fs(current->thread.mm_segment); \ - spin_unlock_irq(&(rq)->lock); \ -} while (0) - -#endif - #define nop() __asm__ __volatile__ ("nop") #define xchg(ptr,x) \ @@ -331,9 +319,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #ifdef __s390x__ -#define __load_psw(psw) \ - __asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); - #define __ctl_load(array, low, high) ({ \ typedef struct { char _[sizeof(array)]; } addrtype; \ __asm__ __volatile__ ( \ @@ -390,9 +375,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #else /* __s390x__ */ -#define __load_psw(psw) \ - __asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" ); - #define __ctl_load(array, low, high) ({ \ typedef struct { char _[sizeof(array)]; } addrtype; \ __asm__ __volatile__ ( \ @@ -451,6 +433,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) /* For spinlocks etc */ #define local_irq_save(x) ((x) = local_irq_disable()) +/* + * Use to set psw mask except for the first byte which + * won't be 
changed by this function. + */ +static inline void +__set_psw_mask(unsigned long mask) +{ + local_save_flags(mask); + __load_psw_mask(mask); +} + +#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) +#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK) + #ifdef CONFIG_SMP extern void smp_ctl_set_bit(int cr, int bit); diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h index aade85c53a63..6c18a3f24316 100644 --- a/include/asm-s390/thread_info.h +++ b/include/asm-s390/thread_info.h @@ -50,7 +50,7 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ unsigned int cpu; /* current CPU */ - unsigned int preempt_count; /* 0 => preemptable */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ struct restart_block restart_block; }; @@ -96,6 +96,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_RESTART_SVC 4 /* restart svc with new svc number */ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ +#define TIF_MCCK_PENDING 7 /* machine check handling is pending */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -109,6 +110,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) +#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) #define _TIF_USEDFPU (1<<TIF_USEDFPU) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_31BIT (1<<TIF_31BIT) diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h index a7f43a251f81..3e3bfe6a8fa8 100644 --- a/include/asm-s390/uaccess.h +++ b/include/asm-s390/uaccess.h @@ -149,11 +149,11 @@ struct exception_table_entry }) #endif -#ifndef __CHECKER__ #define __put_user(x, ptr) \ ({ \ __typeof__(*(ptr)) __x = (x); \ int __pu_err; \ + __chk_user_ptr(ptr); \ switch (sizeof (*(ptr))) { \ case 1: \ case 2: \ @@ -167,14 +167,6 @@ struct exception_table_entry } \ __pu_err; \ }) -#else -#define __put_user(x, ptr) \ -({ \ - void __user *p; \ - p = (ptr); \ - 0; \ -}) -#endif #define put_user(x, ptr) \ ({ \ @@ -213,11 +205,11 @@ extern int __put_user_bad(void) __attribute__((noreturn)); }) #endif -#ifndef __CHECKER__ #define __get_user(x, ptr) \ ({ \ __typeof__(*(ptr)) __x; \ int __gu_err; \ + __chk_user_ptr(ptr); \ switch (sizeof(*(ptr))) { \ case 1: \ case 2: \ @@ -232,15 +224,6 @@ extern int __put_user_bad(void) __attribute__((noreturn)); (x) = __x; \ __gu_err; \ }) -#else -#define __get_user(x, ptr) \ -({ \ - void __user *p; \ - p = (ptr); \ - 0; \ -}) -#endif - #define get_user(x, ptr) \ ({ \ diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h index f1a204f7c0f0..221e965da924 100644 --- a/include/asm-s390/unistd.h +++ b/include/asm-s390/unistd.h @@ -269,13 +269,18 @@ #define __NR_mq_timedreceive 274 #define __NR_mq_notify 275 #define __NR_mq_getsetattr 276 -/* Number 277 is reserved for new sys_kexec_load */ +#define __NR_kexec_load 277 #define __NR_add_key 278 #define __NR_request_key 279 #define __NR_keyctl 280 #define __NR_waitid 281 +#define __NR_ioprio_set 282 +#define __NR_ioprio_get 283 +#define __NR_inotify_init 284 +#define __NR_inotify_add_watch 285 +#define __NR_inotify_rm_watch 286 -#define NR_syscalls 282 +#define NR_syscalls 287 /* * There 
are some system calls that are not present on 64 bit, some |
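
For readers following the spinlock.h rewrite above: the diag-44 assembly loops are replaced by a single cs-based _raw_compare_and_swap() primitive with out-of-line slow paths (_raw_spin_lock_wait() and friends are only declared extern here; their definitions presumably land elsewhere in the same patch series, outside this diffstat). Below is a minimal, portable sketch of that scheme, not the kernel code: the GCC __sync_val_compare_and_swap() builtin stands in for the s390 cs instruction, the sketch_* names are invented, and a bare retry loop replaces the real wait path.

/*
 * Compare-and-swap spinlock sketch.  __sync_val_compare_and_swap() is a
 * full barrier, playing the role of the "cc"/"memory"-clobbering cs.
 */
#include <stdio.h>

typedef struct {
	volatile unsigned int lock;	/* 0 = free, otherwise the owner's pc */
} sketch_spinlock_t;

static inline unsigned int
sketch_cs(volatile unsigned int *lock, unsigned int old, unsigned int new_val)
{
	/* returns the previous value, as cs leaves it in its first operand */
	return __sync_val_compare_and_swap(lock, old, new_val);
}

static inline void sketch_spin_lock(sketch_spinlock_t *lp)
{
	unsigned int pc = (unsigned int)(unsigned long)
			  __builtin_return_address(0);

	while (sketch_cs(&lp->lock, 0, pc) != 0)
		;	/* the kernel calls its out-of-line wait path here */
}

static inline int sketch_spin_trylock(sketch_spinlock_t *lp)
{
	unsigned int pc = (unsigned int)(unsigned long)
			  __builtin_return_address(0);

	return sketch_cs(&lp->lock, 0, pc) == 0;
}

static inline void sketch_spin_unlock(sketch_spinlock_t *lp)
{
	sketch_cs(&lp->lock, lp->lock, 0);
}

int main(void)
{
	sketch_spinlock_t lp = { 0 };

	sketch_spin_lock(&lp);
	printf("trylock while held: %d\n", sketch_spin_trylock(&lp));	/* 0 */
	sketch_spin_unlock(&lp);
	printf("trylock when free:  %d\n", sketch_spin_trylock(&lp));	/* 1 */
	sketch_spin_unlock(&lp);
	return 0;
}

Storing the caller's return address instead of a constant 1 costs nothing on the fast path and presumably gives the wait path and dump analysis the owner's PC for free, which would explain why the patch threads pc through trylock as well.
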