From dcd497f99a1ef29a7c5e76142965be77e9dacabd Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Thu, 23 Jun 2005 00:09:07 -0700 Subject: [PATCH] streamline preempt_count type across archs The preempt_count member of struct thread_info is currently either defined as int, unsigned int or __s32 depending on arch. This patch makes the type of preempt_count an int on all archs. Having preempt_count be an unsigned type prevents the catching of preempt_count < 0 bugs, and using int on some archs and __s32 on others is not exactly "neat" - much nicer when it's just int all over. A previous version of this patch was already ACK'ed by Robert Love, and the only change in this version of the patch compared to the one he ACK'ed is that this one also makes sure the preempt_count member is consistently commented. Signed-off-by: Jesper Juhl Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/thread_info.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/asm-s390') diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h index aade85c53a63..fe101d41e849 100644 --- a/include/asm-s390/thread_info.h +++ b/include/asm-s390/thread_info.h @@ -50,7 +50,7 @@ struct thread_info { struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ unsigned int cpu; /* current CPU */ - unsigned int preempt_count; /* 0 => preemptable */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ struct restart_block restart_block; }; -- cgit v1.2.1 From 77fa22450de00d535de2cc8be653983560828000 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sat, 25 Jun 2005 14:55:30 -0700 Subject: [PATCH] s390: improved machine check handling Improved machine check handling. The kernel is now able to receive machine checks while in kernel mode (system call, interrupt and program check handling). Register validation is now performed as well.
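As an illustration, a minimal, hypothetical sketch (not part of this patch) of how the local_mcck_enable()/local_mcck_disable() helpers that this patch adds to asm-s390/system.h could be used; update_fragile_state() and do_fragile_work() are invented names:

/* Hypothetical caller: hold off machine check delivery around a
 * short critical sequence. Machine checks raised in between stay
 * pending and are delivered once the PSW mask bit is set again. */
static void update_fragile_state(void)
{
	local_mcck_disable();	/* loads PSW_KERNEL_BITS & ~PSW_MASK_MCHECK */
	do_fragile_work();	/* placeholder for the protected sequence */
	local_mcck_enable();	/* loads PSW_KERNEL_BITS, MCHECK set again */
}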
Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/lowcore.h | 7 +++++- include/asm-s390/processor.h | 52 ++++++++++++++++-------------------------- include/asm-s390/ptrace.h | 2 +- include/asm-s390/system.h | 21 ++++++++++++----- include/asm-s390/thread_info.h | 2 ++ 5 files changed, 44 insertions(+), 40 deletions(-) (limited to 'include/asm-s390') diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index df5172fc589d..76b5b19c0ae2 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h @@ -109,10 +109,14 @@ #ifndef __s390x__ #define __LC_PFAULT_INTPARM 0x080 +#define __LC_CPU_TIMER_SAVE_AREA 0x0D8 #define __LC_AREGS_SAVE_AREA 0x120 +#define __LC_GPREGS_SAVE_AREA 0x180 #define __LC_CREGS_SAVE_AREA 0x1C0 #else /* __s390x__ */ #define __LC_PFAULT_INTPARM 0x11B8 +#define __LC_GPREGS_SAVE_AREA 0x1280 +#define __LC_CPU_TIMER_SAVE_AREA 0x1328 #define __LC_AREGS_SAVE_AREA 0x1340 #define __LC_CREGS_SAVE_AREA 0x1380 #endif /* __s390x__ */ @@ -167,7 +171,8 @@ struct _lowcore __u16 subchannel_nr; /* 0x0ba */ __u32 io_int_parm; /* 0x0bc */ __u32 io_int_word; /* 0x0c0 */ - __u8 pad3[0xD8-0xC4]; /* 0x0c4 */ + __u8 pad3[0xD4-0xC4]; /* 0x0c4 */ + __u32 extended_save_area_addr; /* 0x0d4 */ __u32 cpu_timer_save_area[2]; /* 0x0d8 */ __u32 clock_comp_save_area[2]; /* 0x0e0 */ __u32 mcck_interruption_code[2]; /* 0x0e8 */ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index fb46e9090b50..8bd14de69e35 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -206,6 +206,18 @@ unsigned long get_wchan(struct task_struct *p); asm volatile ("ex 0,%0" : : "i" (__LC_DIAG44_OPCODE) : "memory") #endif /* __s390x__ */ +/* + * Set PSW to specified value. + */ +static inline void __load_psw(psw_t psw) +{ +#ifndef __s390x__ + asm volatile ("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); +#else + asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); +#endif +} + /* * Set PSW mask to specified value, while leaving the * PSW addr pointing to the next instruction. 
@@ -214,8 +226,8 @@ unsigned long get_wchan(struct task_struct *p); static inline void __load_psw_mask (unsigned long mask) { unsigned long addr; - psw_t psw; + psw.mask = mask; #ifndef __s390x__ @@ -241,30 +253,8 @@ static inline void __load_psw_mask (unsigned long mask) */ static inline void enabled_wait(void) { - unsigned long reg; - psw_t wait_psw; - - wait_psw.mask = PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT | - PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY; -#ifndef __s390x__ - asm volatile ( - " basr %0,0\n" - "0: la %0,1f-0b(%0)\n" - " st %0,4(%1)\n" - " oi 4(%1),0x80\n" - " lpsw 0(%1)\n" - "1:" - : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw) - : "memory", "cc" ); -#else /* __s390x__ */ - asm volatile ( - " larl %0,0f\n" - " stg %0,8(%1)\n" - " lpswe 0(%1)\n" - "0:" - : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw) - : "memory", "cc" ); -#endif /* __s390x__ */ + __load_psw_mask(PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT | + PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY); } /* @@ -273,13 +263,11 @@ static inline void enabled_wait(void) static inline void disabled_wait(unsigned long code) { - char psw_buffer[2*sizeof(psw_t)]; unsigned long ctl_buf; - psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1) - & -sizeof(psw_t)); + psw_t dw_psw; - dw_psw->mask = PSW_BASE_BITS | PSW_MASK_WAIT; - dw_psw->addr = code; + dw_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT; + dw_psw.addr = code; /* * Store status and then load disabled wait psw, * the processor is dead afterwards @@ -301,7 +289,7 @@ static inline void disabled_wait(unsigned long code) " oi 0x1c0,0x10\n" /* fake protection bit */ " lpsw 0(%1)" : "=m" (ctl_buf) - : "a" (dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); + : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); #else /* __s390x__ */ asm volatile (" stctg 0,0,0(%2)\n" " ni 4(%2),0xef\n" /* switch off protection */ @@ -333,7 +321,7 @@ static inline void disabled_wait(unsigned long code) " oi 0x384(1),0x10\n" /* fake protection bit */ " lpswe 0(%1)" : "=m" (ctl_buf) - : "a" (dw_psw), "a" (&ctl_buf), + : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); #endif /* __s390x__ */ } diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h index 4eff8f2e3bf1..fc7c96edc697 100644 --- a/include/asm-s390/ptrace.h +++ b/include/asm-s390/ptrace.h @@ -276,7 +276,7 @@ typedef struct #endif /* __s390x__ */ #define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \ - PSW_DEFAULT_KEY) + PSW_MASK_MCHECK | PSW_DEFAULT_KEY) #define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \ PSW_MASK_PSTATE | PSW_DEFAULT_KEY) diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index 81514d76edcf..e3cb3ce1d24a 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h @@ -16,6 +16,7 @@ #include #include #include +#include #ifdef __KERNEL__ @@ -331,9 +332,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #ifdef __s390x__ -#define __load_psw(psw) \ - __asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); - #define __ctl_load(array, low, high) ({ \ typedef struct { char _[sizeof(array)]; } addrtype; \ __asm__ __volatile__ ( \ @@ -390,9 +388,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #else /* __s390x__ */ -#define __load_psw(psw) \ - __asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" ); - #define __ctl_load(array, low, high) ({ \ typedef struct { char 
_[sizeof(array)]; } addrtype; \ __asm__ __volatile__ ( \ @@ -451,6 +446,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) /* For spinlocks etc */ #define local_irq_save(x) ((x) = local_irq_disable()) +/* + * Use to set psw mask except for the first byte which + * won't be changed by this function. + */ +static inline void +__set_psw_mask(unsigned long mask) +{ + local_save_flags(mask); + __load_psw_mask(mask); +} + +#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) +#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK) + #ifdef CONFIG_SMP extern void smp_ctl_set_bit(int cr, int bit); diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h index fe101d41e849..6c18a3f24316 100644 --- a/include/asm-s390/thread_info.h +++ b/include/asm-s390/thread_info.h @@ -96,6 +96,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_RESTART_SVC 4 /* restart svc with new svc number */ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ +#define TIF_MCCK_PENDING 7 /* machine check handling is pending */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -109,6 +110,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_RESTART_SVC (1< Date: Sat, 25 Jun 2005 14:55:32 -0700 Subject: [PATCH] s390: add vmcp interface Add interface to issue VM control program commands. Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/cpcmd.h | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'include/asm-s390') diff --git a/include/asm-s390/cpcmd.h b/include/asm-s390/cpcmd.h index 1d33c5da083e..1fcf65be7a23 100644 --- a/include/asm-s390/cpcmd.h +++ b/include/asm-s390/cpcmd.h @@ -11,14 +11,28 @@ #define __CPCMD__ /* + * the lowlevel function for cpcmd * the caller of __cpcmd has to ensure that the response buffer is below 2 GB */ -extern void __cpcmd(char *cmd, char *response, int rlen); +extern int __cpcmd(const char *cmd, char *response, int rlen, int *response_code); #ifndef __s390x__ #define cpcmd __cpcmd #else -extern void cpcmd(char *cmd, char *response, int rlen); +/* + * cpcmd is the in-kernel interface for issuing CP commands + * + * cmd: null-terminated command string, max 240 characters + * response: response buffer for VM's textual response + * rlen: size of the response buffer, cpcmd will not exceed this size + * but will cap the output, if its too large. Everything that + * did not fit into the buffer will be silently dropped + * response_code: return pointer for VM's error code + * return value: the size of the response. The caller can check if the buffer + * was large enough by comparing the return value and rlen + * NOTE: If the response buffer is not below 2 GB, cpcmd can sleep + */ +extern int cpcmd(const char *cmd, char *response, int rlen, int *response_code); #endif /*__s390x__*/ #endif -- cgit v1.2.1 From 66a464dbc8e0345b6f972b92bf1118e043d7c987 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Sat, 25 Jun 2005 14:55:33 -0700 Subject: [PATCH] s390: debug feature changes This patch changes the memory allocation method for the s390 debug feature. Trace buffers had been allocated using the get_free_pages() function before. 
Therefore it was not possible to get big memory areas in a running system due to memory fragmentation. Now the trace buffers are subdivided into several page-sized sub-buffers. This makes it possible to allocate more memory for the trace buffers, and more trace records can be written. In addition to that, dynamic specification of the size of the trace buffers is implemented. It is now possible to change the size of a trace buffer using a new debugfs file instance. When writing a number into this file, the trace buffer size is changed to 'number * pagesize'. In the past, all the traces could be obtained from userspace by accessing files in the "proc" filesystem. Now, with debugfs, we have a new filesystem that should be used for debugging purposes. This patch moves the debug feature from procfs to debugfs. Since the interface of debug_register() changed, all device drivers that use the debug feature had to be adjusted. Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/debug.h | 48 +++++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 19 deletions(-) (limited to 'include/asm-s390') diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h index 6bbcdea42a86..92360d90144b 100644 --- a/include/asm-s390/debug.h +++ b/include/asm-s390/debug.h @@ -9,6 +9,8 @@ #ifndef DEBUG_H #define DEBUG_H +#include +#include #include /* Note: @@ -31,19 +33,18 @@ struct __debug_entry{ } __attribute__((packed)); -#define __DEBUG_FEATURE_VERSION 1 /* version of debug feature */ +#define __DEBUG_FEATURE_VERSION 2 /* version of debug feature */ #ifdef __KERNEL__ #include #include #include -#include #define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */ #define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */ #define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */ #define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */ -#define DEBUG_MAX_PROCF_LEN 64 /* max length for a proc file name */ +#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */ #define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */ #define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */ @@ -64,16 +65,17 @@ typedef struct debug_info { spinlock_t lock; int level; int nr_areas; - int page_order; + int pages_per_area; int buf_size; int entry_size; - debug_entry_t** areas; + debug_entry_t*** areas; int active_area; - int *active_entry; - struct proc_dir_entry* proc_root_entry; - struct proc_dir_entry* proc_entries[DEBUG_MAX_VIEWS]; + int *active_pages; + int *active_entries; + struct dentry* debugfs_root_entry; + struct dentry* debugfs_entries[DEBUG_MAX_VIEWS]; struct debug_view* views[DEBUG_MAX_VIEWS]; - char name[DEBUG_MAX_PROCF_LEN]; + char name[DEBUG_MAX_NAME_LEN]; } debug_info_t; typedef int (debug_header_proc_t) (debug_info_t* id, @@ -98,7 +100,7 @@ int debug_dflt_header_fn(debug_info_t* id, struct debug_view* view, int area, debug_entry_t* entry, char* out_buf); struct debug_view { - char name[DEBUG_MAX_PROCF_LEN]; + char name[DEBUG_MAX_NAME_LEN]; debug_prolog_proc_t* prolog_proc; debug_header_proc_t* header_proc; debug_format_proc_t* format_proc; @@ -120,7 +122,7 @@ debug_entry_t* debug_exception_common(debug_info_t* id, int level, /* Debug Feature API: */ -debug_info_t* debug_register(char* name, int pages_index, int nr_areas, +debug_info_t* debug_register(char* name, int pages, int nr_areas, int buf_size); void debug_unregister(debug_info_t* id); @@ -132,7 +134,8 @@ void
debug_stop_all(void); extern inline debug_entry_t* debug_event(debug_info_t* id, int level, void* data, int length) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,data,length); } @@ -140,7 +143,8 @@ extern inline debug_entry_t* debug_int_event(debug_info_t* id, int level, unsigned int tag) { unsigned int t=tag; - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,&t,sizeof(unsigned int)); } @@ -148,14 +152,16 @@ extern inline debug_entry_t * debug_long_event (debug_info_t* id, int level, unsigned long tag) { unsigned long t=tag; - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,&t,sizeof(unsigned long)); } extern inline debug_entry_t* debug_text_event(debug_info_t* id, int level, const char* txt) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_event_common(id,level,txt,strlen(txt)); } @@ -167,7 +173,8 @@ debug_sprintf_event(debug_info_t* id,int level,char *string,...) extern inline debug_entry_t* debug_exception(debug_info_t* id, int level, void* data, int length) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,data,length); } @@ -175,7 +182,8 @@ extern inline debug_entry_t* debug_int_exception(debug_info_t* id, int level, unsigned int tag) { unsigned int t=tag; - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,&t,sizeof(unsigned int)); } @@ -183,14 +191,16 @@ extern inline debug_entry_t * debug_long_exception (debug_info_t* id, int level, unsigned long tag) { unsigned long t=tag; - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,&t,sizeof(unsigned long)); } extern inline debug_entry_t* debug_text_exception(debug_info_t* id, int level, const char* txt) { - if ((!id) || (level > id->level)) return NULL; + if ((!id) || (level > id->level) || (id->pages_per_area == 0)) + return NULL; return debug_exception_common(id,level,txt,strlen(txt)); } -- cgit v1.2.1 From 4866cde064afbb6c2a488c265e696879de616daa Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 25 Jun 2005 14:57:23 -0700 Subject: [PATCH] sched: cleanup context switch locking Instead of requiring architecture code to interact with the scheduler's locking implementation, provide a couple of defines that can be used by the architecture to request runqueue unlocked context switches, and ask for interrupts to be enabled over the context switch. Also replaces the "switch_lock" used by these architectures with an oncpu flag (note, not a potentially slow bitflag). This eliminates one bus locked memory operation when context switching, and simplifies the task_running function. 
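To make the new opt-in concrete, a hedged sketch follows (assumption: these are the two defines the core scheduler now checks, as described above; the s390 hunk below does not use either of them). An architecture that wants runqueue-unlocked context switches with interrupts enabled would put something like this in its asm/system.h:

/* Hypothetical arch opt-in for the new context switch scheme. */
#define __ARCH_WANT_UNLOCKED_CTXSW	/* run context_switch() without rq->lock held */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW	/* keep interrupts enabled over the switch */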
Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/system.h | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) (limited to 'include/asm-s390') diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index e3cb3ce1d24a..b4a9f05a93d6 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h @@ -104,29 +104,18 @@ static inline void restore_access_regs(unsigned int *acrs) prev = __switch_to(prev,next); \ } while (0) -#define prepare_arch_switch(rq, next) do { } while(0) -#define task_running(rq, p) ((rq)->curr == (p)) - #ifdef CONFIG_VIRT_CPU_ACCOUNTING extern void account_user_vtime(struct task_struct *); extern void account_system_vtime(struct task_struct *); - -#define finish_arch_switch(rq, prev) do { \ - set_fs(current->thread.mm_segment); \ - spin_unlock(&(rq)->lock); \ - account_system_vtime(prev); \ - local_irq_enable(); \ -} while (0) - #else +#define account_system_vtime(prev) do { } while (0) +#endif #define finish_arch_switch(rq, prev) do { \ set_fs(current->thread.mm_segment); \ - spin_unlock_irq(&(rq)->lock); \ + account_system_vtime(prev); \ } while (0) -#endif - #define nop() __asm__ __volatile__ ("nop") #define xchg(ptr,x) \ -- cgit v1.2.1 From cf13f0eaffa31bf6a145c53c589654b11c72ddc7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sat, 25 Jun 2005 14:58:11 -0700 Subject: [PATCH] kexec: s390 support Add kexec support for s390 architecture. From: Milton Miller - Fix passing of first argument to relocate_kernel assembly. - Fix Kconfig description. - Remove wrong comment and comments that describe obvious things. - Allow only KEXEC_TYPE_DEFAULT as image type -> dump not supported. Acked-by: Martin Schwidefsky Signed-off-by: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/kexec.h | 42 ++++++++++++++++++++++++++++++++++++++++++ include/asm-s390/unistd.h | 2 +- 2 files changed, 43 insertions(+), 1 deletion(-) create mode 100644 include/asm-s390/kexec.h (limited to 'include/asm-s390') diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h new file mode 100644 index 000000000000..54cf7d9f251c --- /dev/null +++ b/include/asm-s390/kexec.h @@ -0,0 +1,42 @@ +/* + * include/asm-s390/kexec.h + * + * (C) Copyright IBM Corp. 2005 + * + * Author(s): Rolf Adelsberger + * + */ + +#ifndef _S390_KEXEC_H +#define _S390_KEXEC_H + +#include +#include +/* + * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. + * I.e. Maximum page that is mapped directly into kernel memory, + * and kmap is not required. 
+ */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control pages */ +/* Not more than 2GB */ +#define KEXEC_CONTROL_MEMORY_LIMIT (1<<31) + +/* Allocate one page for the pdp and the second for the code */ +#define KEXEC_CONTROL_CODE_SIZE 4096 + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_S390 + +#define MAX_NOTE_BYTES 1024 +typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; + +extern note_buf_t crash_notes[]; + +#endif /*_S390_KEXEC_H */ diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h index f1a204f7c0f0..363db45f8d07 100644 --- a/include/asm-s390/unistd.h +++ b/include/asm-s390/unistd.h @@ -269,7 +269,7 @@ #define __NR_mq_timedreceive 274 #define __NR_mq_notify 275 #define __NR_mq_getsetattr 276 -/* Number 277 is reserved for new sys_kexec_load */ +#define __NR_kexec_load 277 #define __NR_add_key 278 #define __NR_request_key 279 #define __NR_keyctl 280 -- cgit v1.2.1 From 5ee24d9594ffb070261b70461f71c42913c663bb Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 30 Jun 2005 02:58:48 -0700 Subject: [PATCH] s390: fix finish_arch_switch Commit 4866cde064afbb6c2a488c265e696879de616daa requires finish_arch_switch to have only one parameter instead of two. Also fix another compile error (double declaration of account_system_vtime) if CONFIG_VIRT_CPU_ACCOUNTING is not defined. Signed-off-by: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/system.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include/asm-s390') diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index b4a9f05a93d6..864cae7e1fd6 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h @@ -107,11 +107,9 @@ static inline void restore_access_regs(unsigned int *acrs) #ifdef CONFIG_VIRT_CPU_ACCOUNTING extern void account_user_vtime(struct task_struct *); extern void account_system_vtime(struct task_struct *); -#else -#define account_system_vtime(prev) do { } while (0) #endif -#define finish_arch_switch(rq, prev) do { \ +#define finish_arch_switch(prev) do { \ set_fs(current->thread.mm_segment); \ account_system_vtime(prev); \ } while (0) -- cgit v1.2.1 From 7c9034735eccbf82608a4602c59aaf6053ea9416 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 26 Jul 2005 11:29:55 -0600 Subject: [PATCH] Add emergency_restart() When the kernel is working well and we want to restart cleanly, kernel_restart is the function to use. But in many instances the kernel wants to reboot when things are expected to be working very badly, such as from panic or a software watchdog handler. This patch adds the function emergency_restart() so that callers can be clear about what semantics they expect when calling restart. emergency_restart() is expected to be callable from interrupt context and possibly reliable in even more trying circumstances. This is an initial generic implementation for all architectures. Signed-off-by: Eric W.
Biederman Signed-off-by: Linus Torvalds --- include/asm-s390/emergency-restart.h | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 include/asm-s390/emergency-restart.h (limited to 'include/asm-s390') diff --git a/include/asm-s390/emergency-restart.h b/include/asm-s390/emergency-restart.h new file mode 100644 index 000000000000..108d8c48e42e --- /dev/null +++ b/include/asm-s390/emergency-restart.h @@ -0,0 +1,6 @@ +#ifndef _ASM_EMERGENCY_RESTART_H +#define _ASM_EMERGENCY_RESTART_H + +#include + +#endif /* _ASM_EMERGENCY_RESTART_H */ -- cgit v1.2.1 From 951f22d5b1f0eaae35dafc669e3774a0c2084d10 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 27 Jul 2005 11:44:57 -0700 Subject: [PATCH] s390: spin lock retry Split the spin lock and r/w lock implementation into a single try, which is done inline, and an out-of-line function that repeatedly tries to get the lock before doing the cpu_relax(). Add a system control to set the number of retries before a cpu is yielded. The reason for the spin lock retry is that the diagnose 0x44 that is used to give up the virtual cpu is quite expensive. For spin locks that are held only for a short period of time, the cost of the diagnose outweighs the savings it brings for spin locks that are held for a longer time. The default retry count is 1000. Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/lowcore.h | 4 +- include/asm-s390/processor.h | 5 +- include/asm-s390/spinlock.h | 252 ++++++++++++++----------------------------- 3 files changed, 83 insertions(+), 178 deletions(-) (limited to 'include/asm-s390') diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index 76b5b19c0ae2..afe6a9f9b0ae 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h @@ -90,7 +90,6 @@ #define __LC_SYSTEM_TIMER 0x278 #define __LC_LAST_UPDATE_CLOCK 0x280 #define __LC_STEAL_CLOCK 0x288 -#define __LC_DIAG44_OPCODE 0x290 #define __LC_KERNEL_STACK 0xD40 #define __LC_THREAD_INFO 0xD48 #define __LC_ASYNC_STACK 0xD50 @@ -286,8 +285,7 @@ struct _lowcore __u64 system_timer; /* 0x278 */ __u64 last_update_clock; /* 0x280 */ __u64 steal_clock; /* 0x288 */ - __u32 diag44_opcode; /* 0x290 */ - __u8 pad8[0xc00-0x294]; /* 0x294 */ + __u8 pad8[0xc00-0x290]; /* 0x290 */ /* System info area */ __u64 save_area[16]; /* 0xc00 */ __u8 pad9[0xd40-0xc80]; /* 0xc80 */ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 8bd14de69e35..4ec652ebb3b1 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -203,7 +203,10 @@ unsigned long get_wchan(struct task_struct *p); # define cpu_relax() asm volatile ("diag 0,0,68" : : : "memory") #else /* __s390x__ */ # define cpu_relax() \ - asm volatile ("ex 0,%0" : : "i" (__LC_DIAG44_OPCODE) : "memory") + do { \ + if (MACHINE_HAS_DIAG44) \ + asm volatile ("diag 0,0,68" : : : "memory"); \ + } while (0) #endif /* __s390x__ */ /* diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h index 53cc736b9820..8ff10300f7ee 100644 --- a/include/asm-s390/spinlock.h +++ b/include/asm-s390/spinlock.h @@ -11,21 +11,16 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H -#ifdef __s390x__ -/* - * Grmph, take care of %&#! user space programs that include - * asm/spinlock.h. The diagnose is only available in kernel - * context.
- */ -#ifdef __KERNEL__ -#include -#define __DIAG44_INSN "ex" -#define __DIAG44_OPERAND __LC_DIAG44_OPCODE -#else -#define __DIAG44_INSN "#" -#define __DIAG44_OPERAND 0 -#endif -#endif /* __s390x__ */ +static inline int +_raw_compare_and_swap(volatile unsigned int *lock, + unsigned int old, unsigned int new) +{ + asm volatile ("cs %0,%3,0(%4)" + : "=d" (old), "=m" (*lock) + : "0" (old), "d" (new), "a" (lock), "m" (*lock) + : "cc", "memory" ); + return old; +} /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -41,58 +36,35 @@ typedef struct { #endif } __attribute__ ((aligned (4))) spinlock_t; -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } -#define spin_lock_init(lp) do { (lp)->lock = 0; } while(0) +#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } +#define spin_lock_init(lp) do { (lp)->lock = 0; } while(0) #define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock) -#define spin_is_locked(x) ((x)->lock != 0) +#define spin_is_locked(x) ((x)->lock != 0) #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) -extern inline void _raw_spin_lock(spinlock_t *lp) +extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc); +extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc); + +static inline void _raw_spin_lock(spinlock_t *lp) { -#ifndef __s390x__ - unsigned int reg1, reg2; - __asm__ __volatile__(" bras %0,1f\n" - "0: diag 0,0,68\n" - "1: slr %1,%1\n" - " cs %1,%0,0(%3)\n" - " jl 0b\n" - : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock) - : "a" (&lp->lock), "m" (lp->lock) - : "cc", "memory" ); -#else /* __s390x__ */ - unsigned long reg1, reg2; - __asm__ __volatile__(" bras %1,1f\n" - "0: " __DIAG44_INSN " 0,%4\n" - "1: slr %0,%0\n" - " cs %0,%1,0(%3)\n" - " jl 0b\n" - : "=&d" (reg1), "=&d" (reg2), "=m" (lp->lock) - : "a" (&lp->lock), "i" (__DIAG44_OPERAND), - "m" (lp->lock) : "cc", "memory" ); -#endif /* __s390x__ */ + unsigned long pc = (unsigned long) __builtin_return_address(0); + + if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0)) + _raw_spin_lock_wait(lp, pc); } -extern inline int _raw_spin_trylock(spinlock_t *lp) +static inline int _raw_spin_trylock(spinlock_t *lp) { - unsigned long reg; - unsigned int result; - - __asm__ __volatile__(" basr %1,0\n" - "0: cs %0,%1,0(%3)" - : "=d" (result), "=&d" (reg), "=m" (lp->lock) - : "a" (&lp->lock), "m" (lp->lock), "0" (0) - : "cc", "memory" ); - return !result; + unsigned long pc = (unsigned long) __builtin_return_address(0); + + if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0)) + return 1; + return _raw_spin_trylock_retry(lp, pc); } -extern inline void _raw_spin_unlock(spinlock_t *lp) +static inline void _raw_spin_unlock(spinlock_t *lp) { - unsigned int old; - - __asm__ __volatile__("cs %0,%3,0(%4)" - : "=d" (old), "=m" (lp->lock) - : "0" (lp->lock), "d" (0), "a" (lp) - : "cc", "memory" ); + _raw_compare_and_swap(&lp->lock, lp->lock, 0); } /* @@ -106,7 +78,7 @@ extern inline void _raw_spin_unlock(spinlock_t *lp) * read-locks. 
*/ typedef struct { - volatile unsigned long lock; + volatile unsigned int lock; volatile unsigned long owner_pc; #ifdef CONFIG_PREEMPT unsigned int break_lock; @@ -129,123 +101,55 @@ typedef struct { */ #define write_can_lock(x) ((x)->lock == 0) -#ifndef __s390x__ -#define _raw_read_lock(rw) \ - asm volatile(" l 2,0(%1)\n" \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: la 2,0(2)\n" /* clear high (=write) bit */ \ - " la 3,1(2)\n" /* one more reader */ \ - " cs 2,3,0(%1)\n" /* try to write new value */ \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_read_lock(rw) \ - asm volatile(" lg 2,0(%1)\n" \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: nihh 2,0x7fff\n" /* clear high (=write) bit */ \ - " la 3,1(2)\n" /* one more reader */ \ - " csg 2,3,0(%1)\n" /* try to write new value */ \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#ifndef __s390x__ -#define _raw_read_unlock(rw) \ - asm volatile(" l 2,0(%1)\n" \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: lr 3,2\n" \ - " ahi 3,-1\n" /* one less reader */ \ - " cs 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_read_unlock(rw) \ - asm volatile(" lg 2,0(%1)\n" \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: lgr 3,2\n" \ - " bctgr 3,0\n" /* one less reader */ \ - " csg 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#ifndef __s390x__ -#define _raw_write_lock(rw) \ - asm volatile(" lhi 3,1\n" \ - " sll 3,31\n" /* new lock value = 0x80000000 */ \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: slr 2,2\n" /* old lock value must be 0 */ \ - " cs 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_write_lock(rw) \ - asm volatile(" llihh 3,0x8000\n" /* new lock value = 0x80...0 */ \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: slgr 2,2\n" /* old lock value must be 0 */ \ - " csg 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#ifndef __s390x__ -#define _raw_write_unlock(rw) \ - asm volatile(" slr 3,3\n" /* new lock value = 0 */ \ - " j 1f\n" \ - "0: diag 0,0,68\n" \ - "1: lhi 2,1\n" \ - " sll 2,31\n" /* old lock value must be 0x80000000 */ \ - " cs 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) : "a" (&(rw)->lock), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#else /* __s390x__ */ -#define _raw_write_unlock(rw) \ - asm volatile(" slgr 3,3\n" /* new lock value = 0 */ \ - " j 1f\n" \ - "0: " __DIAG44_INSN " 0,%2\n" \ - "1: llihh 2,0x8000\n" /* old lock value must be 0x8..0 */\ - " csg 2,3,0(%1)\n" \ - " jl 0b" \ - : "=m" ((rw)->lock) \ - : "a" (&(rw)->lock), "i" (__DIAG44_OPERAND), \ - "m" ((rw)->lock) : "2", "3", "cc", "memory" ) -#endif /* __s390x__ */ - -#define _raw_read_trylock(lock) generic_raw_read_trylock(lock) - -extern inline int _raw_write_trylock(rwlock_t *rw) +extern void _raw_read_lock_wait(rwlock_t *lp); +extern int _raw_read_trylock_retry(rwlock_t *lp); +extern void _raw_write_lock_wait(rwlock_t *lp); +extern int 
_raw_write_trylock_retry(rwlock_t *lp); + +static inline void _raw_read_lock(rwlock_t *rw) +{ + unsigned int old; + old = rw->lock & 0x7fffffffU; + if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old) + _raw_read_lock_wait(rw); +} + +static inline void _raw_read_unlock(rwlock_t *rw) +{ + unsigned int old, cmp; + + old = rw->lock; + do { + cmp = old; + old = _raw_compare_and_swap(&rw->lock, old, old - 1); + } while (cmp != old); +} + +static inline void _raw_write_lock(rwlock_t *rw) +{ + if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) + _raw_write_lock_wait(rw); +} + +static inline void _raw_write_unlock(rwlock_t *rw) +{ + _raw_compare_and_swap(&rw->lock, 0x80000000, 0); +} + +static inline int _raw_read_trylock(rwlock_t *rw) +{ + unsigned int old; + old = rw->lock & 0x7fffffffU; + if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old)) + return 1; + return _raw_read_trylock_retry(rw); +} + +static inline int _raw_write_trylock(rwlock_t *rw) { - unsigned long result, reg; - - __asm__ __volatile__( -#ifndef __s390x__ - " lhi %1,1\n" - " sll %1,31\n" - " cs %0,%1,0(%3)" -#else /* __s390x__ */ - " llihh %1,0x8000\n" - "0: csg %0,%1,0(%3)\n" -#endif /* __s390x__ */ - : "=d" (result), "=&d" (reg), "=m" (rw->lock) - : "a" (&rw->lock), "m" (rw->lock), "0" (0UL) - : "cc", "memory" ); - return result == 0; + if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) + return 1; + return _raw_write_trylock_retry(rw); } #endif /* __ASM_SPINLOCK_H */ -- cgit v1.2.1 From afff7e2b3b13dbd26a2b9991d3d571df111d92e8 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 27 Jul 2005 11:44:58 -0700 Subject: [PATCH] s390: find_next_{zero}_bit fixes The find_next_{zero}_bit primitives on s390* should never return a bit number bigger than the bit field size. If a bit field doesn't end on a word boundary, an offset makes the search start at the last word of the bit field, and that last word doesn't contain any zero/one bits, then the search is continued with a call to find_first_bit with a negative size. The search normally ends pretty quickly because the words following the bit field contain a mix of zeros and ones. But the bit number that is returned in this case is too big. To fix this, an additional if to check for this case is needed. To make the code easier to read, I removed the assembler parts from the find_next_{zero}_bit functions; the C-ified code is just as good. Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/bitops.h | 440 +++++++++++++++------------------------------- 1 file changed, 140 insertions(+), 300 deletions(-) (limited to 'include/asm-s390') diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h index 16bb08499c7f..8651524217fd 100644 --- a/include/asm-s390/bitops.h +++ b/include/asm-s390/bitops.h @@ -527,13 +527,64 @@ __constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { __constant_test_bit((nr),(addr)) : \ __test_bit((nr),(addr)) ) -#ifndef __s390x__ +/* + * ffz = Find First Zero in word. Undefined if no zero exists, + * so code should check against ~0UL first..
+ */ +static inline unsigned long ffz(unsigned long word) +{ + unsigned long bit = 0; + +#ifdef __s390x__ + if (likely((word & 0xffffffff) == 0xffffffff)) { + word >>= 32; + bit += 32; + } +#endif + if (likely((word & 0xffff) == 0xffff)) { + word >>= 16; + bit += 16; + } + if (likely((word & 0xff) == 0xff)) { + word >>= 8; + bit += 8; + } + return bit + _zb_findmap[word & 0xff]; +} + +/* + * __ffs = find first bit in word. Undefined if no bit exists, + * so code should check against 0UL first.. + */ +static inline unsigned long __ffs (unsigned long word) +{ + unsigned long bit = 0; + +#ifdef __s390x__ + if (likely((word & 0xffffffff) == 0)) { + word >>= 32; + bit += 32; + } +#endif + if (likely((word & 0xffff) == 0)) { + word >>= 16; + bit += 16; + } + if (likely((word & 0xff) == 0)) { + word >>= 8; + bit += 8; + } + return bit + _sb_findmap[word & 0xff]; +} /* * Find-bit routines.. */ + +#ifndef __s390x__ + static inline int -find_first_zero_bit(const unsigned long * addr, unsigned int size) +find_first_zero_bit(const unsigned long * addr, unsigned long size) { typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; unsigned long cmp, count; @@ -548,7 +599,7 @@ find_first_zero_bit(const unsigned long * addr, unsigned int size) " srl %2,5\n" "0: c %1,0(%0,%4)\n" " jne 1f\n" - " ahi %0,4\n" + " la %0,4(%0)\n" " brct %2,0b\n" " lr %0,%3\n" " j 4f\n" @@ -574,7 +625,7 @@ find_first_zero_bit(const unsigned long * addr, unsigned int size) } static inline int -find_first_bit(const unsigned long * addr, unsigned int size) +find_first_bit(const unsigned long * addr, unsigned long size) { typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; unsigned long cmp, count; @@ -589,7 +640,7 @@ find_first_bit(const unsigned long * addr, unsigned int size) " srl %2,5\n" "0: c %1,0(%0,%4)\n" " jne 1f\n" - " ahi %0,4\n" + " la %0,4(%0)\n" " brct %2,0b\n" " lr %0,%3\n" " j 4f\n" @@ -614,89 +665,8 @@ find_first_bit(const unsigned long * addr, unsigned int size) return (res < size) ? 
res : size; } -static inline int -find_next_zero_bit (const unsigned long * addr, int size, int offset) -{ - unsigned long * p = ((unsigned long *) addr) + (offset >> 5); - unsigned long bitvec, reg; - int set, bit = offset & 31, res; - - if (bit) { - /* - * Look for zero in first word - */ - bitvec = (*p) >> bit; - __asm__(" slr %0,%0\n" - " lhi %2,0xff\n" - " tml %1,0xffff\n" - " jno 0f\n" - " ahi %0,16\n" - " srl %1,16\n" - "0: tml %1,0x00ff\n" - " jno 1f\n" - " ahi %0,8\n" - " srl %1,8\n" - "1: nr %1,%2\n" - " ic %1,0(%1,%3)\n" - " alr %0,%1" - : "=&d" (set), "+a" (bitvec), "=&d" (reg) - : "a" (&_zb_findmap) : "cc" ); - if (set < (32 - bit)) - return set + offset; - offset += 32 - bit; - p++; - } - /* - * No zero yet, search remaining full words for a zero - */ - res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr)); - return (offset + res); -} - -static inline int -find_next_bit (const unsigned long * addr, int size, int offset) -{ - unsigned long * p = ((unsigned long *) addr) + (offset >> 5); - unsigned long bitvec, reg; - int set, bit = offset & 31, res; - - if (bit) { - /* - * Look for set bit in first word - */ - bitvec = (*p) >> bit; - __asm__(" slr %0,%0\n" - " lhi %2,0xff\n" - " tml %1,0xffff\n" - " jnz 0f\n" - " ahi %0,16\n" - " srl %1,16\n" - "0: tml %1,0x00ff\n" - " jnz 1f\n" - " ahi %0,8\n" - " srl %1,8\n" - "1: nr %1,%2\n" - " ic %1,0(%1,%3)\n" - " alr %0,%1" - : "=&d" (set), "+a" (bitvec), "=&d" (reg) - : "a" (&_sb_findmap) : "cc" ); - if (set < (32 - bit)) - return set + offset; - offset += 32 - bit; - p++; - } - /* - * No set bit yet, search remaining full words for a bit - */ - res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr)); - return (offset + res); -} - #else /* __s390x__ */ -/* - * Find-bit routines.. - */ static inline unsigned long find_first_zero_bit(const unsigned long * addr, unsigned long size) { @@ -712,7 +682,7 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size) " srlg %2,%2,6\n" "0: cg %1,0(%0,%4)\n" " jne 1f\n" - " aghi %0,8\n" + " la %0,8(%0)\n" " brct %2,0b\n" " lgr %0,%3\n" " j 5f\n" @@ -785,143 +755,66 @@ find_first_bit(const unsigned long * addr, unsigned long size) return (res < size) ? 
res : size; } -static inline unsigned long -find_next_zero_bit (const unsigned long * addr, unsigned long size, unsigned long offset) -{ - unsigned long * p = ((unsigned long *) addr) + (offset >> 6); - unsigned long bitvec, reg; - unsigned long set, bit = offset & 63, res; - - if (bit) { - /* - * Look for zero in first word - */ - bitvec = (*p) >> bit; - __asm__(" lhi %2,-1\n" - " slgr %0,%0\n" - " clr %1,%2\n" - " jne 0f\n" - " aghi %0,32\n" - " srlg %1,%1,32\n" - "0: lghi %2,0xff\n" - " tmll %1,0xffff\n" - " jno 1f\n" - " aghi %0,16\n" - " srlg %1,%1,16\n" - "1: tmll %1,0x00ff\n" - " jno 2f\n" - " aghi %0,8\n" - " srlg %1,%1,8\n" - "2: ngr %1,%2\n" - " ic %1,0(%1,%3)\n" - " algr %0,%1" - : "=&d" (set), "+a" (bitvec), "=&d" (reg) - : "a" (&_zb_findmap) : "cc" ); - if (set < (64 - bit)) - return set + offset; - offset += 64 - bit; - p++; - } - /* - * No zero yet, search remaining full words for a zero - */ - res = find_first_zero_bit (p, size - 64 * (p - (unsigned long *) addr)); - return (offset + res); -} - -static inline unsigned long -find_next_bit (const unsigned long * addr, unsigned long size, unsigned long offset) -{ - unsigned long * p = ((unsigned long *) addr) + (offset >> 6); - unsigned long bitvec, reg; - unsigned long set, bit = offset & 63, res; - - if (bit) { - /* - * Look for zero in first word - */ - bitvec = (*p) >> bit; - __asm__(" slgr %0,%0\n" - " ltr %1,%1\n" - " jnz 0f\n" - " aghi %0,32\n" - " srlg %1,%1,32\n" - "0: lghi %2,0xff\n" - " tmll %1,0xffff\n" - " jnz 1f\n" - " aghi %0,16\n" - " srlg %1,%1,16\n" - "1: tmll %1,0x00ff\n" - " jnz 2f\n" - " aghi %0,8\n" - " srlg %1,%1,8\n" - "2: ngr %1,%2\n" - " ic %1,0(%1,%3)\n" - " algr %0,%1" - : "=&d" (set), "+a" (bitvec), "=&d" (reg) - : "a" (&_sb_findmap) : "cc" ); - if (set < (64 - bit)) - return set + offset; - offset += 64 - bit; - p++; - } - /* - * No set bit yet, search remaining full words for a bit - */ - res = find_first_bit (p, size - 64 * (p - (unsigned long *) addr)); - return (offset + res); -} - #endif /* __s390x__ */ -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. - */ -static inline unsigned long ffz(unsigned long word) +static inline int +find_next_zero_bit (const unsigned long * addr, unsigned long size, + unsigned long offset) { - unsigned long bit = 0; - -#ifdef __s390x__ - if (likely((word & 0xffffffff) == 0xffffffff)) { - word >>= 32; - bit += 32; - } -#endif - if (likely((word & 0xffff) == 0xffff)) { - word >>= 16; - bit += 16; + const unsigned long *p; + unsigned long bit, set; + + if (offset >= size) + return size; + bit = offset & (__BITOPS_WORDSIZE - 1); + offset -= bit; + size -= offset; + p = addr + offset / __BITOPS_WORDSIZE; + if (bit) { + /* + * s390 version of ffz returns __BITOPS_WORDSIZE + * if no zero bit is present in the word. + */ + set = ffz(*p >> bit) + bit; + if (set >= size) + return size + offset; + if (set < __BITOPS_WORDSIZE) + return set + offset; + offset += __BITOPS_WORDSIZE; + size -= __BITOPS_WORDSIZE; + p++; } - if (likely((word & 0xff) == 0xff)) { - word >>= 8; - bit += 8; - } - return bit + _zb_findmap[word & 0xff]; + return offset + find_first_zero_bit(p, size); } -/* - * __ffs = find first bit in word. Undefined if no bit exists, - * so code should check against 0UL first.. 
- */ -static inline unsigned long __ffs (unsigned long word) +static inline int +find_next_bit (const unsigned long * addr, unsigned long size, + unsigned long offset) { - unsigned long bit = 0; - -#ifdef __s390x__ - if (likely((word & 0xffffffff) == 0)) { - word >>= 32; - bit += 32; + const unsigned long *p; + unsigned long bit, set; + + if (offset >= size) + return size; + bit = offset & (__BITOPS_WORDSIZE - 1); + offset -= bit; + size -= offset; + p = addr + offset / __BITOPS_WORDSIZE; + if (bit) { + /* + * s390 version of __ffs returns __BITOPS_WORDSIZE + * if no one bit is present in the word. + */ + set = __ffs(*p & (~0UL << bit)); + if (set >= size) + return size + offset; + if (set < __BITOPS_WORDSIZE) + return set + offset; + offset += __BITOPS_WORDSIZE; + size -= __BITOPS_WORDSIZE; + p++; } -#endif - if (likely((word & 0xffff) == 0)) { - word >>= 16; - bit += 16; - } - if (likely((word & 0xff) == 0)) { - word >>= 8; - bit += 8; - } - return bit + _sb_findmap[word & 0xff]; + return offset + find_first_bit(p, size); } /* @@ -1031,49 +924,6 @@ ext2_find_first_zero_bit(void *vaddr, unsigned int size) return (res < size) ? res : size; } -static inline int -ext2_find_next_zero_bit(void *vaddr, unsigned int size, unsigned offset) -{ - unsigned long *addr = vaddr; - unsigned long *p = addr + (offset >> 5); - unsigned long word, reg; - unsigned int bit = offset & 31UL, res; - - if (offset >= size) - return size; - - if (bit) { - __asm__(" ic %0,0(%1)\n" - " icm %0,2,1(%1)\n" - " icm %0,4,2(%1)\n" - " icm %0,8,3(%1)" - : "=&a" (word) : "a" (p) : "cc" ); - word >>= bit; - res = bit; - /* Look for zero in first longword */ - __asm__(" lhi %2,0xff\n" - " tml %1,0xffff\n" - " jno 0f\n" - " ahi %0,16\n" - " srl %1,16\n" - "0: tml %1,0x00ff\n" - " jno 1f\n" - " ahi %0,8\n" - " srl %1,8\n" - "1: nr %1,%2\n" - " ic %1,0(%1,%3)\n" - " alr %0,%1" - : "+&d" (res), "+&a" (word), "=&d" (reg) - : "a" (&_zb_findmap) : "cc" ); - if (res < 32) - return (p - addr)*32 + res; - p++; - } - /* No zero yet, search remaining full bytes for a zero */ - res = ext2_find_first_zero_bit (p, size - 32 * (p - addr)); - return (p - addr) * 32 + res; -} - #else /* __s390x__ */ static inline unsigned long @@ -1120,56 +970,46 @@ ext2_find_first_zero_bit(void *vaddr, unsigned long size) return (res < size) ? 
res : size; } -static inline unsigned long +#endif /* __s390x__ */ + +static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset) { - unsigned long *addr = vaddr; - unsigned long *p = addr + (offset >> 6); - unsigned long word, reg; - unsigned long bit = offset & 63UL, res; + unsigned long *addr = vaddr, *p; + unsigned long word, bit, set; if (offset >= size) return size; - + bit = offset & (__BITOPS_WORDSIZE - 1); + offset -= bit; + size -= offset; + p = addr + offset / __BITOPS_WORDSIZE; if (bit) { - __asm__(" lrvg %0,%1" /* load reversed, neat instruction */ - : "=a" (word) : "m" (*p) ); - word >>= bit; - res = bit; - /* Look for zero in first 8 byte word */ - __asm__(" lghi %2,0xff\n" - " tmll %1,0xffff\n" - " jno 2f\n" - " ahi %0,16\n" - " srlg %1,%1,16\n" - "0: tmll %1,0xffff\n" - " jno 2f\n" - " ahi %0,16\n" - " srlg %1,%1,16\n" - "1: tmll %1,0xffff\n" - " jno 2f\n" - " ahi %0,16\n" - " srl %1,16\n" - "2: tmll %1,0x00ff\n" - " jno 3f\n" - " ahi %0,8\n" - " srl %1,8\n" - "3: ngr %1,%2\n" - " ic %1,0(%1,%3)\n" - " alr %0,%1" - : "+&d" (res), "+a" (word), "=&d" (reg) - : "a" (&_zb_findmap) : "cc" ); - if (res < 64) - return (p - addr)*64 + res; - p++; +#ifndef __s390x__ + asm(" ic %0,0(%1)\n" + " icm %0,2,1(%1)\n" + " icm %0,4,2(%1)\n" + " icm %0,8,3(%1)" + : "=&a" (word) : "a" (p), "m" (*p) : "cc" ); +#else + asm(" lrvg %0,%1" : "=a" (word) : "m" (*p) ); +#endif + /* + * s390 version of ffz returns __BITOPS_WORDSIZE + * if no zero bit is present in the word. + */ + set = ffz(word >> bit) + bit; + if (set >= size) + return size + offset; + if (set < __BITOPS_WORDSIZE) + return set + offset; + offset += __BITOPS_WORDSIZE; + size -= __BITOPS_WORDSIZE; + p++; } - /* No zero yet, search remaining full bytes for a zero */ - res = ext2_find_first_zero_bit (p, size - 64 * (p - addr)); - return (p - addr) * 64 + res; + return offset + ext2_find_first_zero_bit(p, size); } -#endif /* __s390x__ */ - /* Bitmap functions for the minix filesystem. */ /* FIXME !!! */ #define minix_test_and_set_bit(nr,addr) \ -- cgit v1.2.1 From 46ee058cdb3abab9313cc9cb9e9927d7672a718c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 27 Jul 2005 11:44:59 -0700 Subject: [PATCH] s390: atomic64 inline functions The atomic64 primitives are supposed to have 64-bit parameters instead of int. 
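A small, hypothetical caller (not from the patch) makes the bug concrete: with the old int parameters, the addend below was truncated to 0 before it ever reached the compare-and-swap loop.

/* Sketch only; atomic64_t as defined in asm-s390/atomic.h. */
static atomic64_t counter = { 0 };

static void add_big_value(void)
{
	long long delta = 1LL << 40;	/* does not fit in 32 bits */

	/* old prototype: delta was truncated to int, so 0 was added */
	/* new prototype: the full 64-bit value reaches __CSG_LOOP   */
	atomic64_add(delta, &counter);
}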
Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/atomic.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/asm-s390') diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h index d5a05cf47168..9d86ba6f12d0 100644 --- a/include/asm-s390/atomic.h +++ b/include/asm-s390/atomic.h @@ -123,19 +123,19 @@ typedef struct { #define atomic64_read(v) ((v)->counter) #define atomic64_set(v,i) (((v)->counter) = (i)) -static __inline__ void atomic64_add(int i, atomic64_t * v) +static __inline__ void atomic64_add(long long i, atomic64_t * v) { __CSG_LOOP(v, i, "agr"); } -static __inline__ long long atomic64_add_return(int i, atomic64_t * v) +static __inline__ long long atomic64_add_return(long long i, atomic64_t * v) { return __CSG_LOOP(v, i, "agr"); } -static __inline__ long long atomic64_add_negative(int i, atomic64_t * v) +static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v) { return __CSG_LOOP(v, i, "agr") < 0; } -static __inline__ void atomic64_sub(int i, atomic64_t * v) +static __inline__ void atomic64_sub(long long i, atomic64_t * v) { __CSG_LOOP(v, i, "sgr"); } -- cgit v1.2.1 From d2013485a52fb7ece48e5688b443cc098f4dbbdf Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 1 Aug 2005 21:11:34 -0700 Subject: [PATCH] s390: ioprio & inotify system calls. Add system calls for io priorities and inotify. Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/unistd.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include/asm-s390') diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h index 363db45f8d07..221e965da924 100644 --- a/include/asm-s390/unistd.h +++ b/include/asm-s390/unistd.h @@ -274,8 +274,13 @@ #define __NR_request_key 279 #define __NR_keyctl 280 #define __NR_waitid 281 +#define __NR_ioprio_set 282 +#define __NR_ioprio_get 283 +#define __NR_inotify_init 284 +#define __NR_inotify_add_watch 285 +#define __NR_inotify_rm_watch 286 -#define NR_syscalls 282 +#define NR_syscalls 287 /* * There are some system calls that are not present on 64 bit, some -- cgit v1.2.1
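To see the new numbers in action, a hypothetical userspace smoke test (not part of the patch; the number 284 is taken from the hunk above) calls one of the freshly wired syscalls through the generic syscall(2) wrapper:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* __NR_inotify_init is 284 on s390 with this patch applied */
	long fd = syscall(284);

	if (fd < 0) {
		perror("inotify_init");
		return 1;
	}
	printf("inotify fd %ld\n", fd);
	return 0;
}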