author    Jeff Garzik <jeff@garzik.org>  2006-09-04 06:42:01 -0400
committer Jeff Garzik <jeff@garzik.org>  2006-09-04 06:42:01 -0400
commit    fc851fad00d7fa1bf4ac7034d9ba8041bf482d50 (patch)
tree      779a8b572d1701ef2c46755df751d9275650cdf4 /include/asm-x86_64
parent    85cd7251b9112e3dabeac9fd3b175601ca607241 (diff)
parent    f9bcda7760e1373615c9f6d9ce24209b0ab97de1 (diff)
Merge branch 'upstream' into pata-drivers
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/alternative.h | 21
-rw-r--r--  include/asm-x86_64/processor.h   |  6
-rw-r--r--  include/asm-x86_64/spinlock.h    | 11
-rw-r--r--  include/asm-x86_64/unistd.h      | 11
-rw-r--r--  include/asm-x86_64/unwind.h      |  1
5 files changed, 18 insertions(+), 32 deletions(-)
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index aa67bfd1b3ce..a584826cc570 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <asm/cpufeature.h>
struct alt_instr {
u8 *instr; /* original instruction */
@@ -102,9 +103,6 @@ static inline void alternatives_smp_switch(int smp) {}
/*
* Alternative inline assembly for SMP.
*
- * alternative_smp() takes two versions (SMP first, UP second) and is
- * for more complex stuff such as spinlocks.
- *
* The LOCK_PREFIX macro defined here replaces the LOCK and
* LOCK_PREFIX macros used everywhere in the source tree.
*
@@ -124,21 +122,6 @@ static inline void alternatives_smp_switch(int smp) {}
*/
#ifdef CONFIG_SMP
-#define alternative_smp(smpinstr, upinstr, args...) \
- asm volatile ("661:\n\t" smpinstr "\n662:\n" \
- ".section .smp_altinstructions,\"a\"\n" \
- " .align 8\n" \
- " .quad 661b\n" /* label */ \
- " .quad 663f\n" /* new instruction */ \
- " .byte 0x66\n" /* X86_FEATURE_UP */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .smp_altinstr_replacement,\"awx\"\n" \
- "663:\n\t" upinstr "\n" /* replacement */ \
- "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
- ".previous" : args)
-
#define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \
" .align 8\n" \
@@ -147,8 +130,6 @@ static inline void alternatives_smp_switch(int smp) {}
"661:\n\tlock; "
#else /* ! CONFIG_SMP */
-#define alternative_smp(smpinstr, upinstr, args...) \
- asm volatile (upinstr : args)
#define LOCK_PREFIX ""
#endif
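
For context on the macro that survives this hunk: LOCK_PREFIX records the address of each lock prefix in the .smp_locks section, so the kernel can patch the prefix to a NOP when it finds itself running uniprocessor. Callers simply prepend it to a locked instruction; a minimal sketch, modeled on this architecture's atomic_inc() (the _sketch name is ours):

	/* Sketch modeled on asm-x86_64's atomic_inc(). Note LOCK_PREFIX
	 * already ends in "lock; " (see its definition above), so only the
	 * instruction itself follows it. On SMP the prefix executes as-is;
	 * on UP boot code can NOP it out via the .smp_locks entry. */
	static inline void atomic_inc_sketch(atomic_t *v)
	{
		asm volatile(LOCK_PREFIX "incl %0"
			     : "=m" (v->counter)
			     : "m" (v->counter));
	}
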
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 3b3c1217fe61..de9c3147ee4c 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -232,8 +232,14 @@ struct tss_struct {
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
} __attribute__((packed)) ____cacheline_aligned;
+
extern struct cpuinfo_x86 boot_cpu_data;
DECLARE_PER_CPU(struct tss_struct,init_tss);
+/* Save the original ist values for checking stack pointers during debugging */
+struct orig_ist {
+ unsigned long ist[7];
+};
+DECLARE_PER_CPU(struct orig_ist, orig_ist);
#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
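
The DECLARE_PER_CPU(orig_ist) above pairs with a definition in arch code. A hedged sketch of how the original IST values would be captured at CPU init, per the comment in the hunk (helper name hypothetical; the seven-entry loop mirrors struct orig_ist):

	/* Hypothetical helper: copy the IST stack tops out of the TSS
	 * before they are ever adjusted, so debug code can later check
	 * whether a stack pointer lies within an exception stack. */
	DEFINE_PER_CPU(struct orig_ist, orig_ist);

	static void save_orig_ist_sketch(struct tss_struct *t, int cpu)
	{
		struct orig_ist *oist = &per_cpu(orig_ist, cpu);
		int i;

		for (i = 0; i < 7; i++)
			oist->ist[i] = t->ist[i];
	}
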
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 8d3421996f94..248a79f0eaff 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -21,7 +21,7 @@
#define __raw_spin_lock_string \
"\n1:\t" \
- "lock ; decl %0\n\t" \
+ LOCK_PREFIX " ; decl %0\n\t" \
"js 2f\n" \
LOCK_SECTION_START("") \
"2:\t" \
@@ -40,10 +40,7 @@
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
- alternative_smp(
- __raw_spin_lock_string,
- __raw_spin_lock_string_up,
- "=m" (lock->slock) : : "memory");
+ asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -125,12 +122,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
- asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+ asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
- asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
+ asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
: "=m" (rw->lock) : : "memory");
}
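
With alternative_smp() gone, the lock path is plain inline assembly again; only the LOCK_PREFIX bookkeeping distinguishes SMP from UP builds, and the decl-based algorithm itself is unchanged. A self-contained userspace illustration of the same idea (assumptions: x86, GCC inline asm; demo_spinlock_t is ours, not the kernel's raw_spinlock_t):

	typedef struct { volatile int slock; } demo_spinlock_t; /* 1 = unlocked */

	static inline void demo_spin_lock(demo_spinlock_t *lock)
	{
		asm volatile(
			"\n1:\t"
			"lock ; decl %0\n\t"	/* atomically claim: 1 -> 0   */
			"jns 3f\n\t"		/* result >= 0: we own it     */
			"2:\t"
			"rep;nop\n\t"		/* pause while contended      */
			"cmpl $0,%0\n\t"
			"jle 2b\n\t"		/* still held: keep spinning  */
			"jmp 1b\n"		/* looks free: retry the decl */
			"3:"
			: "=m" (lock->slock) : : "memory");
	}

	static inline void demo_spin_unlock(demo_spinlock_t *lock)
	{
		/* A plain store suffices on x86: stores are not reordered
		 * with earlier stores, and the "memory" clobber orders the
		 * compiler. */
		asm volatile("movl $1,%0" : "=m" (lock->slock) : : "memory");
	}
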
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 94387c915e53..2d89d309a2a8 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -620,8 +620,6 @@ __SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_move_pages 279
__SYSCALL(__NR_move_pages, sys_move_pages)
-#ifdef __KERNEL__
-
#define __NR_syscall_max __NR_move_pages
#ifndef __NO_STUBS
@@ -746,6 +744,8 @@ __syscall_return(type,__res); \
#else /* __KERNEL_SYSCALLS__ */
+#ifdef __KERNEL__
+
#include <linux/syscalls.h>
#include <asm/ptrace.h>
@@ -838,9 +838,9 @@ asmlinkage long sys_rt_sigaction(int sig,
struct sigaction __user *oact,
size_t sigsetsize);
-#endif /* __ASSEMBLY__ */
+#endif
-#endif /* __NO_STUBS */
+#endif
/*
* "Conditional" syscalls
@@ -850,5 +850,6 @@ asmlinkage long sys_rt_sigaction(int sig,
*/
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#endif /* __KERNEL__ */
+#endif
+
#endif
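
On the "Conditional" syscalls comment above: cond_syscall() makes the named entry point a weak alias for sys_ni_syscall, so configuring the implementation out cannot break the link. A small userspace demo of the same weak-alias trick (GCC/binutils assumed; sys_acct is a real conditional syscall, but this program is purely illustrative):

	#include <stdio.h>

	long sys_ni_syscall(void) { return -38; }	/* -ENOSYS */

	/* Exactly what cond_syscall(sys_acct) would expand to: */
	asm(".weak\tsys_acct\n\t.set\tsys_acct,sys_ni_syscall");
	long sys_acct(void);

	int main(void)
	{
		/* No strong sys_acct is defined here, so the weak alias
		 * wins and this prints -38 (-ENOSYS). */
		printf("%ld\n", sys_acct());
		return 0;
	}
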
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
index f3e7124effe3..1f6e9bfb569e 100644
--- a/include/asm-x86_64/unwind.h
+++ b/include/asm-x86_64/unwind.h
@@ -95,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
#else
#define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)
static inline int arch_unw_user_mode(const void *info)
{