Diffstat (limited to 'arch/powerpc/include'): 65 files changed, 1333 insertions, 859 deletions
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 5ab7d7fe198c..9268602de5d0 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -35,3 +35,4 @@ unifdef-y += spu_info.h unifdef-y += termios.h unifdef-y += types.h unifdef-y += unistd.h +unifdef-y += swab.h diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index f3fc733758f5..b401950f5259 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -5,7 +5,7 @@ * PowerPC atomic operations */ -typedef struct { int counter; } atomic_t; +#include <linux/types.h> #ifdef __KERNEL__ #include <linux/compiler.h> @@ -111,7 +111,7 @@ static __inline__ void atomic_inc(atomic_t *v) bne- 1b" : "=&r" (t), "+m" (v->counter) : "r" (&v->counter) - : "cc"); + : "cc", "xer"); } static __inline__ int atomic_inc_return(atomic_t *v) @@ -128,7 +128,7 @@ static __inline__ int atomic_inc_return(atomic_t *v) ISYNC_ON_SMP : "=&r" (t) : "r" (&v->counter) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } @@ -155,7 +155,7 @@ static __inline__ void atomic_dec(atomic_t *v) bne- 1b" : "=&r" (t), "+m" (v->counter) : "r" (&v->counter) - : "cc"); + : "cc", "xer"); } static __inline__ int atomic_dec_return(atomic_t *v) @@ -172,7 +172,7 @@ static __inline__ int atomic_dec_return(atomic_t *v) ISYNC_ON_SMP : "=&r" (t) : "r" (&v->counter) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } @@ -251,8 +251,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) #ifdef __powerpc64__ -typedef struct { long counter; } atomic64_t; - #define ATOMIC64_INIT(i) { (i) } static __inline__ long atomic64_read(const atomic64_t *v) @@ -346,7 +344,7 @@ static __inline__ void atomic64_inc(atomic64_t *v) bne- 1b" : "=&r" (t), "+m" (v->counter) : "r" (&v->counter) - : "cc"); + : "cc", "xer"); } static __inline__ long atomic64_inc_return(atomic64_t *v) @@ -362,7 +360,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v) ISYNC_ON_SMP : "=&r" (t) : "r" (&v->counter) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } @@ -388,7 +386,7 @@ static __inline__ void atomic64_dec(atomic64_t *v) bne- 1b" : "=&r" (t), "+m" (v->counter) : "r" (&v->counter) - : "cc"); + : "cc", "xer"); } static __inline__ long atomic64_dec_return(atomic64_t *v) @@ -404,7 +402,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v) ISYNC_ON_SMP : "=&r" (t) : "r" (&v->counter) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } @@ -431,7 +429,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) "\n\ 2:" : "=&r" (t) : "r" (&v->counter) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index e55d1f66b86f..64e1fdca233e 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -3,6 +3,7 @@ #ifdef __KERNEL__ #include <asm/asm-compat.h> + /* * Define an illegal instr to trap on the bug. 
* We don't use 0 because that marks the end of a function @@ -14,6 +15,7 @@ #ifdef CONFIG_BUG #ifdef __ASSEMBLY__ +#include <asm/asm-offsets.h> #ifdef CONFIG_DEBUG_BUGVERBOSE .macro EMIT_BUG_ENTRY addr,file,line,flags .section __bug_table,"a" @@ -26,7 +28,7 @@ .previous .endm #else - .macro EMIT_BUG_ENTRY addr,file,line,flags +.macro EMIT_BUG_ENTRY addr,file,line,flags .section __bug_table,"a" 5001: PPC_LONG \addr .short \flags @@ -113,6 +115,13 @@ #define HAVE_ARCH_BUG_ON #define HAVE_ARCH_WARN_ON #endif /* __ASSEMBLY __ */ +#else +#ifdef __ASSEMBLY__ +.macro EMIT_BUG_ENTRY addr,file,line,flags +.endm +#else /* !__ASSEMBLY__ */ +#define _EMIT_BUG_ENTRY +#endif #endif /* CONFIG_BUG */ #include <asm-generic/bug.h> diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h index b37752214a16..5cca27a41532 100644 --- a/arch/powerpc/include/asm/byteorder.h +++ b/arch/powerpc/include/asm/byteorder.h @@ -8,82 +8,7 @@ * 2 of the License, or (at your option) any later version. */ -#include <asm/types.h> -#include <linux/compiler.h> - -#ifdef __GNUC__ -#ifdef __KERNEL__ - -static __inline__ __u16 ld_le16(const volatile __u16 *addr) -{ - __u16 val; - - __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); - return val; -} - -static __inline__ void st_le16(volatile __u16 *addr, const __u16 val) -{ - __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); -} - -static __inline__ __u32 ld_le32(const volatile __u32 *addr) -{ - __u32 val; - - __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); - return val; -} - -static __inline__ void st_le32(volatile __u32 *addr, const __u32 val) -{ - __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); -} - -static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value) -{ - __u16 result; - - __asm__("rlwimi %0,%1,8,16,23" - : "=r" (result) - : "r" (value), "0" (value >> 8)); - return result; -} - -static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value) -{ - __u32 result; - - __asm__("rlwimi %0,%1,24,16,23\n\t" - "rlwimi %0,%1,8,8,15\n\t" - "rlwimi %0,%1,24,0,7" - : "=r" (result) - : "r" (value), "0" (value >> 24)); - return result; -} - -#define __arch__swab16(x) ___arch__swab16(x) -#define __arch__swab32(x) ___arch__swab32(x) - -/* The same, but returns converted value from the location pointer by addr. */ -#define __arch__swab16p(addr) ld_le16(addr) -#define __arch__swab32p(addr) ld_le32(addr) - -/* The same, but do the conversion in situ, ie. put the value back to addr. 
*/ -#define __arch__swab16s(addr) st_le16(addr,*addr) -#define __arch__swab32s(addr) st_le32(addr,*addr) - -#endif /* __KERNEL__ */ - -#ifndef __STRICT_ANSI__ -#define __BYTEORDER_HAS_U64__ -#ifndef __powerpc64__ -#define __SWAB_64_THRU_32__ -#endif /* __powerpc64__ */ -#endif /* __STRICT_ANSI__ */ - -#endif /* __GNUC__ */ - +#include <asm/swab.h> #include <linux/byteorder/big_endian.h> #endif /* _ASM_POWERPC_BYTEORDER_H */ diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 1e94b07a020e..4911104791c3 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -82,6 +82,7 @@ struct cpu_spec { char *cpu_name; unsigned long cpu_features; /* Kernel features */ unsigned int cpu_user_features; /* Userland features */ + unsigned int mmu_features; /* MMU features */ /* cache line sizes */ unsigned int icache_bsize; @@ -144,17 +145,14 @@ extern const char *powerpc_base_platform; #define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040) #define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080) #define CPU_FTR_601 ASM_CONST(0x0000000000000100) -#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200) #define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400) #define CPU_FTR_L3CR ASM_CONST(0x0000000000000800) #define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000) #define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000) #define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000) #define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000) -#define CPU_FTR_HAS_HIGH_BATS ASM_CONST(0x0000000000010000) #define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) #define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) -#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000) #define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) #define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000) #define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000) @@ -163,6 +161,8 @@ extern const char *powerpc_base_platform; #define CPU_FTR_SPE ASM_CONST(0x0000000002000000) #define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000) #define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000) +#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000) +#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000) /* * Add the 64-bit processor unique features in the top half of the word; @@ -177,7 +177,6 @@ extern const char *powerpc_base_platform; #define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000) #define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000) #define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000) -#define CPU_FTR_NOEXECUTE LONG_ASM_CONST(0x0000000800000000) #define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) #define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) #define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000) @@ -194,6 +193,7 @@ extern const char *powerpc_base_platform; #define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) #define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000) #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000) +#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000) #ifndef __ASSEMBLY__ @@ -264,164 +264,159 @@ extern const char *powerpc_base_platform; !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \ !defined(CONFIG_BOOKE)) -#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE | \ +#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE) #define CPU_FTRS_603 (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ 
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_604 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_PPC_LE) + CPU_FTR_USE_TB | CPU_FTR_PPC_LE) #define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_740 (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ - CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_PPC_LE) #define CPU_FTRS_750 (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ - CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_PPC_LE) -#define CPU_FTRS_750CL (CPU_FTRS_750 | CPU_FTR_HAS_HIGH_BATS) +#define CPU_FTRS_750CL (CPU_FTRS_750) #define CPU_FTRS_750FX1 (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM) #define CPU_FTRS_750FX2 (CPU_FTRS_750 | CPU_FTR_NO_DPM) -#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | \ - CPU_FTR_HAS_HIGH_BATS) +#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX) #define CPU_FTRS_750GX (CPU_FTRS_750FX) #define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ - CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \ + CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_7400 (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ - CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \ + CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE) #define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) #define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \ - CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) + CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) #define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \ - CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) + CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE) #define CPU_FTRS_7455 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | 
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \ CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7447 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7447A (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_7448 (CPU_FTR_COMMON | \ CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \ - CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \ CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_82XX (CPU_FTR_COMMON | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB) #define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS) + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP) #define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_COMMON) #define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \ + CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE) -#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | \ - CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE) +#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_USE_TB) #define CPU_FTRS_8XX (CPU_FTR_USE_TB) -#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN) -#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN) +#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) +#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) +#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \ + CPU_FTR_INDEXED_DCR) #define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ - CPU_FTR_UNIFIED_ID_CACHE) + CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ - CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN) + CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_NOEXECUTE) #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ - CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | \ - CPU_FTR_NODSISRALIGN) + CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_NODSISRALIGN | 
CPU_FTR_NOEXECUTE) #define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \ - CPU_FTR_L2CSR | CPU_FTR_LWSYNC) + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE) #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ #define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE) + CPU_FTR_IABR | CPU_FTR_PPC_LE) #define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \ + CPU_FTR_IABR | \ CPU_FTR_MMCRA | CPU_FTR_CTRL) #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ) #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ CPU_FTR_CP_USE_DCBTZ) #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ CPU_FTR_PURR) #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR) + CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD) #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR | CPU_FTR_SAO) #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \ - CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ) + CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ + CPU_FTR_UNALIGNED_LD_STD) #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ + CPU_FTR_PPCAS_ARCH_V2 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B) -#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2) +#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) #ifdef __powerpc64__ #define CPU_FTRS_POSSIBLE \ @@ -452,7 +447,7 @@ enum { CPU_FTRS_40X | #endif #ifdef CONFIG_44x - CPU_FTRS_44X | + CPU_FTRS_44X | CPU_FTRS_440x6 | #endif #ifdef CONFIG_E200 CPU_FTRS_E200 | @@ -492,7 +487,7 @@ enum { CPU_FTRS_40X & #endif #ifdef CONFIG_44x - CPU_FTRS_44X & + CPU_FTRS_44X & CPU_FTRS_440x6 & #endif #ifdef CONFIG_E200 CPU_FTRS_E200 & diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h index 72d2b72c7390..7d2e6235726d 100644 --- a/arch/powerpc/include/asm/dcr-native.h +++ b/arch/powerpc/include/asm/dcr-native.h @@ -23,6 +23,7 @@ #ifndef __ASSEMBLY__ #include <linux/spinlock.h> +#include 
<asm/cputable.h> typedef struct { unsigned int base; @@ -39,23 +40,45 @@ static inline bool dcr_map_ok_native(dcr_host_native_t host) #define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base) #define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value) -/* Device Control Registers */ -void __mtdcr(int reg, unsigned int val); -unsigned int __mfdcr(int reg); +/* Table based DCR accessors */ +extern void __mtdcr(unsigned int reg, unsigned int val); +extern unsigned int __mfdcr(unsigned int reg); + +/* mfdcrx/mtdcrx instruction based accessors. We hand code + * the opcodes in order not to depend on newer binutils + */ +static inline unsigned int mfdcrx(unsigned int reg) +{ + unsigned int ret; + asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)" + : "=r" (ret) : "r" (reg)); + return ret; +} + +static inline void mtdcrx(unsigned int reg, unsigned int val) +{ + asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)" + : : "r" (val), "r" (reg)); +} + #define mfdcr(rn) \ ({unsigned int rval; \ - if (__builtin_constant_p(rn)) \ + if (__builtin_constant_p(rn) && rn < 1024) \ asm volatile("mfdcr %0," __stringify(rn) \ : "=r" (rval)); \ + else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ + rval = mfdcrx(rn); \ else \ rval = __mfdcr(rn); \ rval;}) #define mtdcr(rn, v) \ do { \ - if (__builtin_constant_p(rn)) \ + if (__builtin_constant_p(rn) && rn < 1024) \ asm volatile("mtdcr " __stringify(rn) ",%0" \ : : "r" (v)); \ + else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ + mtdcrx(rn, v); \ else \ __mtdcr(rn, v); \ } while (0) @@ -69,8 +92,13 @@ static inline unsigned __mfdcri(int base_addr, int base_data, int reg) unsigned int val; spin_lock_irqsave(&dcr_ind_lock, flags); - __mtdcr(base_addr, reg); - val = __mfdcr(base_data); + if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) { + mtdcrx(base_addr, reg); + val = mfdcrx(base_data); + } else { + __mtdcr(base_addr, reg); + val = __mfdcr(base_data); + } spin_unlock_irqrestore(&dcr_ind_lock, flags); return val; } @@ -81,8 +109,13 @@ static inline void __mtdcri(int base_addr, int base_data, int reg, unsigned long flags; spin_lock_irqsave(&dcr_ind_lock, flags); - __mtdcr(base_addr, reg); - __mtdcr(base_data, val); + if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) { + mtdcrx(base_addr, reg); + mtdcrx(base_data, val); + } else { + __mtdcr(base_addr, reg); + __mtdcr(base_data, val); + } spin_unlock_irqrestore(&dcr_ind_lock, flags); } @@ -93,9 +126,15 @@ static inline void __dcri_clrset(int base_addr, int base_data, int reg, unsigned int val; spin_lock_irqsave(&dcr_ind_lock, flags); - __mtdcr(base_addr, reg); - val = (__mfdcr(base_data) & ~clr) | set; - __mtdcr(base_data, val); + if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) { + mtdcrx(base_addr, reg); + val = (mfdcrx(base_data) & ~clr) | set; + mtdcrx(base_data, val); + } else { + __mtdcr(base_addr, reg); + val = (__mfdcr(base_data) & ~clr) | set; + __mtdcr(base_data, val); + } spin_unlock_irqrestore(&dcr_ind_lock, flags); } diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h index d13fb68bb5c0..9d6851cfb841 100644 --- a/arch/powerpc/include/asm/dcr.h +++ b/arch/powerpc/include/asm/dcr.h @@ -68,9 +68,9 @@ typedef dcr_host_mmio_t dcr_host_t; * additional helpers to read the DCR * base from the device-tree */ struct device_node; -extern unsigned int dcr_resource_start(struct device_node *np, +extern unsigned int dcr_resource_start(const struct device_node *np, unsigned int index); -extern unsigned int dcr_resource_len(struct device_node *np, +extern unsigned int 
dcr_resource_len(const struct device_node *np, unsigned int index); #endif /* CONFIG_PPC_DCR */ #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index dfd504caccc1..7d2277cef09a 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -18,4 +18,16 @@ struct dev_archdata { void *dma_data; }; +static inline void dev_archdata_set_node(struct dev_archdata *ad, + struct device_node *np) +{ + ad->of_node = np; +} + +static inline struct device_node * +dev_archdata_get_node(const struct dev_archdata *ad) +{ + return ad->of_node; +} + #endif /* _ASM_POWERPC_DEVICE_H */ diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h new file mode 100644 index 000000000000..9b198d1b3b2b --- /dev/null +++ b/arch/powerpc/include/asm/disassemble.h @@ -0,0 +1,80 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright IBM Corp. 2008 + * + * Authors: Hollis Blanchard <hollisb@us.ibm.com> + */ + +#ifndef __ASM_PPC_DISASSEMBLE_H__ +#define __ASM_PPC_DISASSEMBLE_H__ + +#include <linux/types.h> + +static inline unsigned int get_op(u32 inst) +{ + return inst >> 26; +} + +static inline unsigned int get_xop(u32 inst) +{ + return (inst >> 1) & 0x3ff; +} + +static inline unsigned int get_sprn(u32 inst) +{ + return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); +} + +static inline unsigned int get_dcrn(u32 inst) +{ + return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); +} + +static inline unsigned int get_rt(u32 inst) +{ + return (inst >> 21) & 0x1f; +} + +static inline unsigned int get_rs(u32 inst) +{ + return (inst >> 21) & 0x1f; +} + +static inline unsigned int get_ra(u32 inst) +{ + return (inst >> 16) & 0x1f; +} + +static inline unsigned int get_rb(u32 inst) +{ + return (inst >> 11) & 0x1f; +} + +static inline unsigned int get_rc(u32 inst) +{ + return inst & 0x1; +} + +static inline unsigned int get_ws(u32 inst) +{ + return (inst >> 11) & 0x1f; +} + +static inline unsigned int get_d(u32 inst) +{ + return inst & 0xffff; +} + +#endif /* __ASM_PPC_DISASSEMBLE_H__ */ diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index fddb229bd74f..86cef7ddc8d5 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -60,12 +60,6 @@ struct dma_mapping_ops { dma_addr_t *dma_handle, gfp_t flag); void (*free_coherent)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); - dma_addr_t (*map_single)(struct device *dev, void *ptr, - size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs); - void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs); int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, struct dma_attrs *attrs); @@ -82,6 +76,22 @@ 
struct dma_mapping_ops { dma_addr_t dma_address, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs); +#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS + void (*sync_single_range_for_cpu)(struct device *hwdev, + dma_addr_t dma_handle, unsigned long offset, + size_t size, + enum dma_data_direction direction); + void (*sync_single_range_for_device)(struct device *hwdev, + dma_addr_t dma_handle, unsigned long offset, + size_t size, + enum dma_data_direction direction); + void (*sync_sg_for_cpu)(struct device *hwdev, + struct scatterlist *sg, int nelems, + enum dma_data_direction direction); + void (*sync_sg_for_device)(struct device *hwdev, + struct scatterlist *sg, int nelems, + enum dma_data_direction direction); +#endif }; /* @@ -149,10 +159,9 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) } /* - * TODO: map_/unmap_single will ideally go away, to be completely - * replaced by map/unmap_page. Until then, we allow dma_ops to have - * one or the other, or both by checking to see if the specific - * function requested exists; and if not, falling back on the other set. + * map_/unmap_single actually call through to map/unmap_page now that all the + * dma_mapping_ops have been converted over. We just have to get the page and + * offset to pass through to map_page */ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *cpu_addr, @@ -164,10 +173,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, BUG_ON(!dma_ops); - if (dma_ops->map_single) - return dma_ops->map_single(dev, cpu_addr, size, direction, - attrs); - return dma_ops->map_page(dev, virt_to_page(cpu_addr), (unsigned long)cpu_addr % PAGE_SIZE, size, direction, attrs); @@ -183,11 +188,6 @@ static inline void dma_unmap_single_attrs(struct device *dev, BUG_ON(!dma_ops); - if (dma_ops->unmap_single) { - dma_ops->unmap_single(dev, dma_addr, size, direction, attrs); - return; - } - dma_ops->unmap_page(dev, dma_addr, size, direction, attrs); } @@ -201,12 +201,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, BUG_ON(!dma_ops); - if (dma_ops->map_page) - return dma_ops->map_page(dev, page, offset, size, direction, - attrs); - - return dma_ops->map_single(dev, page_address(page) + offset, size, - direction, attrs); + return dma_ops->map_page(dev, page, offset, size, direction, attrs); } static inline void dma_unmap_page_attrs(struct device *dev, @@ -219,12 +214,7 @@ static inline void dma_unmap_page_attrs(struct device *dev, BUG_ON(!dma_ops); - if (dma_ops->unmap_page) { - dma_ops->unmap_page(dev, dma_address, size, direction, attrs); - return; - } - - dma_ops->unmap_single(dev, dma_address, size, direction, attrs); + dma_ops->unmap_page(dev, dma_address, size, direction, attrs); } static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, @@ -308,47 +298,107 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL); } +#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { - BUG_ON(direction == DMA_NONE); - __dma_sync(bus_to_virt(dma_handle), size, direction); + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + + BUG_ON(!dma_ops); + dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0, + size, direction); } static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { - 
BUG_ON(direction == DMA_NONE); - __dma_sync(bus_to_virt(dma_handle), size, direction); + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + + BUG_ON(!dma_ops); + dma_ops->sync_single_range_for_device(dev, dma_handle, + 0, size, direction); } static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { - struct scatterlist *sg; - int i; + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - BUG_ON(direction == DMA_NONE); + BUG_ON(!dma_ops); + dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction); +} + +static inline void dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, + enum dma_data_direction direction) +{ + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + + BUG_ON(!dma_ops); + dma_ops->sync_sg_for_device(dev, sgl, nents, direction); +} + +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - for_each_sg(sgl, sg, nents, i) - __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); + BUG_ON(!dma_ops); + dma_ops->sync_single_range_for_cpu(dev, dma_handle, + offset, size, direction); +} + +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + + BUG_ON(!dma_ops); + dma_ops->sync_single_range_for_device(dev, dma_handle, offset, + size, direction); +} +#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */ +static inline void dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ +} + +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ +} + +static inline void dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, + enum dma_data_direction direction) +{ } static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { - struct scatterlist *sg; - int i; +} - BUG_ON(direction == DMA_NONE); +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ +} - for_each_sg(sgl, sg, nents, i) - __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ } +#endif static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { @@ -382,22 +432,6 @@ static inline int dma_get_cache_alignment(void) #endif } -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - /* just sync everything for now */ - dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction); -} - -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - /* just sync everything for now */ - dma_sync_single_for_device(dev, dma_handle, offset + size, direction); -} - static inline void dma_cache_sync(struct device *dev, 
void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index b886bec67016..66ea9b8b95c5 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -17,8 +17,8 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifndef _PPC64_EEH_H -#define _PPC64_EEH_H +#ifndef _POWERPC_EEH_H +#define _POWERPC_EEH_H #ifdef __KERNEL__ #include <linux/init.h> @@ -110,6 +110,7 @@ static inline void eeh_remove_bus_device(struct pci_dev *dev) { } #define EEH_IO_ERROR_VALUE(size) (-1UL) #endif /* CONFIG_EEH */ +#ifdef CONFIG_PPC64 /* * MMIO read/write operations with EEH support. */ @@ -207,5 +208,6 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf, eeh_check_failure(addr, *(u32*)buf); } +#endif /* CONFIG_PPC64 */ #endif /* __KERNEL__ */ -#endif /* _PPC64_EEH_H */ +#endif /* _POWERPC_EEH_H */ diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index d812929390e4..cd46f023ec6d 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -267,7 +267,7 @@ extern int ucache_bsize; #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack); + int uses_interp); #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index a1029967620b..e4094a5cb05b 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -81,6 +81,36 @@ label##5: \ #define ALT_FTR_SECTION_END_IFCLR(msk) \ ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97) +/* MMU feature dependent sections */ +#define BEGIN_MMU_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) +#define BEGIN_MMU_FTR_SECTION START_FTR_SECTION(97) + +#define END_MMU_FTR_SECTION_NESTED(msk, val, label) \ + FTR_SECTION_ELSE_NESTED(label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup) + +#define END_MMU_FTR_SECTION(msk, val) \ + END_MMU_FTR_SECTION_NESTED(msk, val, 97) + +#define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk)) +#define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0) + +/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */ +#define MMU_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label) +#define MMU_FTR_SECTION_ELSE MMU_FTR_SECTION_ELSE_NESTED(97) +#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup) +#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label) \ + ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label) +#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label) \ + ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label) +#define ALT_MMU_FTR_SECTION_END(msk, val) \ + ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97) +#define ALT_MMU_FTR_SECTION_END_IFSET(msk) \ + ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97) +#define ALT_MMU_FTR_SECTION_END_IFCLR(msk) \ + ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97) + /* Firmware feature dependent sections */ #define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) #define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97) diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index b298f7a631e6..e5f2ae8362f7 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -7,7 +7,19 @@ #ifndef 
__ASSEMBLY__ extern void _mcount(void); -#endif + +#ifdef CONFIG_DYNAMIC_FTRACE +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* reloction of mcount call site is the same as the address */ + return addr; +} + +struct dyn_arch_ftrace { + struct module *mod; +}; +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* __ASSEMBLY__ */ #endif diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h index 91c589520c0a..04e4a620952e 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h @@ -38,9 +38,24 @@ extern pte_t *pkmap_page_table; * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. */ -#define LAST_PKMAP (1 << PTE_SHIFT) -#define LAST_PKMAP_MASK (LAST_PKMAP-1) +/* + * We use one full pte table with 4K pages. And with 16K/64K pages pte + * table covers enough memory (32MB and 512MB resp.) that both FIXMAP + * and PKMAP can be placed in single pte table. We use 1024 pages for + * PKMAP in case of 16K/64K pages. + */ +#ifdef CONFIG_PPC_4K_PAGES +#define PKMAP_ORDER PTE_SHIFT +#else +#define PKMAP_ORDER 10 +#endif +#define LAST_PKMAP (1 << PKMAP_ORDER) +#ifndef CONFIG_PPC_4K_PAGES +#define PKMAP_BASE (FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) +#else #define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK) +#endif +#define LAST_PKMAP_MASK (LAST_PKMAP-1) #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) @@ -85,7 +100,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro BUG_ON(!pte_none(*(kmap_pte-idx))); #endif __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); - flush_tlb_page(NULL, vaddr); + local_flush_tlb_page(NULL, vaddr); return (void*) vaddr; } @@ -113,7 +128,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) * this pte without first remap it */ pte_clear(&init_mm, vaddr, kmap_pte-idx); - flush_tlb_page(NULL, vaddr); + local_flush_tlb_page(NULL, vaddr); #endif pagefault_enable(); } diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 26f0d0ab27a5..b1dafb6a9743 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -18,6 +18,12 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); /* + * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs + * to override the version in mm/hugetlb.c + */ +#define vma_mmu_pagesize vma_mmu_pagesize + +/* * If the arch doesn't supply something else, assume that hugepage * size aligned regions are ok without further preparation. */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 08266d2728b3..494cd8b0a278 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -713,13 +713,6 @@ static inline void * phys_to_virt(unsigned long address) */ #define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT) -/* We do NOT want virtual merging, it would put too much pressure on - * our iommu allocator. Instead, we want drivers to be smart enough - * to coalesce sglists that happen to have been mapped in a contiguous - * way by the iommu - */ -#define BIO_VMERGE_BOUNDARY 0 - /* * 32 bits still uses virt_to_bus() for it's implementation of DMA * mappings se we have to keep it defined here. 
We also have some old diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h index 279a6229584b..1842186d872c 100644 --- a/arch/powerpc/include/asm/ioctls.h +++ b/arch/powerpc/include/asm/ioctls.h @@ -89,6 +89,8 @@ #define TIOCSBRK 0x5427 /* BSD compatibility */ #define TIOCCBRK 0x5428 /* BSD compatibility */ #define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TIOCGRS485 0x542e +#define TIOCSRS485 0x542f #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h index b07ebb9784d3..5ebfe5d3c61f 100644 --- a/arch/powerpc/include/asm/kdump.h +++ b/arch/powerpc/include/asm/kdump.h @@ -1,6 +1,8 @@ #ifndef _PPC64_KDUMP_H #define _PPC64_KDUMP_H +#include <asm/page.h> + /* Kdump kernel runs at 32 MB, change at your peril. */ #define KDUMP_KERNELBASE 0x2000000 @@ -11,8 +13,19 @@ #ifdef CONFIG_CRASH_DUMP +/* + * On PPC64 translation is disabled during trampoline setup, so we use + * physical addresses. Though on PPC32 translation is already enabled, + * so we can't do the same. Luckily create_trampoline() creates relative + * branches, so we can just add the PAGE_OFFSET and don't worry about it. + */ +#ifdef __powerpc64__ #define KDUMP_TRAMPOLINE_START 0x0100 #define KDUMP_TRAMPOLINE_END 0x3000 +#else +#define KDUMP_TRAMPOLINE_START (0x0100 + PAGE_OFFSET) +#define KDUMP_TRAMPOLINE_END (0x3000 + PAGE_OFFSET) +#endif /* __powerpc64__ */ #define KDUMP_MIN_TCE_ENTRIES 2048 diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 3736d9b33289..7e06b43720d3 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -33,12 +33,12 @@ #ifndef __ASSEMBLY__ #include <linux/cpumask.h> +#include <asm/reg.h> typedef void (*crash_shutdown_t)(void); #ifdef CONFIG_KEXEC -#ifdef __powerpc64__ /* * This function is responsible for capturing register states if coming * via panic or invoking dump using sysrq-trigger. @@ -48,67 +48,9 @@ static inline void crash_setup_regs(struct pt_regs *newregs, { if (oldregs) memcpy(newregs, oldregs, sizeof(*newregs)); - else { - /* FIXME Merge this with xmon_save_regs ?? */ - unsigned long tmp1, tmp2; - __asm__ __volatile__ ( - "std 0,0(%2)\n" - "std 1,8(%2)\n" - "std 2,16(%2)\n" - "std 3,24(%2)\n" - "std 4,32(%2)\n" - "std 5,40(%2)\n" - "std 6,48(%2)\n" - "std 7,56(%2)\n" - "std 8,64(%2)\n" - "std 9,72(%2)\n" - "std 10,80(%2)\n" - "std 11,88(%2)\n" - "std 12,96(%2)\n" - "std 13,104(%2)\n" - "std 14,112(%2)\n" - "std 15,120(%2)\n" - "std 16,128(%2)\n" - "std 17,136(%2)\n" - "std 18,144(%2)\n" - "std 19,152(%2)\n" - "std 20,160(%2)\n" - "std 21,168(%2)\n" - "std 22,176(%2)\n" - "std 23,184(%2)\n" - "std 24,192(%2)\n" - "std 25,200(%2)\n" - "std 26,208(%2)\n" - "std 27,216(%2)\n" - "std 28,224(%2)\n" - "std 29,232(%2)\n" - "std 30,240(%2)\n" - "std 31,248(%2)\n" - "mfmsr %0\n" - "std %0, 264(%2)\n" - "mfctr %0\n" - "std %0, 280(%2)\n" - "mflr %0\n" - "std %0, 288(%2)\n" - "bl 1f\n" - "1: mflr %1\n" - "std %1, 256(%2)\n" - "mtlr %0\n" - "mfxer %0\n" - "std %0, 296(%2)\n" - : "=&r" (tmp1), "=&r" (tmp2) - : "b" (newregs) - : "memory"); - } + else + ppc_save_regs(newregs); } -#else -/* - * Provide a dummy definition to avoid build failures. Will remain - * empty till crash dump support is enabled. 
- */ -static inline void crash_setup_regs(struct pt_regs *newregs, - struct pt_regs *oldregs) { } -#endif /* !__powerpc64 __ */ extern void kexec_smp_wait(void); /* get and clear naca physid, wait for master to copy new code to 0 */ diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h new file mode 100644 index 000000000000..f49031b632ca --- /dev/null +++ b/arch/powerpc/include/asm/kvm_44x.h @@ -0,0 +1,61 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright IBM Corp. 2008 + * + * Authors: Hollis Blanchard <hollisb@us.ibm.com> + */ + +#ifndef __ASM_44X_H__ +#define __ASM_44X_H__ + +#include <linux/kvm_host.h> + +#define PPC44x_TLB_SIZE 64 + +/* If the guest is expecting it, this can be as large as we like; we'd just + * need to find some way of advertising it. */ +#define KVM44x_GUEST_TLB_SIZE 64 + +struct kvmppc_44x_shadow_ref { + struct page *page; + u16 gtlb_index; + u8 writeable; + u8 tid; +}; + +struct kvmppc_vcpu_44x { + /* Unmodified copy of the guest's TLB. */ + struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE]; + + /* References to guest pages in the hardware TLB. */ + struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE]; + + /* State of the shadow TLB at guest context switch time. */ + struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE]; + u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; + + struct kvm_vcpu vcpu; +}; + +static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu) +{ + return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu); +} + +void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid); +void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu); +void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu); + +#endif /* __ASM_44X_H__ */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 34b52b7180cd..c1e436fe7738 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -64,27 +64,58 @@ struct kvm_vcpu_stat { u32 halt_wakeup; }; -struct tlbe { +struct kvmppc_44x_tlbe { u32 tid; /* Only the low 8 bits are used. */ u32 word0; u32 word1; u32 word2; }; -struct kvm_arch { +enum kvm_exit_types { + MMIO_EXITS, + DCR_EXITS, + SIGNAL_EXITS, + ITLB_REAL_MISS_EXITS, + ITLB_VIRT_MISS_EXITS, + DTLB_REAL_MISS_EXITS, + DTLB_VIRT_MISS_EXITS, + SYSCALL_EXITS, + ISI_EXITS, + DSI_EXITS, + EMULATED_INST_EXITS, + EMULATED_MTMSRWE_EXITS, + EMULATED_WRTEE_EXITS, + EMULATED_MTSPR_EXITS, + EMULATED_MFSPR_EXITS, + EMULATED_MTMSR_EXITS, + EMULATED_MFMSR_EXITS, + EMULATED_TLBSX_EXITS, + EMULATED_TLBWE_EXITS, + EMULATED_RFI_EXITS, + DEC_EXITS, + EXT_INTR_EXITS, + HALT_WAKEUP, + USR_PR_INST, + FP_UNAVAIL, + DEBUG_EXITS, + TIMEINGUEST, + __NUMBER_OF_KVM_EXIT_TYPES }; -struct kvm_vcpu_arch { - /* Unmodified copy of the guest's TLB. */ - struct tlbe guest_tlb[PPC44x_TLB_SIZE]; - /* TLB that's actually used when the guest is running. 
*/ - struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; - /* Pages which are referenced in the shadow TLB. */ - struct page *shadow_pages[PPC44x_TLB_SIZE]; +/* allow access to big endian 32bit upper/lower parts and 64bit var */ +struct kvmppc_exit_timing { + union { + u64 tv64; + struct { + u32 tbu, tbl; + } tv32; + }; +}; - /* Track which TLB entries we've modified in the current exit. */ - u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; +struct kvm_arch { +}; +struct kvm_vcpu_arch { u32 host_stack; u32 host_pid; u32 host_dbcr0; @@ -94,32 +125,32 @@ struct kvm_vcpu_arch { u32 host_msr; u64 fpr[32]; - u32 gpr[32]; + ulong gpr[32]; - u32 pc; + ulong pc; u32 cr; - u32 ctr; - u32 lr; - u32 xer; + ulong ctr; + ulong lr; + ulong xer; - u32 msr; + ulong msr; u32 mmucr; - u32 sprg0; - u32 sprg1; - u32 sprg2; - u32 sprg3; - u32 sprg4; - u32 sprg5; - u32 sprg6; - u32 sprg7; - u32 srr0; - u32 srr1; - u32 csrr0; - u32 csrr1; - u32 dsrr0; - u32 dsrr1; - u32 dear; - u32 esr; + ulong sprg0; + ulong sprg1; + ulong sprg2; + ulong sprg3; + ulong sprg4; + ulong sprg5; + ulong sprg6; + ulong sprg7; + ulong srr0; + ulong srr1; + ulong csrr0; + ulong csrr1; + ulong dsrr0; + ulong dsrr1; + ulong dear; + ulong esr; u32 dec; u32 decar; u32 tbl; @@ -127,7 +158,7 @@ struct kvm_vcpu_arch { u32 tcr; u32 tsr; u32 ivor[16]; - u32 ivpr; + ulong ivpr; u32 pir; u32 shadow_pid; @@ -140,9 +171,22 @@ struct kvm_vcpu_arch { u32 dbcr0; u32 dbcr1; +#ifdef CONFIG_KVM_EXIT_TIMING + struct kvmppc_exit_timing timing_exit; + struct kvmppc_exit_timing timing_last_enter; + u32 last_exit_type; + u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES]; + u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES]; + u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES]; + u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES]; + u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES]; + u64 timing_last_exit; + struct dentry *debugfs_exit_timing; +#endif + u32 last_inst; - u32 fault_dear; - u32 fault_esr; + ulong fault_dear; + ulong fault_esr; gpa_t paddr_accessed; u8 io_gpr; /* GPR used as IO source/target */ diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index bb62ad876de3..36d2a50a8487 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -29,11 +29,6 @@ #include <linux/kvm_types.h> #include <linux/kvm_host.h> -struct kvm_tlb { - struct tlbe guest_tlb[PPC44x_TLB_SIZE]; - struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; -}; - enum emulation_result { EMULATE_DONE, /* no further processing */ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ @@ -41,9 +36,6 @@ enum emulation_result { EMULATE_FAIL, /* can't emulate this instruction */ }; -extern const unsigned char exception_priority[]; -extern const unsigned char priority_exception[]; - extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); extern char kvmppc_handlers_start[]; extern unsigned long kvmppc_handler_len; @@ -58,51 +50,44 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, extern int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu); extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); +extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); -extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, - u64 asid, u32 flags); -extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, - gva_t eend, u32 asid); +extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, + u64 asid, u32 flags, u32 max_bytes, + 
unsigned int gtlb_idx); extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); -/* XXX Book E specific */ -extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i); - -extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu); - -static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception) -{ - unsigned int priority = exception_priority[exception]; - set_bit(priority, &vcpu->arch.pending_exceptions); -} - -static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception) -{ - unsigned int priority = exception_priority[exception]; - clear_bit(priority, &vcpu->arch.pending_exceptions); -} - -/* Helper function for "full" MSR writes. No need to call this if only EE is - * changing. */ -static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) -{ - if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) - kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); - - vcpu->arch.msr = new_msr; - - if (vcpu->arch.msr & MSR_WE) - kvm_vcpu_block(vcpu); -} - -static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid) -{ - if (vcpu->arch.pid != new_pid) { - vcpu->arch.pid = new_pid; - vcpu->arch.swap_pid = 1; - } -} +/* Core-specific hooks */ + +extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, + unsigned int id); +extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu); +extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu); +extern int kvmppc_core_check_processor_compat(void); +extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr); + +extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu); +extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); + +extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu); +extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu); + +extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu); +extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); +extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu); +extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); +extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, + struct kvm_interrupt *irq); + +extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int op, int *advance); +extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); +extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); + +extern int kvmppc_booke_init(void); +extern void kvmppc_booke_exit(void); extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h index 612d83276653..84b457a3c1bc 100644 --- a/arch/powerpc/include/asm/local.h +++ b/arch/powerpc/include/asm/local.h @@ -67,7 +67,7 @@ static __inline__ long local_inc_return(local_t *l) bne- 1b" : "=&r" (t) : "r" (&(l->a.counter)) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } @@ -94,7 +94,7 @@ static __inline__ long local_dec_return(local_t *l) bne- 1b" : "=&r" (t) : "r" (&(l->a.counter)) - : "cc", "memory"); + : "cc", "xer", "memory"); return t; } diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 2fe268b10333..25aaa97facd8 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -133,7 +133,8 @@ struct lppaca { 
//============================================================================= // CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data //============================================================================= - u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF + u32 page_ins; // CMO Hint - # page ins by OS x00-x04 + u8 pmc_save_area[252]; // PMC interrupt Area x04-xFF } __attribute__((__aligned__(0x400))); extern struct lppaca lppaca[]; diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h index 3d108676584c..776f415a36aa 100644 --- a/arch/powerpc/include/asm/mmu-40x.h +++ b/arch/powerpc/include/asm/mmu-40x.h @@ -54,8 +54,9 @@ #ifndef __ASSEMBLY__ typedef struct { - unsigned long id; - unsigned long vdso_base; + unsigned int id; + unsigned int active; + unsigned long vdso_base; } mm_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h index a825524c981a..27cc6fdcd3b7 100644 --- a/arch/powerpc/include/asm/mmu-44x.h +++ b/arch/powerpc/include/asm/mmu-44x.h @@ -4,6 +4,8 @@ * PPC440 support */ +#include <asm/page.h> + #define PPC44x_MMUCR_TID 0x000000ff #define PPC44x_MMUCR_STS 0x00010000 @@ -54,10 +56,12 @@ #ifndef __ASSEMBLY__ extern unsigned int tlb_44x_hwater; +extern unsigned int tlb_44x_index; typedef struct { - unsigned long id; - unsigned long vdso_base; + unsigned int id; + unsigned int active; + unsigned long vdso_base; } mm_context_t; #endif /* !__ASSEMBLY__ */ @@ -73,4 +77,19 @@ typedef struct { /* Size of the TLBs used for pinning in lowmem */ #define PPC_PIN_SIZE (1 << 28) /* 256M */ +#if (PAGE_SHIFT == 12) +#define PPC44x_TLBE_SIZE PPC44x_TLB_4K +#elif (PAGE_SHIFT == 14) +#define PPC44x_TLBE_SIZE PPC44x_TLB_16K +#elif (PAGE_SHIFT == 16) +#define PPC44x_TLBE_SIZE PPC44x_TLB_64K +#else +#error "Unsupported PAGE_SIZE" +#endif + +#define PPC44x_PGD_OFF_SHIFT (32 - PGDIR_SHIFT + PGD_T_LOG2) +#define PPC44x_PGD_OFF_MASK_BIT (PGDIR_SHIFT - PGD_T_LOG2) +#define PPC44x_PTE_ADD_SHIFT (32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2) +#define PPC44x_PTE_ADD_MASK_BIT (32 - PTE_T_LOG2 - PTE_SHIFT) + #endif /* _ASM_POWERPC_MMU_44X_H_ */ diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 9db877eb88db..07865a357848 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -137,7 +137,8 @@ #ifndef __ASSEMBLY__ typedef struct { - unsigned long id; + unsigned int id; + unsigned int active; unsigned long vdso_base; } mm_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-fsl-booke.h index 925d93cf64d8..3f941c0f7e8e 100644 --- a/arch/powerpc/include/asm/mmu-fsl-booke.h +++ b/arch/powerpc/include/asm/mmu-fsl-booke.h @@ -40,6 +40,8 @@ #define MAS2_M 0x00000004 #define MAS2_G 0x00000002 #define MAS2_E 0x00000001 +#define MAS2_EPN_MASK(size) (~0 << (2*(size) + 10)) +#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags)) #define MAS3_RPN 0xFFFFF000 #define MAS3_U0 0x00000200 @@ -74,8 +76,9 @@ #ifndef __ASSEMBLY__ typedef struct { - unsigned long id; - unsigned long vdso_base; + unsigned int id; + unsigned int active; + unsigned long vdso_base; } mm_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 4c0e1b4f975c..6e7639911318 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -2,6 +2,63 @@ #define _ASM_POWERPC_MMU_H_ #ifdef 
__KERNEL__ +#include <asm/asm-compat.h> +#include <asm/feature-fixups.h> + +/* + * MMU features bit definitions + */ + +/* + * First half is MMU families + */ +#define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001) +#define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002) +#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004) +#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008) +#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010) + +/* + * These are individual features + */ + +/* Enable use of high BAT registers */ +#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000) + +/* Enable >32-bit physical addresses on 32-bit processor, only used + * by CONFIG_6xx currently as BookE supports that from day 1 + */ +#define MMU_FTR_BIG_PHYS ASM_CONST(0x00020000) + +/* Enable use of broadcast TLB invalidations. We don't always set it + * on processors that support it due to other constraints with the + * use of such invalidations + */ +#define MMU_FTR_USE_TLBIVAX_BCAST ASM_CONST(0x00040000) + +/* Enable use of tlbilx invalidate-by-PID variant. + */ +#define MMU_FTR_USE_TLBILX_PID ASM_CONST(0x00080000) + +/* This indicates that the processor cannot handle multiple outstanding + * broadcast tlbivax or tlbsync. This makes the code use a spinlock + * around such invalidate forms. + */ +#define MMU_FTR_LOCK_BCAST_INVAL ASM_CONST(0x00100000) + +#ifndef __ASSEMBLY__ +#include <asm/cputable.h> + +static inline int mmu_has_feature(unsigned long feature) +{ + return (cur_cpu_spec->mmu_features & feature); +} + +extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup; + +#endif /* !__ASSEMBLY__ */ + + #ifdef CONFIG_PPC64 /* 64-bit classic hash table MMU */ # include <asm/mmu-hash64.h> diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 6b993ef452ff..ab4f19263c42 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -2,237 +2,26 @@ #define __ASM_POWERPC_MMU_CONTEXT_H #ifdef __KERNEL__ +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/spinlock.h> #include <asm/mmu.h> #include <asm/cputable.h> #include <asm-generic/mm_hooks.h> - -#ifndef CONFIG_PPC64 -#include <asm/atomic.h> -#include <linux/bitops.h> - -/* - * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs - * (virtual segment identifiers) for each context. Although the - * hardware supports 24-bit VSIDs, and thus >1 million contexts, - * we only use 32,768 of them. That is ample, since there can be - * at most around 30,000 tasks in the system anyway, and it means - * that we can use a bitmap to indicate which contexts are in use. - * Using a bitmap means that we entirely avoid all of the problems - * that we used to have when the context number overflowed, - * particularly on SMP systems. - * -- paulus. - */ - -/* - * This function defines the mapping from contexts to VSIDs (virtual - * segment IDs). We use a skew on both the context and the high 4 bits - * of the 32-bit virtual address (the "effective segment ID") in order - * to spread out the entries in the MMU hash table. Note, if this - * function is changed then arch/ppc/mm/hashtable.S will have to be - * changed to correspond. - */ -#define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \ - & 0xffffff) - -/* - The MPC8xx has only 16 contexts. We rotate through them on each - task switch. A better way would be to keep track of tasks that - own contexts, and implement an LRU usage. That way very active - tasks don't always have to pay the TLB reload overhead.
The - kernel pages are mapped shared, so the kernel can run on behalf - of any task that makes a kernel entry. Shared does not mean they - are not protected, just that the ASID comparison is not performed. - -- Dan - - The IBM4xx has 256 contexts, so we can just rotate through these - as a way of "switching" contexts. If the TID of the TLB is zero, - the PID/TID comparison is disabled, so we can use a TID of zero - to represent all kernel pages as shared among all contexts. - -- Dan - */ - -static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} - -#ifdef CONFIG_8xx -#define NO_CONTEXT 16 -#define LAST_CONTEXT 15 -#define FIRST_CONTEXT 0 - -#elif defined(CONFIG_4xx) -#define NO_CONTEXT 256 -#define LAST_CONTEXT 255 -#define FIRST_CONTEXT 1 - -#elif defined(CONFIG_E200) || defined(CONFIG_E500) -#define NO_CONTEXT 256 -#define LAST_CONTEXT 255 -#define FIRST_CONTEXT 1 - -#else - -/* PPC 6xx, 7xx CPUs */ -#define NO_CONTEXT ((unsigned long) -1) -#define LAST_CONTEXT 32767 -#define FIRST_CONTEXT 1 -#endif - -/* - * Set the current MMU context. - * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by - * loading up the segment registers for the user part of the address space. - * - * Since the PGD is immediately available, it is much faster to simply - * pass this along as a second parameter, which is required for 8xx and - * can be used for debugging on all processors (if you happen to have - * an Abatron). - */ -extern void set_context(unsigned long contextid, pgd_t *pgd); - -/* - * Bitmap of contexts in use. - * The size of this bitmap is LAST_CONTEXT + 1 bits. - */ -extern unsigned long context_map[]; - -/* - * This caches the next context number that we expect to be free. - * Its use is an optimization only, we can't rely on this context - * number to be free, but it usually will be. - */ -extern unsigned long next_mmu_context; - -/* - * If we don't have sufficient contexts to give one to every task - * that could be in the system, we need to be able to steal contexts. - * These variables support that. - */ -#if LAST_CONTEXT < 30000 -#define FEW_CONTEXTS 1 -extern atomic_t nr_free_contexts; -extern struct mm_struct *context_mm[LAST_CONTEXT+1]; -extern void steal_context(void); -#endif - -/* - * Get a new mmu context for the address space described by `mm'. - */ -static inline void get_mmu_context(struct mm_struct *mm) -{ - unsigned long ctx; - - if (mm->context.id != NO_CONTEXT) - return; -#ifdef FEW_CONTEXTS - while (atomic_dec_if_positive(&nr_free_contexts) < 0) - steal_context(); -#endif - ctx = next_mmu_context; - while (test_and_set_bit(ctx, context_map)) { - ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx); - if (ctx > LAST_CONTEXT) - ctx = 0; - } - next_mmu_context = (ctx + 1) & LAST_CONTEXT; - mm->context.id = ctx; -#ifdef FEW_CONTEXTS - context_mm[ctx] = mm; -#endif -} - -/* - * Set up the context for a new address space. - */ -static inline int init_new_context(struct task_struct *t, struct mm_struct *mm) -{ - mm->context.id = NO_CONTEXT; - return 0; -} - -/* - * We're finished using the context for an address space. 
- */ -static inline void destroy_context(struct mm_struct *mm) -{ - preempt_disable(); - if (mm->context.id != NO_CONTEXT) { - clear_bit(mm->context.id, context_map); - mm->context.id = NO_CONTEXT; -#ifdef FEW_CONTEXTS - atomic_inc(&nr_free_contexts); -#endif - } - preempt_enable(); -} - -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, - struct task_struct *tsk) -{ -#ifdef CONFIG_ALTIVEC - if (cpu_has_feature(CPU_FTR_ALTIVEC)) - asm volatile ("dssall;\n" -#ifndef CONFIG_POWER4 - "sync;\n" /* G4 needs a sync here, G5 apparently not */ -#endif - : : ); -#endif /* CONFIG_ALTIVEC */ - - tsk->thread.pgdir = next->pgd; - - /* No need to flush userspace segments if the mm doesnt change */ - if (prev == next) - return; - - /* Setup new userspace context */ - get_mmu_context(next); - set_context(next->context.id, next->pgd); -} - -#define deactivate_mm(tsk,mm) do { } while (0) +#include <asm/cputhreads.h> /* - * After we have set current->mm to a new value, this activates - * the context for the new mm so we see the new mappings. + * Most of the context management is out of line */ -#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, current) - extern void mmu_context_init(void); - - -#else - -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/sched.h> - -/* - * Copyright (C) 2001 PPC 64 Team, IBM Corp - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -static inline void enter_lazy_tlb(struct mm_struct *mm, - struct task_struct *tsk) -{ -} - -/* - * The proto-VSID space has 2^35 - 1 segments available for user mappings. - * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, - * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). - */ -#define NO_CONTEXT 0 -#define MAX_CONTEXT ((1UL << 19) - 1) - extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); extern void destroy_context(struct mm_struct *mm); +extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next); extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm); extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); +extern void set_context(unsigned long id, pgd_t *pgd); /* * switch_mm is the entry point called from the architecture independent @@ -241,22 +30,39 @@ extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { - if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask)) - cpu_set(smp_processor_id(), next->cpu_vm_mask); + /* Mark that this context has been used on the new CPU */ + cpu_set(smp_processor_id(), next->cpu_vm_mask); + + /* 32-bit keeps track of the current PGDIR in the thread struct */ +#ifdef CONFIG_PPC32 + tsk->thread.pgdir = next->pgd; +#endif /* CONFIG_PPC32 */ - /* No need to flush userspace segments if the mm doesnt change */ + /* Nothing else to do if we aren't actually switching */ if (prev == next) return; + /* We must stop all altivec streams before changing the HW + * context + */ #ifdef CONFIG_ALTIVEC if (cpu_has_feature(CPU_FTR_ALTIVEC)) asm volatile ("dssall"); #endif /* CONFIG_ALTIVEC */ + /* The actual HW switching method differs between the various + * sub-architectures.
+ */ +#ifdef CONFIG_PPC_STD_MMU_64 if (cpu_has_feature(CPU_FTR_SLB)) switch_slb(tsk, next); else switch_stab(tsk, next); +#else + /* Out of line for now */ + switch_mmu_context(prev, next); +#endif + } #define deactivate_mm(tsk,mm) do { } while (0) @@ -274,6 +80,11 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) local_irq_restore(flags); } -#endif /* CONFIG_PPC64 */ +/* We don't currently use enter_lazy_tlb() for anything */ +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_MMU_CONTEXT_H */ diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index e5f14b13ccf0..08454880a2c0 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -34,11 +34,19 @@ struct mod_arch_specific { #ifdef __powerpc64__ unsigned int stubs_section; /* Index of stubs section in module */ unsigned int toc_section; /* What section is the TOC? */ -#else +#ifdef CONFIG_DYNAMIC_FTRACE + unsigned long toc; + unsigned long tramp; +#endif + +#else /* powerpc64 */ /* Indices of PLT sections within module. */ unsigned int core_plt_section; unsigned int init_plt_section; +#ifdef CONFIG_DYNAMIC_FTRACE + unsigned long tramp; #endif +#endif /* powerpc64 */ /* List of BUG addresses, source line numbers and filenames */ struct list_head bug_list; @@ -68,6 +76,12 @@ struct mod_arch_specific { # endif /* MODULE */ #endif +#ifdef CONFIG_DYNAMIC_FTRACE +# ifdef MODULE + asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous"); +# endif /* MODULE */ +#endif + struct exception_table_entry; void sort_ex_table(struct exception_table_entry *start, diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h index 81ef10b6b672..81a23932a160 100644 --- a/arch/powerpc/include/asm/mpc52xx.h +++ b/arch/powerpc/include/asm/mpc52xx.h @@ -239,6 +239,25 @@ struct mpc52xx_cdm { u16 mclken_div_psc6; /* CDM + 0x36 reg13 byte2,3 */ }; +/* Interrupt controller Register set */ +struct mpc52xx_intr { + u32 per_mask; /* INTR + 0x00 */ + u32 per_pri1; /* INTR + 0x04 */ + u32 per_pri2; /* INTR + 0x08 */ + u32 per_pri3; /* INTR + 0x0c */ + u32 ctrl; /* INTR + 0x10 */ + u32 main_mask; /* INTR + 0x14 */ + u32 main_pri1; /* INTR + 0x18 */ + u32 main_pri2; /* INTR + 0x1c */ + u32 reserved1; /* INTR + 0x20 */ + u32 enc_status; /* INTR + 0x24 */ + u32 crit_status; /* INTR + 0x28 */ + u32 main_status; /* INTR + 0x2c */ + u32 per_status; /* INTR + 0x30 */ + u32 reserved2; /* INTR + 0x34 */ + u32 per_error; /* INTR + 0x38 */ +}; + #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h index 8917ed630565..a218da6bec7c 100644 --- a/arch/powerpc/include/asm/mpc52xx_psc.h +++ b/arch/powerpc/include/asm/mpc52xx_psc.h @@ -68,12 +68,20 @@ #define MPC52xx_PSC_IMR_ORERR 0x1000 #define MPC52xx_PSC_IMR_IPC 0x8000 -/* PSC input port change bit */ +/* PSC input port change bits */ #define MPC52xx_PSC_CTS 0x01 #define MPC52xx_PSC_DCD 0x02 #define MPC52xx_PSC_D_CTS 0x10 #define MPC52xx_PSC_D_DCD 0x20 +/* PSC acr bits */ +#define MPC52xx_PSC_IEC_CTS 0x01 +#define MPC52xx_PSC_IEC_DCD 0x02 + +/* PSC output port bits */ +#define MPC52xx_PSC_OP_RTS 0x01 +#define MPC52xx_PSC_OP_RES 0x02 + /* PSC mode fields */ #define MPC52xx_PSC_MODE_5_BITS 0x00 #define MPC52xx_PSC_MODE_6_BITS 0x01 @@ -91,6 +99,7 @@ #define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00 #define MPC52xx_PSC_MODE_ONE_STOP 0x07 #define 
MPC52xx_PSC_MODE_TWO_STOP 0x0f +#define MPC52xx_PSC_MODE_TXCTS 0x10 #define MPC52xx_PSC_RFNUM_MASK 0x01ff diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h index 458c1f7fbc18..dabc01c727b8 100644 --- a/arch/powerpc/include/asm/mutex.h +++ b/arch/powerpc/include/asm/mutex.h @@ -1,9 +1,134 @@ /* - * Pull in the generic implementation for the mutex fastpath. + * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm + */ +#ifndef _ASM_POWERPC_MUTEX_H +#define _ASM_POWERPC_MUTEX_H + +static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new) +{ + int t; + + __asm__ __volatile__ ( +"1: lwarx %0,0,%1 # mutex trylock\n\ + cmpw 0,%0,%2\n\ + bne- 2f\n" + PPC405_ERR77(0,%1) +" stwcx. %3,0,%1\n\ + bne- 1b" + ISYNC_ON_SMP + "\n\ +2:" + : "=&r" (t) + : "r" (&v->counter), "r" (old), "r" (new) + : "cc", "memory"); + + return t; +} + +static inline int __mutex_dec_return_lock(atomic_t *v) +{ + int t; + + __asm__ __volatile__( +"1: lwarx %0,0,%1 # mutex lock\n\ + addic %0,%0,-1\n" + PPC405_ERR77(0,%1) +" stwcx. %0,0,%1\n\ + bne- 1b" + ISYNC_ON_SMP + : "=&r" (t) + : "r" (&v->counter) + : "cc", "memory"); + + return t; +} + +static inline int __mutex_inc_return_unlock(atomic_t *v) +{ + int t; + + __asm__ __volatile__( + LWSYNC_ON_SMP +"1: lwarx %0,0,%1 # mutex unlock\n\ + addic %0,%0,1\n" + PPC405_ERR77(0,%1) +" stwcx. %0,0,%1 \n\ + bne- 1b" + : "=&r" (t) + : "r" (&v->counter) + : "cc", "memory"); + + return t; +} + +/** + * __mutex_fastpath_lock - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 1 + * + * Change the count from 1 to a value lower than 1, and call <fail_fn> if + * it wasn't 1 originally. This function MUST leave the value lower than + * 1 even when the "1" assertion wasn't true. + */ +static inline void +__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(__mutex_dec_return_lock(count) < 0)) + fail_fn(count); +} + +/** + * __mutex_fastpath_lock_retval - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 1 + * + * Change the count from 1 to a value lower than 1, and call <fail_fn> if + * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, + * or anything the slow path function returns. + */ +static inline int +__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + if (unlikely(__mutex_dec_return_lock(count) < 0)) + return fail_fn(count); + return 0; +} + +/** + * __mutex_fastpath_unlock - try to promote the count from 0 to 1 + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 0 + * + * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>. + * In the failure case, this function is allowed to either set the value to + * 1, or to set it to a value lower than 1. 
+ */ +static inline void +__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(__mutex_inc_return_unlock(count) <= 0)) + fail_fn(count); +} + +#define __mutex_slowpath_needs_to_unlock() 1 + +/** + * __mutex_fastpath_trylock - try to acquire the mutex, without waiting + * + * @count: pointer of type atomic_t + * @fail_fn: fallback function * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. (see asm-generic/mutex-xchg.h for details) + * Change the count from 1 to 0, and return 1 (success), or if the count + * was not 1, then return 0 (failure). */ +static inline int +__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1)) + return 1; + return 0; +} -#include <asm-generic/mutex-dec.h> +#endif diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index c0b8d4a29a91..197d569f5bd3 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -19,12 +19,15 @@ #include <asm/kdump.h> /* - * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software + * On regular PPC32 page size is 4K (but we support 4K/16K/64K pages + * on PPC44x). For PPC64 we support either 4K or 64K software * page size. When using 64K pages however, whether we are really supporting * 64K pages in HW or not is irrelevant to those definitions. */ -#ifdef CONFIG_PPC_64K_PAGES +#if defined(CONFIG_PPC_64K_PAGES) #define PAGE_SHIFT 16 +#elif defined(CONFIG_PPC_16K_PAGES) +#define PAGE_SHIFT 14 #else #define PAGE_SHIFT 12 #endif @@ -151,7 +154,7 @@ typedef struct { pte_basic_t pte; } pte_t; /* 64k pages additionally define a bigger "real PTE" type that gathers * the "second half" part of the PTE for pseudo 64k pages */ -#ifdef CONFIG_PPC_64K_PAGES +#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64) typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; #else typedef struct { pte_t pte; } real_pte_t; @@ -191,10 +194,10 @@ typedef pte_basic_t pte_t; #define pte_val(x) (x) #define __pte(x) (x) -#ifdef CONFIG_PPC_64K_PAGES +#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64) typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; #else -typedef unsigned long real_pte_t; +typedef pte_t real_pte_t; #endif diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h index d77072a32cc6..1458d9500381 100644 --- a/arch/powerpc/include/asm/page_32.h +++ b/arch/powerpc/include/asm/page_32.h @@ -19,6 +19,8 @@ #define PTE_FLAGS_OFFSET 0 #endif +#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */ + #ifndef __ASSEMBLY__ /* * The basic type of a PTE - 64 bits for those CPUs with > 32 bit @@ -26,10 +28,8 @@ */ #ifdef CONFIG_PTE_64BIT typedef unsigned long long pte_basic_t; -#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */ #else typedef unsigned long pte_basic_t; -#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */ #endif struct page; @@ -39,6 +39,9 @@ extern void copy_page(void *to, void *from); #include <asm-generic/page.h> +#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) +#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PAGE_32_H */ diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 9047af7baa69..84007afabdb5 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ 
b/arch/powerpc/include/asm/pci-bridge.h @@ -13,7 +13,6 @@ struct device_node; -extern unsigned int ppc_pci_flags; enum { /* Force re-assigning all resources (ignore firmware * setup completely) @@ -36,6 +35,31 @@ enum { /* ... except for domain 0 */ PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020, }; +#ifdef CONFIG_PCI +extern unsigned int ppc_pci_flags; + +static inline void ppc_pci_set_flags(int flags) +{ + ppc_pci_flags = flags; +} + +static inline void ppc_pci_add_flags(int flags) +{ + ppc_pci_flags |= flags; +} + +static inline int ppc_pci_has_flag(int flag) +{ + return (ppc_pci_flags & flag); +} +#else +static inline void ppc_pci_set_flags(int flags) { } +static inline void ppc_pci_add_flags(int flags) { } +static inline int ppc_pci_has_flag(int flag) +{ + return 0; +} +#endif /* @@ -241,9 +265,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus); /** Discover new pci devices under this bus, and add them */ extern void pcibios_add_pci_devices(struct pci_bus *bus); -extern void pcibios_fixup_new_pci_devices(struct pci_bus *bus); - -extern int pcibios_remove_root_bus(struct pci_controller *phb); static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) { @@ -290,6 +311,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, /* Allocate & free a PCI host bridge structure */ extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev); extern void pcibios_free_controller(struct pci_controller *phb); +extern void pcibios_setup_phb_resources(struct pci_controller *hose); #ifdef CONFIG_PCI extern unsigned long pci_address_to_pio(phys_addr_t address); diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 57a2a494886b..3548159a1beb 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -38,8 +38,8 @@ struct pci_dev; * Set this to 1 if you want the kernel to re-assign all PCI * bus numbers (don't do that on ppc64 yet !) 
*/ -#define pcibios_assign_all_busses() (ppc_pci_flags & \ - PPC_PCI_REASSIGN_ALL_BUS) +#define pcibios_assign_all_busses() \ + (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS)) #define pcibios_scan_all_fns(a, b) 0 static inline void pcibios_set_master(struct pci_dev *dev) @@ -204,15 +204,14 @@ static inline struct resource *pcibios_select_root(struct pci_dev *pdev, return root; } -extern void pcibios_setup_new_device(struct pci_dev *dev); - extern void pcibios_claim_one_bus(struct pci_bus *b); -extern void pcibios_allocate_bus_resources(struct pci_bus *bus); +extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); extern void pcibios_resource_survey(void); extern struct pci_controller *init_phb_dynamic(struct device_node *dn); +extern int remove_phb_dynamic(struct pci_controller *phb); extern struct pci_dev *of_create_pci_dev(struct device_node *node, struct pci_bus *bus, int devfn); @@ -221,6 +220,7 @@ extern void of_scan_pci_bridge(struct device_node *node, struct pci_dev *dev); extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); +extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); extern int pci_read_irq_line(struct pci_dev *dev); @@ -235,9 +235,8 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end); -extern void pcibios_do_bus_setup(struct pci_bus *bus); -extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus); - +extern void pcibios_setup_bus_devices(struct pci_bus *bus); +extern void pcibios_setup_bus_self(struct pci_bus *bus); #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_PCI_H */ diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 58c07147b3ea..0815eb40acae 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -3,6 +3,8 @@ #include <linux/threads.h> +#define PTE_NONCACHE_NUM 0 /* dummy for now to share code w/ppc64 */ + extern void __bad_pte(pmd_t *pmd); extern pgd_t *pgd_alloc(struct mm_struct *mm); @@ -33,10 +35,13 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); -extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte); -extern void pte_free(struct mm_struct *mm, pgtable_t pte); -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) +static inline void pgtable_free(pgtable_free_t pgf) +{ + void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); + + free_page((unsigned long)p); +} #define check_pgt_cache() do { } while (0) diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 812a1d8f35cb..afda2bdd860f 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -7,7 +7,6 @@ * 2 of the License, or (at your option) any later version. 
*/ -#include <linux/mm.h> #include <linux/slab.h> #include <linux/cpumask.h> #include <linux/percpu.h> @@ -108,31 +107,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, return page; } -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) -{ - free_page((unsigned long)pte); -} - -static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) -{ - pgtable_page_dtor(ptepage); - __free_page(ptepage); -} - -#define PGF_CACHENUM_MASK 0x7 - -typedef struct pgtable_free { - unsigned long val; -} pgtable_free_t; - -static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum, - unsigned long mask) -{ - BUG_ON(cachenum > PGF_CACHENUM_MASK); - - return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum}; -} - static inline void pgtable_free(pgtable_free_t pgf) { void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); @@ -144,14 +118,6 @@ static inline void pgtable_free(pgtable_free_t pgf) kmem_cache_free(pgtable_cache[cachenum], p); } -extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); - -#define __pte_free_tlb(tlb,ptepage) \ -do { \ - pgtable_page_dtor(ptepage); \ - pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ - PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ -} while (0) #define __pmd_free_tlb(tlb, pmd) \ pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index b4505ed0f0f2..5d8480265a77 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -2,11 +2,52 @@ #define _ASM_POWERPC_PGALLOC_H #ifdef __KERNEL__ +#include <linux/mm.h> + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long)pte); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) +{ + pgtable_page_dtor(ptepage); + __free_page(ptepage); +} + +typedef struct pgtable_free { + unsigned long val; +} pgtable_free_t; + +#define PGF_CACHENUM_MASK 0x7 + +static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum, + unsigned long mask) +{ + BUG_ON(cachenum > PGF_CACHENUM_MASK); + + return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum}; +} + #ifdef CONFIG_PPC64 #include <asm/pgalloc-64.h> #else #include <asm/pgalloc-32.h> #endif +extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); + +#ifdef CONFIG_SMP +#define __pte_free_tlb(tlb,ptepage) \ +do { \ + pgtable_page_dtor(ptepage); \ + pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ + PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ +} while (0) +#else +#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) +#endif + + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_PGALLOC_H */ diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index 6ab7c67cb5ab..f69a4d977729 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h @@ -228,9 +228,10 @@ extern int icache_44x_need_flush; * - FILE *must* be in the bottom three bits because swap cache * entries use the top 29 bits for TLB2. * - * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it - * doesn't support SMP. So we can use this as software bit, like - * DIRTY. + * - CACHE COHERENT bit (M) has no effect on original PPC440 cores, + * because it doesn't support SMP. 
However, some later 460 variants + have -some- form of SMP support and so I keep the bit there for + future use. * * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used * for memory protection related functions (see PTE structure in @@ -436,20 +437,23 @@ extern int icache_44x_need_flush; _PAGE_USER | _PAGE_ACCESSED | \ _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \ _PAGE_EXEC | _PAGE_HWEXEC) + /* - * Note: the _PAGE_COHERENT bit automatically gets set in the hardware - * PTE if CONFIG_SMP is defined (hash_page does this); there is no need - * to have it in the Linux PTE, and in fact the bit could be reused for - * another purpose. -- paulus. + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non-cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or when + * the processor might need it for DMA coherency. */ - -#ifdef CONFIG_44x -#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED) +#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) +#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) #else #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) #endif +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE) + #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE) #define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE) +#define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE) #ifdef CONFIG_PPC_STD_MMU /* On standard PPC MMU, no user access implies kernel read/write access, @@ -459,7 +463,7 @@ extern int icache_44x_need_flush; #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED) #endif -#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED) +#define _PAGE_IO (_PAGE_KERNEL_NC | _PAGE_GUARDED) #define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC) #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ @@ -552,9 +556,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } -static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } -static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } - static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; } static inline pte_t pte_mkclean(pte_t pte) { @@ -693,10 +694,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, #endif } + static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { -#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) +#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM) WARN_ON(pte_present(*ptep)); #endif __set_pte_at(mm, addr, ptep, pte); @@ -760,16 +762,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) __changed; \ }) -/* - * Macro to mark a page protection value as "uncacheable".
- */ -#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) - -struct file; -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -#define __HAVE_PHYS_MEM_ACCESS_PROT - #define __HAVE_ARCH_PTE_SAME #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 4c0a8c62859d..b0f18be81d9f 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -100,7 +100,7 @@ #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) -/* __pgprot defined in arch/powerpc/incliude/asm/page.h */ +/* __pgprot defined in arch/powerpc/include/asm/page.h */ #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER) @@ -245,9 +245,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } -static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } -static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } - static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW); return pte; } static inline pte_t pte_mkclean(pte_t pte) { @@ -405,16 +402,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) __changed; \ }) -/* - * Macro to mark a page protection value as "uncacheable". - */ -#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) - -struct file; -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -#define __HAVE_PHYS_MEM_ACCESS_PROT - #define __HAVE_ARCH_PTE_SAME #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index dbb8ca172e44..07f55e601696 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -16,6 +16,32 @@ struct mm_struct; #endif #ifndef __ASSEMBLY__ + +/* + * Macros to mark a page protection value as "uncacheable" or to + * select one of the other cache-control attribute combinations. + */ + +#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE | _PAGE_GUARDED)) + +#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE)) + +#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_COHERENT)) + +#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_COHERENT | _PAGE_WRITETHRU)) + + +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#define __HAVE_PHYS_MEM_ACCESS_PROT + /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc..
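The pgtable.h hunk above consolidates the cache-attribute pgprot helpers that the pgtable-ppc32.h and pgtable-ppc64.h hunks just removed; because each helper first clears _PAGE_CACHE_CTL, the variants compose safely whatever cache bits the incoming protection already carried. As an illustration only (foo_mmap() and foo_mmio_pfn are hypothetical, not part of this patch), a driver mapping MMIO into userspace would pick the uncached+guarded variant like this:

/* Hypothetical driver mmap hook: strip any existing cache-control
 * bits and map the register window uncached and guarded. */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, foo_mmio_pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}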
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index c4a029ccb4d3..1a0d628eb114 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -425,14 +425,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) #define fromreal(rd) tovirt(rd,rd) #define tophys(rd,rs) \ -0: addis rd,rs,-KERNELBASE@h; \ +0: addis rd,rs,-PAGE_OFFSET@h; \ .section ".vtop_fixup","aw"; \ .align 1; \ .long 0b; \ .previous #define tovirt(rd,rs) \ -0: addis rd,rs,KERNELBASE@h; \ +0: addis rd,rs,PAGE_OFFSET@h; \ .section ".ptov_fixup","aw"; \ .align 1; \ .long 0b; \ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 101ed87f7d84..d3466490104a 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -69,8 +69,6 @@ extern int _prep_type; #ifdef __KERNEL__ -extern int have_of; - struct task_struct; void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); void release_thread(struct task_struct *); @@ -207,6 +205,11 @@ struct thread_struct { #define INIT_SP_LIMIT \ (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack) +#ifdef CONFIG_SPE +#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, +#else +#define SPEFSCR_INIT +#endif #ifdef CONFIG_PPC32 #define INIT_THREAD { \ @@ -215,6 +218,7 @@ struct thread_struct { .fs = KERNEL_DS, \ .pgdir = swapper_pg_dir, \ .fpexc_mode = MSR_FE0 | MSR_FE1, \ + SPEFSCR_INIT \ } #else #define INIT_THREAD { \ diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index eb3bd2e1c7f6..6ff04185d2aa 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -253,6 +253,9 @@ extern void kdump_move_device_tree(void); /* CPU OF node matching */ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); +/* cache lookup */ +struct device_node *of_find_next_cache_node(struct device_node *np); + /* Get the MAC address */ extern const void *of_get_mac_address(struct device_node *np); diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index f9e34c493cbb..eead5c67197a 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -305,30 +305,36 @@ static inline const char* ps3_result(int result) /* system bus routines */ enum ps3_match_id { - PS3_MATCH_ID_EHCI = 1, - PS3_MATCH_ID_OHCI = 2, - PS3_MATCH_ID_GELIC = 3, - PS3_MATCH_ID_AV_SETTINGS = 4, - PS3_MATCH_ID_SYSTEM_MANAGER = 5, - PS3_MATCH_ID_STOR_DISK = 6, - PS3_MATCH_ID_STOR_ROM = 7, - PS3_MATCH_ID_STOR_FLASH = 8, - PS3_MATCH_ID_SOUND = 9, - PS3_MATCH_ID_GRAPHICS = 10, - PS3_MATCH_ID_LPM = 11, + PS3_MATCH_ID_EHCI = 1, + PS3_MATCH_ID_OHCI = 2, + PS3_MATCH_ID_GELIC = 3, + PS3_MATCH_ID_AV_SETTINGS = 4, + PS3_MATCH_ID_SYSTEM_MANAGER = 5, + PS3_MATCH_ID_STOR_DISK = 6, + PS3_MATCH_ID_STOR_ROM = 7, + PS3_MATCH_ID_STOR_FLASH = 8, + PS3_MATCH_ID_SOUND = 9, + PS3_MATCH_ID_GPU = 10, + PS3_MATCH_ID_LPM = 11, }; -#define PS3_MODULE_ALIAS_EHCI "ps3:1" -#define PS3_MODULE_ALIAS_OHCI "ps3:2" -#define PS3_MODULE_ALIAS_GELIC "ps3:3" -#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4" -#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5" -#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6" -#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7" -#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8" -#define PS3_MODULE_ALIAS_SOUND "ps3:9" -#define PS3_MODULE_ALIAS_GRAPHICS "ps3:10" -#define PS3_MODULE_ALIAS_LPM "ps3:11" +enum ps3_match_sub_id { + PS3_MATCH_SUB_ID_GPU_FB = 1, + 
PS3_MATCH_SUB_ID_GPU_RAMDISK = 2, +}; + +#define PS3_MODULE_ALIAS_EHCI "ps3:1:0" +#define PS3_MODULE_ALIAS_OHCI "ps3:2:0" +#define PS3_MODULE_ALIAS_GELIC "ps3:3:0" +#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4:0" +#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5:0" +#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6:0" +#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7:0" +#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8:0" +#define PS3_MODULE_ALIAS_SOUND "ps3:9:0" +#define PS3_MODULE_ALIAS_GPU_FB "ps3:10:1" +#define PS3_MODULE_ALIAS_GPU_RAMDISK "ps3:10:2" +#define PS3_MODULE_ALIAS_LPM "ps3:11:0" enum ps3_system_bus_device_type { PS3_DEVICE_TYPE_IOC0 = 1, @@ -337,11 +343,6 @@ enum ps3_system_bus_device_type { PS3_DEVICE_TYPE_LPM, }; -enum ps3_match_sub_id { - /* for PS3_MATCH_ID_GRAPHICS */ - PS3_MATCH_SUB_ID_FB = 1, -}; - /** * struct ps3_system_bus_device - a device on the system bus */ @@ -516,4 +517,7 @@ void ps3_sync_irq(int node); u32 ps3_get_hw_thread_id(int cpu); u64 ps3_get_spe_id(void *arg); +/* mutex synchronizing GPU accesses and video mode changes */ +extern struct mutex ps3_gpu_mutex; + #endif diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h index 5aa22cffdbd6..cd24ac16660a 100644 --- a/arch/powerpc/include/asm/ps3av.h +++ b/arch/powerpc/include/asm/ps3av.h @@ -740,8 +740,4 @@ extern int ps3av_audio_mute(int); extern int ps3av_audio_mute_analog(int); extern int ps3av_dev_open(void); extern int ps3av_dev_close(void); -extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data), - void *flip_data); -extern void ps3av_flip_ctl(int on); - #endif /* _ASM_POWERPC_PS3AV_H_ */ diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h index edee15d269ea..a0a15311d0d8 100644 --- a/arch/powerpc/include/asm/qe.h +++ b/arch/powerpc/include/asm/qe.h @@ -17,6 +17,8 @@ #ifdef __KERNEL__ #include <linux/spinlock.h> +#include <linux/errno.h> +#include <linux/err.h> #include <asm/cpm.h> #include <asm/immap_qe.h> @@ -84,7 +86,11 @@ static inline bool qe_clock_is_brg(enum qe_clock clk) extern spinlock_t cmxgcr_lock; /* Export QE common operations */ +#ifdef CONFIG_QUICC_ENGINE extern void __init qe_reset(void); +#else +static inline void qe_reset(void) {} +#endif /* QE PIO */ #define QE_PIO_PINS 32 @@ -101,16 +107,43 @@ struct qe_pio_regs { #endif }; -extern int par_io_init(struct device_node *np); -extern int par_io_of_config(struct device_node *np); #define QE_PIO_DIR_IN 2 #define QE_PIO_DIR_OUT 1 extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir, int open_drain, int assignment, int has_irq); +#ifdef CONFIG_QUICC_ENGINE +extern int par_io_init(struct device_node *np); +extern int par_io_of_config(struct device_node *np); extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, int assignment, int has_irq); extern int par_io_data_set(u8 port, u8 pin, u8 val); +#else +static inline int par_io_init(struct device_node *np) { return -ENOSYS; } +static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; } +static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, + int assignment, int has_irq) { return -ENOSYS; } +static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; } +#endif /* CONFIG_QUICC_ENGINE */ + +/* + * Pin multiplexing functions. 
+ */ +struct qe_pin; +#ifdef CONFIG_QE_GPIO +extern struct qe_pin *qe_pin_request(struct device_node *np, int index); +extern void qe_pin_free(struct qe_pin *qe_pin); +extern void qe_pin_set_gpio(struct qe_pin *qe_pin); +extern void qe_pin_set_dedicated(struct qe_pin *pin); +#else +static inline struct qe_pin *qe_pin_request(struct device_node *np, int index) +{ + return ERR_PTR(-ENOSYS); +} +static inline void qe_pin_free(struct qe_pin *qe_pin) {} +static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {} +static inline void qe_pin_set_dedicated(struct qe_pin *pin) {} +#endif /* CONFIG_QE_GPIO */ /* QE internal API */ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input); diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h index 56a7745ca343..cf519663a791 100644 --- a/arch/powerpc/include/asm/qe_ic.h +++ b/arch/powerpc/include/asm/qe_ic.h @@ -17,6 +17,9 @@ #include <linux/irq.h> +struct device_node; +struct qe_ic; + #define NUM_OF_QE_IC_GROUPS 6 /* Flags when we init the QE IC */ @@ -54,17 +57,27 @@ enum qe_ic_grp_id { QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */ }; +#ifdef CONFIG_QUICC_ENGINE void qe_ic_init(struct device_node *node, unsigned int flags, void (*low_handler)(unsigned int irq, struct irq_desc *desc), void (*high_handler)(unsigned int irq, struct irq_desc *desc)); +unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); +unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); +#else +static inline void qe_ic_init(struct device_node *node, unsigned int flags, + void (*low_handler)(unsigned int irq, struct irq_desc *desc), + void (*high_handler)(unsigned int irq, struct irq_desc *desc)) +{} +static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) +{ return 0; } +static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) +{ return 0; } +#endif /* CONFIG_QUICC_ENGINE */ + void qe_ic_set_highest_priority(unsigned int virq, int high); int qe_ic_set_priority(unsigned int virq, unsigned int priority); int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); -struct qe_ic; -unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); -unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); - static inline void qe_ic_cascade_low_ipic(unsigned int irq, struct irq_desc *desc) { diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c6d1ab650778..f484a343efba 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -783,6 +783,10 @@ extern void scom970_write(unsigned int address, unsigned long value); #define __get_SP() ({unsigned long sp; \ asm volatile("mr %0,1": "=r" (sp)); sp;}) +struct pt_regs; + +extern void ppc_save_regs(struct pt_regs *regs); + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_REG_H */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 8eaa7b28d9d0..e0175beb4462 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -168,6 +168,7 @@ extern void rtas_os_term(char *str); extern int rtas_get_sensor(int sensor, int index, int *state); extern int rtas_get_power_level(int powerdomain, int *level); extern int rtas_set_power_level(int powerdomain, int level, int *setlevel); +extern bool rtas_indicator_present(int token, int *maxindex); extern int rtas_set_indicator(int indicator, int index, int new_value); extern int rtas_set_indicator_fast(int indicator, int index, int new_value); extern void rtas_progress(char *s, unsigned short hex); diff 
--git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h index ced34f1dc8f8..3d9f831c3c55 100644 --- a/arch/powerpc/include/asm/sfp-machine.h +++ b/arch/powerpc/include/asm/sfp-machine.h @@ -82,7 +82,7 @@ #define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) #define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) -#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y) +#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y) #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y) /* These macros define what NaN looks like. They're supposed to expand to @@ -97,6 +97,20 @@ #define _FP_KEEPNANFRACP 1 +#ifdef FP_EX_BOOKE_E500_SPE +#define FP_EX_INEXACT (1 << 21) +#define FP_EX_INVALID (1 << 20) +#define FP_EX_DIVZERO (1 << 19) +#define FP_EX_UNDERFLOW (1 << 18) +#define FP_EX_OVERFLOW (1 << 17) +#define FP_INHIBIT_RESULTS 0 + +#define __FPU_FPSCR (current->thread.spefscr) +#define __FPU_ENABLED_EXC \ +({ \ + (__FPU_FPSCR >> 2) & 0x1f; \ +}) +#else /* Exception flags. We use the bit positions of the appropriate bits in the FPSCR, which also correspond to the FE_* bits. This makes everything easier ;-). */ @@ -111,22 +125,6 @@ #define FP_EX_DIVZERO (1 << (31 - 5)) #define FP_EX_INEXACT (1 << (31 - 6)) -/* This macro appears to be called when both X and Y are NaNs, and - * has to choose one and copy it to R. i386 goes for the larger of the - * two, sparc64 just picks Y. I don't understand this at all so I'll - * go with sparc64 because it's shorter :-> -- PMM - */ -#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ - do { \ - R##_s = Y##_s; \ - _FP_FRAC_COPY_##wc(R,Y); \ - R##_c = FP_CLS_NAN; \ - } while (0) - - -#include <linux/kernel.h> -#include <linux/sched.h> - #define __FPU_FPSCR (current->thread.fpscr.val) /* We only actually write to the destination register @@ -137,6 +135,32 @@ (__FPU_FPSCR >> 3) & 0x1f; \ }) +#endif + +/* + * If one NaN is signaling and the other is not, + * we choose that one, otherwise we choose X. 
+ */ +#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ + do { \ + if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \ + && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \ + { \ + R##_s = X##_s; \ + _FP_FRAC_COPY_##wc(R,X); \ + } \ + else \ + { \ + R##_s = Y##_s; \ + _FP_FRAC_COPY_##wc(R,Y); \ + } \ + R##_c = FP_CLS_NAN; \ + } while (0) + + +#include <linux/kernel.h> +#include <linux/sched.h> + #define __FPU_TRAP_P(bits) \ ((__FPU_ENABLED_EXC & (bits)) != 0) diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 1866cec4f967..c25f73d1d842 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -81,6 +81,13 @@ extern int cpu_to_core_id(int cpu); #define PPC_MSG_CALL_FUNC_SINGLE 2 #define PPC_MSG_DEBUGGER_BREAK 3 +/* + * irq controllers that have dedicated ipis per message and don't + * need additional code in the action handler may use this + */ +extern int smp_request_message_ipi(int virq, int message); +extern const char *smp_ipi_name[]; + void smp_init_iSeries(void); void smp_init_pSeries(void); void smp_init_cell(void); diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index f56a843f4705..36864364e601 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -277,7 +277,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) bne- 1b" : "=&r"(tmp) : "r"(&rw->lock) - : "cr0", "memory"); + : "cr0", "xer", "memory"); } static inline void __raw_write_unlock(raw_rwlock_t *rw) diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h index 8b2eb044270a..0ab8d869e3d6 100644 --- a/arch/powerpc/include/asm/spu.h +++ b/arch/powerpc/include/asm/spu.h @@ -128,7 +128,7 @@ struct spu { int number; unsigned int irqs[3]; u32 node; - u64 flags; + unsigned long flags; u64 class_0_pending; u64 class_0_dar; u64 class_1_dar; diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h new file mode 100644 index 000000000000..ef824ae4b79c --- /dev/null +++ b/arch/powerpc/include/asm/swab.h @@ -0,0 +1,90 @@ +#ifndef _ASM_POWERPC_SWAB_H +#define _ASM_POWERPC_SWAB_H + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <asm/types.h> +#include <linux/compiler.h> + +#ifdef __GNUC__ + +#ifndef __powerpc64__ +#define __SWAB_64_THRU_32__ +#endif /* __powerpc64__ */ + +#ifdef __KERNEL__ + +static __inline__ __u16 ld_le16(const volatile __u16 *addr) +{ + __u16 val; + + __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); + return val; +} +#define __arch_swab16p ld_le16 + +static __inline__ void st_le16(volatile __u16 *addr, const __u16 val) +{ + __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); +} + +static inline void __arch_swab16s(__u16 *addr) +{ + st_le16(addr, *addr); +} +#define __arch_swab16s __arch_swab16s + +static __inline__ __u32 ld_le32(const volatile __u32 *addr) +{ + __u32 val; + + __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); + return val; +} +#define __arch_swab32p ld_le32 + +static __inline__ void st_le32(volatile __u32 *addr, const __u32 val) +{ + __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); +} + +static inline void __arch_swab32s(__u32 *addr) +{ + st_le32(addr, *addr); +} +#define __arch_swab32s __arch_swab32s + +static inline __attribute_const__ __u16 __arch_swab16(__u16 value) +{ + __u16 result; + + __asm__("rlwimi %0,%1,8,16,23" + : "=r" (result) + : "r" (value), "0" (value >> 8)); + return result; +} +#define __arch_swab16 __arch_swab16 + +static inline __attribute_const__ __u32 __arch_swab32(__u32 value) +{ + __u32 result; + + __asm__("rlwimi %0,%1,24,16,23\n\t" + "rlwimi %0,%1,8,8,15\n\t" + "rlwimi %0,%1,24,0,7" + : "=r" (result) + : "r" (value), "0" (value >> 24)); + return result; +} +#define __arch_swab32 __arch_swab32 + +#endif /* __KERNEL__ */ + +#endif /* __GNUC__ */ + +#endif /* _ASM_POWERPC_SWAB_H */ diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h index 45963e80f557..28f6ddbff4cf 100644 --- a/arch/powerpc/include/asm/synch.h +++ b/arch/powerpc/include/asm/synch.h @@ -5,6 +5,10 @@ #include <linux/stringify.h> #include <asm/feature-fixups.h> +#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) +#define __SUBARCH_HAS_LWSYNC +#endif + #ifndef __ASSEMBLY__ extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; extern void do_lwsync_fixups(unsigned long value, void *fixup_start, diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index d6648c143322..2a4be19a92c4 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h @@ -23,15 +23,17 @@ * read_barrier_depends() prevents data-dependent loads being reordered * across this point (nop on PPC). * - * We have to use the sync instructions for mb(), since lwsync doesn't - * order loads with respect to previous stores. Lwsync is fine for - * rmb(), though. Note that rmb() actually uses a sync on 32-bit - * architectures. + * *mb() variants without smp_ prefix must order all types of memory + * operations with one another. sync is the only instruction sufficient + * to do this. * - * For wmb(), we use sync since wmb is used in drivers to order - * stores to system memory with respect to writes to the device. - * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier - * on SMP since it is only used to order updates to system memory. + * For the smp_ barriers, ordering is for cacheable memory operations + * only. We have to use the sync instruction for smp_mb(), since lwsync + * doesn't order loads with respect to previous stores. Lwsync can be + * used for smp_rmb() and smp_wmb(). 
+ * + * However, on CPUs that don't support lwsync, lwsync actually maps to a + * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio. */ #define mb() __asm__ __volatile__ ("sync" : : : "memory") #define rmb() __asm__ __volatile__ ("sync" : : : "memory") @@ -45,14 +47,14 @@ #ifdef CONFIG_SMP #ifdef __SUBARCH_HAS_LWSYNC -# define SMPWMB lwsync +# define SMPWMB LWSYNC #else # define SMPWMB eieio #endif #define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() __asm__ __volatile__ (__stringify(SMPWMB) : : :"memory") +#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") #define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index febd581ec9b0..27ccb764fdab 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -48,26 +48,6 @@ extern unsigned long ppc_proc_freq; extern unsigned long ppc_tb_freq; #define DEFAULT_TB_FREQ 125000000UL -/* - * By putting all of this stuff into a single struct we - * reduce the number of cache lines touched by do_gettimeofday. - * Both by collecting all of the data in one cache line and - * by touching only one TOC entry on ppc64. - */ -struct gettimeofday_vars { - u64 tb_to_xs; - u64 stamp_xsec; - u64 tb_orig_stamp; -}; - -struct gettimeofday_struct { - unsigned long tb_ticks_per_sec; - struct gettimeofday_vars vars[2]; - struct gettimeofday_vars * volatile varp; - unsigned var_idx; - unsigned tb_to_us; -}; - struct div_result { u64 result_high; u64 result_low; diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h index a2c6bfd85fb7..abbe3419d1dd 100644 --- a/arch/powerpc/include/asm/tlbflush.h +++ b/arch/powerpc/include/asm/tlbflush.h @@ -6,6 +6,9 @@ * * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page + * - local_flush_tlb_mm(mm) flushes the specified mm context on + * the local processor + * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages @@ -17,7 +20,7 @@ */ #ifdef __KERNEL__ -#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE) +#ifdef CONFIG_PPC_MMU_NOHASH /* * TLB flushing for software loaded TLB chips * @@ -28,63 +31,49 @@ #include <linux/mm.h> -extern void _tlbie(unsigned long address, unsigned int pid); -extern void _tlbil_all(void); -extern void _tlbil_pid(unsigned int pid); -extern void _tlbil_va(unsigned long address, unsigned int pid); +#define MMU_NO_CONTEXT ((unsigned int)-1) -#if defined(CONFIG_40x) || defined(CONFIG_8xx) -#define _tlbia() asm volatile ("tlbia; sync" : : : "memory") -#else /* CONFIG_44x || CONFIG_FSL_BOOKE */ -extern void _tlbia(void); -#endif - -static inline void flush_tlb_mm(struct mm_struct *mm) -{ - _tlbil_pid(mm->context.id); -} - -static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long vmaddr) -{ - _tlbil_va(vmaddr, vma ? 
vma->vm_mm->context.id : 0); -} +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); -static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, - unsigned long vmaddr) -{ - flush_tlb_page(vma, vmaddr); -} +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -static inline void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - _tlbil_pid(vma->vm_mm->context.id); -} +#ifdef CONFIG_SMP +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +#else +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr) +#endif +#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr) -static inline void flush_tlb_kernel_range(unsigned long start, - unsigned long end) -{ - _tlbil_pid(0); -} +#elif defined(CONFIG_PPC_STD_MMU_32) -#elif defined(CONFIG_PPC32) /* - * TLB flushing for "classic" hash-MMMU 32-bit CPUs, 6xx, 7xx, 7xxx + * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx */ -extern void _tlbie(unsigned long address); -extern void _tlbia(void); - extern void flush_tlb_mm(struct mm_struct *mm); extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +static inline void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ + flush_tlb_page(vma, vmaddr); +} +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ + flush_tlb_mm(mm); +} + +#elif defined(CONFIG_PPC_STD_MMU_64) -#else /* - * TLB flushing for 64-bit has-MMU CPUs + * TLB flushing for 64-bit hash-MMU CPUs */ #include <linux/percpu.h> @@ -134,10 +123,19 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, extern void flush_hash_range(unsigned long number, int local); +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ +} + static inline void flush_tlb_mm(struct mm_struct *mm) { } +static inline void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +} + static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { @@ -162,7 +160,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, unsigned long end); - +#else +#error Unsupported MMU type #endif #endif /*__KERNEL__ */ diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index c32da6f97999..375258559ae6 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -22,11 +22,11 @@ static inline cpumask_t node_to_cpumask(int node) return numa_cpumask_lookup_table[node]; } +#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) + static inline int node_to_first_cpu(int node) { - cpumask_t tmp; - tmp = node_to_cpumask(node); - return first_cpu(tmp); + return cpumask_first(cpumask_of_node(node)); } int of_node_to_nid(struct device_node *device); @@ -46,9 +46,12 @@ static inline int pcibus_to_node(struct pci_bus *bus) 
node_to_cpumask(pcibus_to_node(bus)) \ ) +#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ + cpu_all_mask : \ + cpumask_of_node(pcibus_to_node(bus))) + /* sched_domains SD_NODE_INIT for PPC64 machines */ #define SD_NODE_INIT (struct sched_domain) { \ - .span = CPU_MASK_NONE, \ .parent = NULL, \ .child = NULL, \ .groups = NULL, \ @@ -109,6 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev, #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) +#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) +#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) #define topology_core_id(cpu) (cpu_to_core_id(cpu)) #endif #endif diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index f01393224b52..13c2c283e178 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -39,6 +39,7 @@ #ifndef __ASSEMBLY__ #include <linux/unistd.h> +#include <linux/time.h> #define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32) @@ -83,6 +84,7 @@ struct vdso_data { __u32 icache_log_block_size; /* L1 i-cache log block size */ __s32 wtom_clock_sec; /* Wall to monotonic clock */ __s32 wtom_clock_nsec; + struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */ __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ }; @@ -102,6 +104,7 @@ struct vdso_data { __u32 tz_dsttime; /* Type of dst correction 0x5C */ __s32 wtom_clock_sec; /* Wall to monotonic clock */ __s32 wtom_clock_nsec; + struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 dcache_block_size; /* L1 d-cache block size */ __u32 icache_block_size; /* L1 i-cache block size */ |
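The cpumask accessors added to topology.h above return pointers into existing masks instead of copying a whole cpumask_t by value, which is what lets node_to_first_cpu() drop its on-stack temporary. A minimal sketch of a caller in the same style (pick_local_cpu() is hypothetical, not part of this patch):

/* Hypothetical helper: first CPU considered local to a PCI bus,
 * using the pointer-based accessor rather than a stack copy. */
static int pick_local_cpu(struct pci_bus *bus)
{
	return cpumask_first(cpumask_of_pcibus(bus));
}

Because cpumask_of_pcibus() falls back to cpu_all_mask when the bus has no NUMA node, the helper never indexes the node lookup table with -1.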