Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/bitops.h             | 196
-rw-r--r--  arch/powerpc/include/asm/cell-regs.h          |  11
-rw-r--r--  arch/powerpc/include/asm/cputhreads.h         |  16
-rw-r--r--  arch/powerpc/include/asm/device.h             |   7
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h        | 318
-rw-r--r--  arch/powerpc/include/asm/exception-64e.h      | 205
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h (renamed from arch/powerpc/include/asm/exception.h) | 25
-rw-r--r--  arch/powerpc/include/asm/hardirq.h            |  30
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h             |   5
-rw-r--r--  arch/powerpc/include/asm/iommu.h              |  10
-rw-r--r--  arch/powerpc/include/asm/irq.h                |   7
-rw-r--r--  arch/powerpc/include/asm/machdep.h            |   6
-rw-r--r--  arch/powerpc/include/asm/mmu-40x.h            |   3
-rw-r--r--  arch/powerpc/include/asm/mmu-44x.h            |   6
-rw-r--r--  arch/powerpc/include/asm/mmu-8xx.h            |   3
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h         | 208
-rw-r--r--  arch/powerpc/include/asm/mmu-hash32.h         |  16
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h         |  22
-rw-r--r--  arch/powerpc/include/asm/mmu.h                |  46
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h        |  15
-rw-r--r--  arch/powerpc/include/asm/nvram.h              |   3
-rw-r--r--  arch/powerpc/include/asm/paca.h               |  23
-rw-r--r--  arch/powerpc/include/asm/page.h               |   4
-rw-r--r--  arch/powerpc/include/asm/page_64.h            |  10
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h         |  40
-rw-r--r--  arch/powerpc/include/asm/pci.h                |  11
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h            |  46
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h      |   9
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-64k.h  |   4
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h      |  67
-rw-r--r--  arch/powerpc/include/asm/pmc.h                |  16
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h         |   6
-rw-r--r--  arch/powerpc/include/asm/ppc-pci.h            |   1
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h            |  26
-rw-r--r--  arch/powerpc/include/asm/pte-40x.h            |   2
-rw-r--r--  arch/powerpc/include/asm/pte-44x.h            |   2
-rw-r--r--  arch/powerpc/include/asm/pte-8xx.h            |   1
-rw-r--r--  arch/powerpc/include/asm/pte-book3e.h         |  84
-rw-r--r--  arch/powerpc/include/asm/pte-common.h         |  25
-rw-r--r--  arch/powerpc/include/asm/pte-fsl-booke.h      |   9
-rw-r--r--  arch/powerpc/include/asm/pte-hash32.h         |   1
-rw-r--r--  arch/powerpc/include/asm/reg.h                | 141
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h          |  50
-rw-r--r--  arch/powerpc/include/asm/setup.h              |   2
-rw-r--r--  arch/powerpc/include/asm/smp.h                |  10
-rw-r--r--  arch/powerpc/include/asm/swiotlb.h            |   8
-rw-r--r--  arch/powerpc/include/asm/systbl.h             |   4
-rw-r--r--  arch/powerpc/include/asm/tlb.h                |  38
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h           |  11
-rw-r--r--  arch/powerpc/include/asm/vdso.h               |   3
50 files changed, 1092 insertions, 720 deletions
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 897eade3afbe..56f2f2ea5631 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -56,174 +56,102 @@
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+/* Macro for generating the ***_bits() functions */
+#define DEFINE_BITOP(fn, op, prefix, postfix) \
+static __inline__ void fn(unsigned long mask, \
+ volatile unsigned long *_p) \
+{ \
+ unsigned long old; \
+ unsigned long *p = (unsigned long *)_p; \
+ __asm__ __volatile__ ( \
+ prefix \
+"1:" PPC_LLARX "%0,0,%3\n" \
+ stringify_in_c(op) "%0,%0,%2\n" \
+ PPC405_ERR77(0,%3) \
+ PPC_STLCX "%0,0,%3\n" \
+ "bne- 1b\n" \
+ postfix \
+ : "=&r" (old), "+m" (*p) \
+ : "r" (mask), "r" (p) \
+ : "cc", "memory"); \
+}
+
+DEFINE_BITOP(set_bits, or, "", "")
+DEFINE_BITOP(clear_bits, andc, "", "")
+DEFINE_BITOP(clear_bits_unlock, andc, LWSYNC_ON_SMP, "")
+DEFINE_BITOP(change_bits, xor, "", "")
+
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
- unsigned long old;
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- __asm__ __volatile__(
-"1:" PPC_LLARX "%0,0,%3 # set_bit\n"
- "or %0,%0,%2\n"
- PPC405_ERR77(0,%3)
- PPC_STLCX "%0,0,%3\n"
- "bne- 1b"
- : "=&r" (old), "+m" (*p)
- : "r" (mask), "r" (p)
- : "cc" );
+ set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
}
static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
- unsigned long old;
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- __asm__ __volatile__(
-"1:" PPC_LLARX "%0,0,%3 # clear_bit\n"
- "andc %0,%0,%2\n"
- PPC405_ERR77(0,%3)
- PPC_STLCX "%0,0,%3\n"
- "bne- 1b"
- : "=&r" (old), "+m" (*p)
- : "r" (mask), "r" (p)
- : "cc" );
+ clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
}
static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
{
- unsigned long old;
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- __asm__ __volatile__(
- LWSYNC_ON_SMP
-"1:" PPC_LLARX "%0,0,%3 # clear_bit_unlock\n"
- "andc %0,%0,%2\n"
- PPC405_ERR77(0,%3)
- PPC_STLCX "%0,0,%3\n"
- "bne- 1b"
- : "=&r" (old), "+m" (*p)
- : "r" (mask), "r" (p)
- : "cc", "memory");
+ clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr));
}
static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
- unsigned long old;
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- __asm__ __volatile__(
-"1:" PPC_LLARX "%0,0,%3 # change_bit\n"
- "xor %0,%0,%2\n"
- PPC405_ERR77(0,%3)
- PPC_STLCX "%0,0,%3\n"
- "bne- 1b"
- : "=&r" (old), "+m" (*p)
- : "r" (mask), "r" (p)
- : "cc" );
+ change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+}
+
+/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
+ * operands. */
+#define DEFINE_TESTOP(fn, op, prefix, postfix) \
+static __inline__ unsigned long fn( \
+ unsigned long mask, \
+ volatile unsigned long *_p) \
+{ \
+ unsigned long old, t; \
+ unsigned long *p = (unsigned long *)_p; \
+ __asm__ __volatile__ ( \
+ prefix \
+"1:" PPC_LLARX "%0,0,%3\n" \
+ stringify_in_c(op) "%1,%0,%2\n" \
+ PPC405_ERR77(0,%3) \
+ PPC_STLCX "%1,0,%3\n" \
+ "bne- 1b\n" \
+ postfix \
+ : "=&r" (old), "=&r" (t) \
+ : "r" (mask), "r" (p) \
+ : "cc", "memory"); \
+ return (old & mask); \
}
+DEFINE_TESTOP(test_and_set_bits, or, LWSYNC_ON_SMP, ISYNC_ON_SMP)
+DEFINE_TESTOP(test_and_set_bits_lock, or, "", ISYNC_ON_SMP)
+DEFINE_TESTOP(test_and_clear_bits, andc, LWSYNC_ON_SMP, ISYNC_ON_SMP)
+DEFINE_TESTOP(test_and_change_bits, xor, LWSYNC_ON_SMP, ISYNC_ON_SMP)
+
static __inline__ int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned long old, t;
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- __asm__ __volatile__(
- LWSYNC_ON_SMP
-"1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n"
- "or %1,%0,%2 \n"
- PPC405_ERR77(0,%3)
- PPC_STLCX "%1,0,%3 \n"
- "bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (old), "=&r" (t)
- : "r" (mask), "r" (p)
- : "cc", "memory");
-
- return (old & mask) != 0;
+ return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
}
static __inline__ int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned long old, t;
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- __asm__ __volatile__(
-"1:" PPC_LLARX "%0,0,%3 # test_and_set_bit_lock\n"
- "or %1,%0,%2 \n"
- PPC405_ERR77(0,%3)
- PPC_STLCX "%1,0,%3 \n"
- "bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (old), "=&r" (t)
- : "r" (mask), "r" (p)
- : "cc", "memory");
-
- return (old & mask) != 0;
+ return test_and_set_bits_lock(BITOP_MASK(nr),
+ addr + BITOP_WORD(nr)) != 0;
}
static __inline__ int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned long old, t;
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- __asm__ __volatile__(
- LWSYNC_ON_SMP
-"1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n"
- "andc %1,%0,%2 \n"
- PPC405_ERR77(0,%3)
- PPC_STLCX "%1,0,%3 \n"
- "bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (old), "=&r" (t)
- : "r" (mask), "r" (p)
- : "cc", "memory");
-
- return (old & mask) != 0;
+ return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
}
static __inline__ int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned long old, t;
- unsigned long mask = BITOP_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
-
- __asm__ __volatile__(
- LWSYNC_ON_SMP
-"1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n"
- "xor %1,%0,%2 \n"
- PPC405_ERR77(0,%3)
- PPC_STLCX "%1,0,%3 \n"
- "bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (old), "=&r" (t)
- : "r" (mask), "r" (p)
- : "cc", "memory");
-
- return (old & mask) != 0;
-}
-
-static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
-{
- unsigned long old;
-
- __asm__ __volatile__(
-"1:" PPC_LLARX "%0,0,%3 # set_bits\n"
- "or %0,%0,%2\n"
- PPC_STLCX "%0,0,%3\n"
- "bne- 1b"
- : "=&r" (old), "+m" (*addr)
- : "r" (mask), "r" (addr)
- : "cc");
+ return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
}
#include <asm-generic/bitops/non-atomic.h>
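The consolidated helpers above all hinge on the same split: BITOP_WORD() selects the word holding the bit, BITOP_MASK() the bit within that word, and the generated ll/sc loop applies the mask atomically. A minimal userspace sketch of that decomposition, with GCC's __atomic_fetch_or standing in for the PPC_LLARX/PPC_STLCX retry loop (an illustration, not the kernel implementation):

/* Sketch of the BITOP_WORD/BITOP_MASK split used by set_bit() above;
 * __atomic_fetch_or replaces the larx/stcx. loop. */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITOP_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)	((nr) / BITS_PER_LONG)

static void demo_set_bit(int nr, volatile unsigned long *addr)
{
	/* same decomposition set_bit() performs before calling set_bits();
	 * relaxed ordering matches set_bit()'s lack of a barrier */
	__atomic_fetch_or(addr + BITOP_WORD(nr), BITOP_MASK(nr),
			  __ATOMIC_RELAXED);
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	demo_set_bit(3, map);
	demo_set_bit(BITS_PER_LONG + 1, map);	/* lands in map[1] */
	printf("%#lx %#lx\n", map[0], map[1]);	/* prints 0x8 0x2 */
	return 0;
}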
diff --git a/arch/powerpc/include/asm/cell-regs.h b/arch/powerpc/include/asm/cell-regs.h
index fd6fd00434ef..fdf64fd25950 100644
--- a/arch/powerpc/include/asm/cell-regs.h
+++ b/arch/powerpc/include/asm/cell-regs.h
@@ -303,6 +303,17 @@ struct cbe_mic_tm_regs {
extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
+
+/* Cell page table entries */
+#define CBE_IOPTE_PP_W 0x8000000000000000ul /* protection: write */
+#define CBE_IOPTE_PP_R 0x4000000000000000ul /* protection: read */
+#define CBE_IOPTE_M 0x2000000000000000ul /* coherency required */
+#define CBE_IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
+#define CBE_IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
+#define CBE_IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
+#define CBE_IOPTE_H 0x0000000000000800ul /* cache hint */
+#define CBE_IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
+
/* some utility functions to deal with SMT */
extern u32 cbe_get_hw_thread_id(int cpu);
extern u32 cbe_cpu_to_node(int cpu);
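These masks, moved here from asm/iommu.h (see the removal further down), describe the Cell I/O page table entry format. A hypothetical sketch of composing a coherent, read/write IOPTE from them, along the lines of what the Cell IOMMU code does ('pa' and 'ioid' are made-up inputs, and the CBE_IOPTE_* definitions above are assumed to be in scope):

/* Sketch only: build a coherent, strongly ordered, read/write Cell
 * I/O PTE from the masks defined above. */
#include <stdint.h>

static uint64_t demo_make_iopte(uint64_t pa, uint16_t ioid)
{
	return CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |	/* read + write */
	       CBE_IOPTE_M | CBE_IOPTE_SO_RW |		/* coherent, ordered */
	       (pa & CBE_IOPTE_RPN_Mask) |		/* physical page */
	       (ioid & CBE_IOPTE_IOID_Mask);		/* device ioid */
}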
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index fb11b0c459b8..a8e18447c62b 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -5,6 +5,15 @@
/*
* Mapping of threads to cores
+ *
+ * Note: This implementation is limited to a power of 2 number of
+ * threads per core and the same number for each core in the system
+ * (though it would work if some processors had less threads as long
+ * as the CPU numbers are still allocated, just not brought offline).
+ *
+ * However, the API allows for a different implementation in the future
+ * if needed, as long as you only use the functions and not the variables
+ * directly.
*/
#ifdef CONFIG_SMP
@@ -67,5 +76,12 @@ static inline int cpu_first_thread_in_core(int cpu)
return cpu & ~(threads_per_core - 1);
}
+static inline int cpu_last_thread_in_core(int cpu)
+{
+ return cpu | (threads_per_core - 1);
+}
+
+
+
#endif /* _ASM_POWERPC_CPUTHREADS_H */
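Because threads_per_core is a power of 2 and thread numbers are contiguous within a core, the first/last-thread helpers reduce to mask arithmetic. A small standalone sketch, assuming threads_per_core = 4:

/* Sketch of the arithmetic behind cpu_first_thread_in_core() and the
 * new cpu_last_thread_in_core(); threads_per_core = 4 is an assumption. */
#include <stdio.h>

static int threads_per_core = 4;

static int first_thread(int cpu) { return cpu & ~(threads_per_core - 1); }
static int last_thread(int cpu)  { return cpu |  (threads_per_core - 1); }

int main(void)
{
	printf("%d %d\n", first_thread(6), last_thread(6));	/* 4 7 */
	return 0;
}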
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index e3e06e0f7fc0..9dade15d1ab4 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -6,7 +6,7 @@
#ifndef _ASM_POWERPC_DEVICE_H
#define _ASM_POWERPC_DEVICE_H
-struct dma_mapping_ops;
+struct dma_map_ops;
struct device_node;
struct dev_archdata {
@@ -14,8 +14,11 @@ struct dev_archdata {
struct device_node *of_node;
/* DMA operations on that device */
- struct dma_mapping_ops *dma_ops;
+ struct dma_map_ops *dma_ops;
void *dma_data;
+#ifdef CONFIG_SWIOTLB
+ dma_addr_t max_direct_dma_addr;
+#endif
};
static inline void dev_archdata_set_node(struct dev_archdata *ad,
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 0c34371ec49c..cb2ca41dd526 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
@@ -64,58 +65,14 @@ static inline unsigned long device_to_mask(struct device *dev)
}
/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
- void * (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
- void (*free_coherent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
- int (*map_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs);
- int (*dma_supported)(struct device *dev, u64 mask);
- int (*set_dma_mask)(struct device *dev, u64 dma_mask);
- dma_addr_t (*map_page)(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- struct dma_attrs *attrs);
- void (*unmap_page)(struct device *dev,
- dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction,
- struct dma_attrs *attrs);
- int (*addr_needs_map)(struct device *dev, dma_addr_t addr,
- size_t size);
-#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
- void (*sync_single_range_for_cpu)(struct device *hwdev,
- dma_addr_t dma_handle, unsigned long offset,
- size_t size,
- enum dma_data_direction direction);
- void (*sync_single_range_for_device)(struct device *hwdev,
- dma_addr_t dma_handle, unsigned long offset,
- size_t size,
- enum dma_data_direction direction);
- void (*sync_sg_for_cpu)(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction);
- void (*sync_sg_for_device)(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction);
-#endif
-};
-
-/*
* Available generic sets of operations
*/
#ifdef CONFIG_PPC64
-extern struct dma_mapping_ops dma_iommu_ops;
+extern struct dma_map_ops dma_iommu_ops;
#endif
-extern struct dma_mapping_ops dma_direct_ops;
+extern struct dma_map_ops dma_direct_ops;
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
/* We don't handle the NULL dev case for ISA for now. We could
* do it via an out of line call but it is not needed for now. The
@@ -128,14 +85,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
return dev->archdata.dma_ops;
}
-static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
dev->archdata.dma_ops = ops;
}
+/* this will be removed soon */
+#define flush_write_buffers()
+
+#include <asm-generic/dma-mapping-common.h>
+
static inline int dma_supported(struct device *dev, u64 mask)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
if (unlikely(dma_ops == NULL))
return 0;
@@ -149,7 +111,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
if (unlikely(dma_ops == NULL))
return -EIO;
@@ -161,262 +123,40 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
return 0;
}
-/*
- * map_/unmap_single actually call through to map/unmap_page now that all the
- * dma_mapping_ops have been converted over. We just have to get the page and
- * offset to pass through to map_page
- */
-static inline dma_addr_t dma_map_single_attrs(struct device *dev,
- void *cpu_addr,
- size_t size,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- return dma_ops->map_page(dev, virt_to_page(cpu_addr),
- (unsigned long)cpu_addr % PAGE_SIZE, size,
- direction, attrs);
-}
-
-static inline void dma_unmap_single_attrs(struct device *dev,
- dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
-}
-
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
- struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- return dma_ops->map_page(dev, page, offset, size, direction, attrs);
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev,
- dma_addr_t dma_address,
- size_t size,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
-}
-
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
- return dma_ops->map_sg(dev, sg, nents, direction, attrs);
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev,
- struct scatterlist *sg,
- int nhwentries,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
- dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
-}
-
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
- return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries,
- enum dma_data_direction direction)
-{
- dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
-}
-
-#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- if (dma_ops->sync_single_range_for_cpu)
- dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
- size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- if (dma_ops->sync_single_range_for_device)
- dma_ops->sync_single_range_for_device(dev, dma_handle,
- 0, size, direction);
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents,
- enum dma_data_direction direction)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ void *cpu_addr;
BUG_ON(!dma_ops);
- if (dma_ops->sync_sg_for_cpu)
- dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents,
- enum dma_data_direction direction)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- if (dma_ops->sync_sg_for_device)
- dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag);
- BUG_ON(!dma_ops);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
- if (dma_ops->sync_single_range_for_cpu)
- dma_ops->sync_single_range_for_cpu(dev, dma_handle,
- offset, size, direction);
+ return cpu_addr;
}
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- if (dma_ops->sync_single_range_for_device)
- dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
- size, direction);
-}
-#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents,
- enum dma_data_direction direction)
-{
-}
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-static inline void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents,
- enum dma_data_direction direction)
-{
+ dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
-}
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-}
-#endif
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dev, dma_addr);
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
#ifdef CONFIG_PPC64
return (dma_addr == DMA_ERROR_CODE);
#else
@@ -426,10 +166,12 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
- struct dma_mapping_ops *ops = get_dma_ops(dev);
+#ifdef CONFIG_SWIOTLB
+ struct dev_archdata *sd = &dev->archdata;
- if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size))
+ if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
return 0;
+#endif
if (!dev->dma_mask)
return 0;
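With the addr_needs_map() hook gone, the swiotlb bounce decision keys off the per-device max_direct_dma_addr added to dev_archdata earlier in this series. A hypothetical standalone sketch of the resulting dma_capable() logic (types and names are simplified stand-ins, not the kernel's):

/* Sketch only: a range is directly DMA-able iff it sits below the
 * device's swiotlb cutoff (0 meaning "no cutoff") and fits the mask. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

struct fake_dev {
	dma_addr_t max_direct_dma_addr;		/* swiotlb cutoff, 0 = none */
	uint64_t dma_mask;
};

static bool fake_dma_capable(struct fake_dev *dev, dma_addr_t addr,
			     size_t size)
{
	if (dev->max_direct_dma_addr && addr + size > dev->max_direct_dma_addr)
		return false;			/* must bounce via swiotlb */
	return addr + size - 1 <= dev->dma_mask;
}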
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
new file mode 100644
index 000000000000..6d53f311d942
--- /dev/null
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -0,0 +1,205 @@
+/*
+ * Definitions for use by exception code on Book3-E
+ *
+ * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_EXCEPTION_64E_H
+#define _ASM_POWERPC_EXCEPTION_64E_H
+
+/*
+ * SPRG usage and other considerations...
+ *
+ * Since TLB miss and other standard exceptions can be interrupted by
+ * critical exceptions which can themselves be interrupted by machine
+ * checks, and since the two latter can themselves cause a TLB miss when
+ * hitting the linear mapping for the kernel stacks, we need to be a bit
+ * creative on how we use SPRGs.
+ *
+ * The base idea is that we have one SPRG reserved for critical and one
+ * for machine check interrupts. Those are used to save a GPR that can
+ * then be used to get the PACA, and store as much context as we need
+ * to save in there. That includes saving the SPRGs used by the TLB miss
+ * handler for linear mapping misses and the associated SRR0/1 due to
+ * the above re-entrancy issue.
+ *
+ * So here's the current usage pattern. It's done regardless of which
+ * SPRGs are user-readable though, thus we might have to change some of
+ * this later. In order to do that more easily, we use special constants
+ * for naming them.
+ *
+ * WARNING: Some of these SPRGs are user readable. We need to do something
+ * about it at some point by making sure they can't be used to leak kernel
+ * critical data
+ */
+
+
+/* We are out of SPRGs so we save some things in the PACA. The normal
+ * exception frame is smaller than the CRIT or MC one though
+ */
+#define EX_R1 (0 * 8)
+#define EX_CR (1 * 8)
+#define EX_R10 (2 * 8)
+#define EX_R11 (3 * 8)
+#define EX_R14 (4 * 8)
+#define EX_R15 (5 * 8)
+
+/* The TLB miss exception uses different slots */
+
+#define EX_TLB_R10 ( 0 * 8)
+#define EX_TLB_R11 ( 1 * 8)
+#define EX_TLB_R12 ( 2 * 8)
+#define EX_TLB_R13 ( 3 * 8)
+#define EX_TLB_R14 ( 4 * 8)
+#define EX_TLB_R15 ( 5 * 8)
+#define EX_TLB_R16 ( 6 * 8)
+#define EX_TLB_CR ( 7 * 8)
+#define EX_TLB_DEAR ( 8 * 8) /* Level 0 and 2 only */
+#define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */
+#define EX_TLB_SRR0 (10 * 8)
+#define EX_TLB_SRR1 (11 * 8)
+#define EX_TLB_MMUCR0 (12 * 8) /* Level 0 */
+#define EX_TLB_MAS1 (12 * 8) /* Level 0 */
+#define EX_TLB_MAS2 (13 * 8) /* Level 0 */
+#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
+#define EX_TLB_R8 (14 * 8)
+#define EX_TLB_R9 (15 * 8)
+#define EX_TLB_LR (16 * 8)
+#define EX_TLB_SIZE (17 * 8)
+#else
+#define EX_TLB_SIZE (14 * 8)
+#endif
+
+#define START_EXCEPTION(label) \
+ .globl exc_##label##_book3e; \
+exc_##label##_book3e:
+
+/* TLB miss exception prolog
+ *
+ * This prolog handles re-entrancy (up to 3 levels supported in the PACA
+ * though we currently don't test for overflow). It provides you with a
+ * re-entrancy safe working space of r10...r16 and CR with r12 being used
+ * as the exception area pointer in the PACA for that level of re-entrancy
+ * and r13 containing the PACA pointer.
+ *
+ * SRR0 and SRR1 are saved, but DEAR and ESR are not, since they don't apply
+ * as-is for instruction exceptions. It's up to the actual exception code
+ * to save them as well if required.
+ */
+#define TLB_MISS_PROLOG \
+ mtspr SPRN_SPRG_TLB_SCRATCH,r12; \
+ mfspr r12,SPRN_SPRG_TLB_EXFRAME; \
+ std r10,EX_TLB_R10(r12); \
+ mfcr r10; \
+ std r11,EX_TLB_R11(r12); \
+ mfspr r11,SPRN_SPRG_TLB_SCRATCH; \
+ std r13,EX_TLB_R13(r12); \
+ mfspr r13,SPRN_SPRG_PACA; \
+ std r14,EX_TLB_R14(r12); \
+ addi r14,r12,EX_TLB_SIZE; \
+ std r15,EX_TLB_R15(r12); \
+ mfspr r15,SPRN_SRR1; \
+ std r16,EX_TLB_R16(r12); \
+ mfspr r16,SPRN_SRR0; \
+ std r10,EX_TLB_CR(r12); \
+ std r11,EX_TLB_R12(r12); \
+ mtspr SPRN_SPRG_TLB_EXFRAME,r14; \
+ std r15,EX_TLB_SRR1(r12); \
+ std r16,EX_TLB_SRR0(r12); \
+ TLB_MISS_PROLOG_STATS
+
+/* And these are the matching epilogs that restore things
+ *
+ * There are 3 epilogs:
+ *
+ * - SUCCESS : Unwinds one level
+ * - ERROR : restore from level 0 and reset
+ * - ERROR_SPECIAL : restore from current level and reset
+ *
+ * Normal errors use ERROR, that is, they restore the initial fault context
+ * and trigger a fault. However, there is a special case for linear mapping
+ * errors. Those should basically never happen, but if they do happen, we
+ * want the error to point out the context that did that linear mapping
+ * fault, not the initial level 0 (basically, we got a bogus PGF or something
+ * like that). For userland errors on the linear mapping, there is no
+ * difference since those are always level 0 anyway
+ */
+
+#define TLB_MISS_RESTORE(freg) \
+ ld r14,EX_TLB_CR(r12); \
+ ld r10,EX_TLB_R10(r12); \
+ ld r15,EX_TLB_SRR0(r12); \
+ ld r16,EX_TLB_SRR1(r12); \
+ mtspr SPRN_SPRG_TLB_EXFRAME,freg; \
+ ld r11,EX_TLB_R11(r12); \
+ mtcr r14; \
+ ld r13,EX_TLB_R13(r12); \
+ ld r14,EX_TLB_R14(r12); \
+ mtspr SPRN_SRR0,r15; \
+ ld r15,EX_TLB_R15(r12); \
+ mtspr SPRN_SRR1,r16; \
+ TLB_MISS_RESTORE_STATS \
+ ld r16,EX_TLB_R16(r12); \
+ ld r12,EX_TLB_R12(r12); \
+
+#define TLB_MISS_EPILOG_SUCCESS \
+ TLB_MISS_RESTORE(r12)
+
+#define TLB_MISS_EPILOG_ERROR \
+ addi r12,r13,PACA_EXTLB; \
+ TLB_MISS_RESTORE(r12)
+
+#define TLB_MISS_EPILOG_ERROR_SPECIAL \
+ addi r11,r13,PACA_EXTLB; \
+ TLB_MISS_RESTORE(r11)
+
+#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
+#define TLB_MISS_PROLOG_STATS \
+ mflr r10; \
+ std r8,EX_TLB_R8(r12); \
+ std r9,EX_TLB_R9(r12); \
+ std r10,EX_TLB_LR(r12);
+#define TLB_MISS_RESTORE_STATS \
+ ld r16,EX_TLB_LR(r12); \
+ ld r9,EX_TLB_R9(r12); \
+ ld r8,EX_TLB_R8(r12); \
+ mtlr r16;
+#define TLB_MISS_STATS_D(name) \
+ addi r9,r13,MMSTAT_DSTATS+name; \
+ bl .tlb_stat_inc;
+#define TLB_MISS_STATS_I(name) \
+ addi r9,r13,MMSTAT_ISTATS+name; \
+ bl .tlb_stat_inc;
+#define TLB_MISS_STATS_X(name) \
+ ld r8,PACA_EXTLB+EX_TLB_ESR(r13); \
+ cmpdi cr2,r8,-1; \
+ beq cr2,61f; \
+ addi r9,r13,MMSTAT_DSTATS+name; \
+ b 62f; \
+61: addi r9,r13,MMSTAT_ISTATS+name; \
+62: bl .tlb_stat_inc;
+#define TLB_MISS_STATS_SAVE_INFO \
+ std r14,EX_TLB_ESR(r12); /* save ESR */ \
+
+
+#else
+#define TLB_MISS_PROLOG_STATS
+#define TLB_MISS_RESTORE_STATS
+#define TLB_MISS_STATS_D(name)
+#define TLB_MISS_STATS_I(name)
+#define TLB_MISS_STATS_X(name)
+#define TLB_MISS_STATS_Y(name)
+#define TLB_MISS_STATS_SAVE_INFO
+#endif
+
+#define SET_IVOR(vector_number, vector_offset) \
+ li r3,vector_offset@l; \
+ ori r3,r3,interrupt_base_book3e@l; \
+ mtspr SPRN_IVOR##vector_number,r3;
+
+#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
+
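The EX_TLB_* defines above lay out a save frame of 8-byte slots, and the PACA reserves room for three such frames, one per supported re-entrancy level. A hypothetical C rendering of the frame bookkeeping TLB_MISS_PROLOG performs with "addi r14,r12,EX_TLB_SIZE" (array name and sizes here are illustrative only):

/* Sketch only: SPRG_TLB_EXFRAME points at the current frame; taking a
 * nested miss pushes the pointer one EX_TLB_SIZE further, and the
 * epilogs pop it back. */
#include <stdint.h>
#include <stdio.h>

#define EX_TLB_SIZE	(14 * 8)	/* without the TLB-stats slots */
#define TLB_LEVELS	3		/* frames reserved in the PACA */

int main(void)
{
	uint64_t extlb[TLB_LEVELS * EX_TLB_SIZE / 8];
	uint64_t *frame = extlb;			/* level 0 */
	uint64_t *next = frame + EX_TLB_SIZE / 8;	/* after a nested miss */

	printf("frame stride %d bytes, %d levels, %zu bytes total\n",
	       (int)((next - frame) * sizeof(*frame)), TLB_LEVELS,
	       sizeof(extlb));
	return 0;
}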
diff --git a/arch/powerpc/include/asm/exception.h b/arch/powerpc/include/asm/exception-64s.h
index d3d4534e3c74..a98653b26231 100644
--- a/arch/powerpc/include/asm/exception.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -57,17 +57,16 @@
addi reg,reg,(label)-_stext; /* virt addr of handler ... */
#define EXCEPTION_PROLOG_1(area) \
- mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
+ mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \
std r9,area+EX_R9(r13); /* save r9 - r12 */ \
std r10,area+EX_R10(r13); \
std r11,area+EX_R11(r13); \
std r12,area+EX_R12(r13); \
- mfspr r9,SPRN_SPRG1; \
+ mfspr r9,SPRN_SPRG_SCRATCH0; \
std r9,area+EX_R13(r13); \
mfcr r9
-#define EXCEPTION_PROLOG_PSERIES(area, label) \
- EXCEPTION_PROLOG_1(area); \
+#define EXCEPTION_PROLOG_PSERIES_1(label) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
mfspr r11,SPRN_SRR0; /* save SRR0 */ \
@@ -78,6 +77,10 @@
rfid; \
b . /* prevent speculative execution */
+#define EXCEPTION_PROLOG_PSERIES(area, label) \
+ EXCEPTION_PROLOG_1(area); \
+ EXCEPTION_PROLOG_PSERIES_1(label);
+
/*
* The common exception prolog is used for all except a few exceptions
* such as a segment miss on a kernel address. We have to be prepared
@@ -144,7 +147,7 @@
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
- mtspr SPRN_SPRG1,r13; /* save r13 */ \
+ mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define HSTD_EXCEPTION_PSERIES(n, label) \
@@ -152,13 +155,13 @@ label##_pSeries: \
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
- mtspr SPRN_SPRG1,r20; /* save r20 */ \
+ mtspr SPRN_SPRG_SCRATCH0,r20; /* save r20 */ \
mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \
mtspr SPRN_SRR0,r20; \
mfspr r20,SPRN_HSRR1; /* copy HSRR1 to SRR1 */ \
mtspr SPRN_SRR1,r20; \
- mfspr r20,SPRN_SPRG1; /* restore r20 */ \
- mtspr SPRN_SPRG1,r13; /* save r13 */ \
+ mfspr r20,SPRN_SPRG_SCRATCH0; /* restore r20 */ \
+ mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
@@ -167,15 +170,15 @@ label##_pSeries: \
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
- mtspr SPRN_SPRG1,r13; /* save r13 */ \
- mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
+ mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
+ mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \
std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
std r10,PACA_EXGEN+EX_R10(r13); \
lbz r10,PACASOFTIRQEN(r13); \
mfcr r9; \
cmpwi r10,0; \
beq masked_interrupt; \
- mfspr r10,SPRN_SPRG1; \
+ mfspr r10,SPRN_SPRG_SCRATCH0; \
std r10,PACA_EXGEN+EX_R13(r13); \
std r11,PACA_EXGEN+EX_R11(r13); \
std r12,PACA_EXGEN+EX_R12(r13); \
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 288e14d53b7f..fb3c05a0cbbf 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -1,29 +1 @@
-#ifndef _ASM_POWERPC_HARDIRQ_H
-#define _ASM_POWERPC_HARDIRQ_H
-#ifdef __KERNEL__
-
-#include <asm/irq.h>
-#include <asm/bug.h>
-
-/* The __last_jiffy_stamp field is needed to ensure that no decrementer
- * interrupt is lost on SMP machines. Since on most CPUs it is in the same
- * cache line as local_irq_count, it is cheap to access and is also used on UP
- * for uniformity.
- */
-typedef struct {
- unsigned int __softirq_pending; /* set_bit is used on this */
- unsigned int __last_jiffy_stamp;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp)
-
-static inline void ack_bad_irq(int irq)
-{
- printk(KERN_CRIT "illegal vector %d received!\n", irq);
- BUG();
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_HARDIRQ_H */
+#include <asm-generic/hardirq.h>
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 8b505eaaa38a..e73d554538dd 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -49,8 +49,13 @@ extern void iseries_handle_interrupts(void);
#define raw_irqs_disabled() (local_get_flags() == 0)
#define raw_irqs_disabled_flags(flags) ((flags) == 0)
+#ifdef CONFIG_PPC_BOOK3E
+#define __hard_irq_enable() __asm__ __volatile__("wrteei 1": : :"memory");
+#define __hard_irq_disable() __asm__ __volatile__("wrteei 0": : :"memory");
+#else
#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
+#endif
#define hard_irq_disable() \
do { \
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 7ead7c16fb7c..7464c0daddd1 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -35,16 +35,6 @@
#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1))
#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
-/* Cell page table entries */
-#define CBE_IOPTE_PP_W 0x8000000000000000ul /* protection: write */
-#define CBE_IOPTE_PP_R 0x4000000000000000ul /* protection: read */
-#define CBE_IOPTE_M 0x2000000000000000ul /* coherency required */
-#define CBE_IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
-#define CBE_IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
-#define CBE_IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
-#define CBE_IOPTE_H 0x0000000000000800ul /* cache hint */
-#define CBE_IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
-
/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0a5137676e1b..bbcd1aaf3dfd 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -302,7 +302,8 @@ extern void irq_free_virt(unsigned int virq, unsigned int count);
/* -- OF helpers -- */
-/* irq_create_of_mapping - Map a hardware interrupt into linux virq space
+/**
+ * irq_create_of_mapping - Map a hardware interrupt into linux virq space
* @controller: Device node of the interrupt controller
* @intspec: Interrupt specifier from the device-tree
* @intsize: Size of the interrupt specifier from the device-tree
@@ -314,8 +315,8 @@ extern void irq_free_virt(unsigned int virq, unsigned int count);
extern unsigned int irq_create_of_mapping(struct device_node *controller,
u32 *intspec, unsigned int intsize);
-
-/* irq_of_parse_and_map - Parse nad Map an interrupt into linux virq space
+/**
+ * irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
* @device: Device node of the device whose interrupt is to be mapped
* @index: Index of the interrupt to map
*
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 11d1fc3a8962..9efa2be78331 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -209,14 +209,14 @@ struct machdep_calls {
/*
* optional PCI "hooks"
*/
- /* Called in indirect_* to avoid touching devices */
- int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
-
/* Called at the very end of pcibios_init() */
void (*pcibios_after_init)(void);
#endif /* CONFIG_PPC32 */
+ /* Called in indirect_* to avoid touching devices */
+ int (*pci_exclude_device)(struct pci_controller *, unsigned char, unsigned char);
+
/* Called after PPC generic resource fixup to perform
machine specific fixups */
void (*pcibios_fixup_resources)(struct pci_dev *);
diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h
index 776f415a36aa..34916865eaef 100644
--- a/arch/powerpc/include/asm/mmu-40x.h
+++ b/arch/powerpc/include/asm/mmu-40x.h
@@ -61,4 +61,7 @@ typedef struct {
#endif /* !__ASSEMBLY__ */
+#define mmu_virtual_psize MMU_PAGE_4K
+#define mmu_linear_psize MMU_PAGE_256M
+
#endif /* _ASM_POWERPC_MMU_40X_H_ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index 3c86576bfefa..0372669383a8 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -79,16 +79,22 @@ typedef struct {
#if (PAGE_SHIFT == 12)
#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
+#define mmu_virtual_psize MMU_PAGE_4K
#elif (PAGE_SHIFT == 14)
#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
+#define mmu_virtual_psize MMU_PAGE_16K
#elif (PAGE_SHIFT == 16)
#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
+#define mmu_virtual_psize MMU_PAGE_64K
#elif (PAGE_SHIFT == 18)
#define PPC44x_TLBE_SIZE PPC44x_TLB_256K
+#define mmu_virtual_psize MMU_PAGE_256K
#else
#error "Unsupported PAGE_SIZE"
#endif
+#define mmu_linear_psize MMU_PAGE_256M
+
#define PPC44x_PGD_OFF_SHIFT (32 - PGDIR_SHIFT + PGD_T_LOG2)
#define PPC44x_PGD_OFF_MASK_BIT (PGDIR_SHIFT - PGD_T_LOG2)
#define PPC44x_PTE_ADD_SHIFT (32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 07865a357848..3d11d3ce79ec 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -143,4 +143,7 @@ typedef struct {
} mm_context_t;
#endif /* !__ASSEMBLY__ */
+#define mmu_virtual_psize MMU_PAGE_4K
+#define mmu_linear_psize MMU_PAGE_8M
+
#endif /* _ASM_POWERPC_MMU_8XX_H_ */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 7e74cff81d86..74695816205c 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -38,58 +38,140 @@
#define BOOK3E_PAGESZ_1TB 30
#define BOOK3E_PAGESZ_2TB 31
-#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000)
-#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000)
-#define MAS0_NV(x) ((x) & 0x00000FFF)
-
-#define MAS1_VALID 0x80000000
-#define MAS1_IPROT 0x40000000
-#define MAS1_TID(x) ((x << 16) & 0x3FFF0000)
-#define MAS1_IND 0x00002000
-#define MAS1_TS 0x00001000
-#define MAS1_TSIZE(x) ((x << 7) & 0x00000F80)
-
-#define MAS2_EPN 0xFFFFF000
-#define MAS2_X0 0x00000040
-#define MAS2_X1 0x00000020
-#define MAS2_W 0x00000010
-#define MAS2_I 0x00000008
-#define MAS2_M 0x00000004
-#define MAS2_G 0x00000002
-#define MAS2_E 0x00000001
+/* MAS registers bit definitions */
+
+#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000)
+#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000)
+#define MAS0_NV(x) ((x) & 0x00000FFF)
+#define MAS0_HES 0x00004000
+#define MAS0_WQ_ALLWAYS 0x00000000
+#define MAS0_WQ_COND 0x00001000
+#define MAS0_WQ_CLR_RSRV 0x00002000
+
+#define MAS1_VALID 0x80000000
+#define MAS1_IPROT 0x40000000
+#define MAS1_TID(x) ((x << 16) & 0x3FFF0000)
+#define MAS1_IND 0x00002000
+#define MAS1_TS 0x00001000
+#define MAS1_TSIZE_MASK 0x00000f80
+#define MAS1_TSIZE_SHIFT 7
+#define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+
+#define MAS2_EPN 0xFFFFF000
+#define MAS2_X0 0x00000040
+#define MAS2_X1 0x00000020
+#define MAS2_W 0x00000010
+#define MAS2_I 0x00000008
+#define MAS2_M 0x00000004
+#define MAS2_G 0x00000002
+#define MAS2_E 0x00000001
#define MAS2_EPN_MASK(size) (~0 << (size + 10))
#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags))
-#define MAS3_RPN 0xFFFFF000
-#define MAS3_U0 0x00000200
-#define MAS3_U1 0x00000100
-#define MAS3_U2 0x00000080
-#define MAS3_U3 0x00000040
-#define MAS3_UX 0x00000020
-#define MAS3_SX 0x00000010
-#define MAS3_UW 0x00000008
-#define MAS3_SW 0x00000004
-#define MAS3_UR 0x00000002
-#define MAS3_SR 0x00000001
-
-#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
-#define MAS4_INDD 0x00008000
-#define MAS4_TSIZED(x) MAS1_TSIZE(x)
-#define MAS4_X0D 0x00000040
-#define MAS4_X1D 0x00000020
-#define MAS4_WD 0x00000010
-#define MAS4_ID 0x00000008
-#define MAS4_MD 0x00000004
-#define MAS4_GD 0x00000002
-#define MAS4_ED 0x00000001
-
-#define MAS6_SPID0 0x3FFF0000
-#define MAS6_SPID1 0x00007FFE
-#define MAS6_ISIZE(x) MAS1_TSIZE(x)
-#define MAS6_SAS 0x00000001
-#define MAS6_SPID MAS6_SPID0
-
-#define MAS7_RPN 0xFFFFFFFF
+#define MAS3_RPN 0xFFFFF000
+#define MAS3_U0 0x00000200
+#define MAS3_U1 0x00000100
+#define MAS3_U2 0x00000080
+#define MAS3_U3 0x00000040
+#define MAS3_UX 0x00000020
+#define MAS3_SX 0x00000010
+#define MAS3_UW 0x00000008
+#define MAS3_SW 0x00000004
+#define MAS3_UR 0x00000002
+#define MAS3_SR 0x00000001
+#define MAS3_SPSIZE 0x0000003e
+#define MAS3_SPSIZE_SHIFT 1
+
+#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
+#define MAS4_INDD 0x00008000 /* Default IND */
+#define MAS4_TSIZED(x) MAS1_TSIZE(x)
+#define MAS4_X0D 0x00000040
+#define MAS4_X1D 0x00000020
+#define MAS4_WD 0x00000010
+#define MAS4_ID 0x00000008
+#define MAS4_MD 0x00000004
+#define MAS4_GD 0x00000002
+#define MAS4_ED 0x00000001
+#define MAS4_WIMGED_MASK 0x0000001f /* Default WIMGE */
+#define MAS4_WIMGED_SHIFT 0
+#define MAS4_VLED MAS4_X1D /* Default VLE */
+#define MAS4_ACMD 0x000000c0 /* Default ACM */
+#define MAS4_ACMD_SHIFT 6
+#define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */
+#define MAS4_TSIZED_SHIFT 7
+
+#define MAS6_SPID0 0x3FFF0000
+#define MAS6_SPID1 0x00007FFE
+#define MAS6_ISIZE(x) MAS1_TSIZE(x)
+#define MAS6_SAS 0x00000001
+#define MAS6_SPID MAS6_SPID0
+#define MAS6_SIND 0x00000002 /* Indirect page */
+#define MAS6_SIND_SHIFT 1
+#define MAS6_SPID_MASK 0x3fff0000
+#define MAS6_SPID_SHIFT 16
+#define MAS6_ISIZE_MASK 0x00000f80
+#define MAS6_ISIZE_SHIFT 7
+
+#define MAS7_RPN 0xFFFFFFFF
+
+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */
+#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+#define MMUCSR0_TLB0PS 0x00000780 /* TLB0 Page Size */
+#define MMUCSR0_TLB1PS 0x00007800 /* TLB1 Page Size */
+#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */
+#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */
+
+/* TLBnCFG encoding */
+#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */
+#define TLBnCFG_HES 0x00002000 /* HW select supported */
+#define TLBnCFG_IPROT 0x00008000 /* IPROT supported */
+#define TLBnCFG_GTWE 0x00010000 /* Guest can write */
+#define TLBnCFG_IND 0x00020000 /* IND entries supported */
+#define TLBnCFG_PT 0x00040000 /* Can load from page table */
+#define TLBnCFG_ASSOC 0xff000000 /* Associativity */
+
+/* TLBnPS encoding */
+#define TLBnPS_4K 0x00000004
+#define TLBnPS_8K 0x00000008
+#define TLBnPS_16K 0x00000010
+#define TLBnPS_32K 0x00000020
+#define TLBnPS_64K 0x00000040
+#define TLBnPS_128K 0x00000080
+#define TLBnPS_256K 0x00000100
+#define TLBnPS_512K 0x00000200
+#define TLBnPS_1M 0x00000400
+#define TLBnPS_2M 0x00000800
+#define TLBnPS_4M 0x00001000
+#define TLBnPS_8M 0x00002000
+#define TLBnPS_16M 0x00004000
+#define TLBnPS_32M 0x00008000
+#define TLBnPS_64M 0x00010000
+#define TLBnPS_128M 0x00020000
+#define TLBnPS_256M 0x00040000
+#define TLBnPS_512M 0x00080000
+#define TLBnPS_1G 0x00100000
+#define TLBnPS_2G 0x00200000
+#define TLBnPS_4G 0x00400000
+#define TLBnPS_8G 0x00800000
+#define TLBnPS_16G 0x01000000
+#define TLBnPS_32G 0x02000000
+#define TLBnPS_64G 0x04000000
+#define TLBnPS_128G 0x08000000
+#define TLBnPS_256G 0x10000000
+
+/* tlbilx action encoding */
+#define TLBILX_T_ALL 0
+#define TLBILX_T_TID 1
+#define TLBILX_T_FULLMATCH 3
+#define TLBILX_T_CLASS0 4
+#define TLBILX_T_CLASS1 5
+#define TLBILX_T_CLASS2 6
+#define TLBILX_T_CLASS3 7
#ifndef __ASSEMBLY__
@@ -100,6 +182,34 @@ typedef struct {
unsigned int active;
unsigned long vdso_base;
} mm_context_t;
+
+/* Page size definitions, common between 32 and 64-bit
+ *
+ * shift : is the "PAGE_SHIFT" value for that page size
+ * enc : is the pte encoding mask
+ *
+ */
+struct mmu_psize_def
+{
+ unsigned int shift; /* number of bits */
+ unsigned int enc; /* PTE encoding */
+};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+/* The page sizes use the same names as 64-bit hash but are
+ * constants
+ */
+#if defined(CONFIG_PPC_4K_PAGES)
+#define mmu_virtual_psize MMU_PAGE_4K
+#elif defined(CONFIG_PPC_64K_PAGES)
+#define mmu_virtual_psize MMU_PAGE_64K
+#else
+#error Unsupported page size
+#endif
+
+extern int mmu_linear_psize;
+extern int mmu_vmemmap_psize;
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
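The MAS1 field macros above compose in the obvious way. A hypothetical sketch of building MAS1 for a valid, pinned TLB entry (assumes the MAS1_* masks above are in scope, and that BOOK3E_PAGESZ_4K encodes tsize 2):

/* Sketch only: MAS1 for a valid, IPROT-pinned 4K entry owned by PID 42. */
#include <stdint.h>

static uint32_t demo_make_mas1(uint16_t pid, unsigned int tsize)
{
	return MAS1_VALID | MAS1_IPROT | MAS1_TID(pid) | MAS1_TSIZE(tsize);
}

/* demo_make_mas1(42, 2) == 0xc02a0100 (2 == BOOK3E_PAGESZ_4K, assumed) */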
diff --git a/arch/powerpc/include/asm/mmu-hash32.h b/arch/powerpc/include/asm/mmu-hash32.h
index 16b1a1e77e64..16f513e5cbd7 100644
--- a/arch/powerpc/include/asm/mmu-hash32.h
+++ b/arch/powerpc/include/asm/mmu-hash32.h
@@ -55,21 +55,25 @@ struct ppc_bat {
#ifndef __ASSEMBLY__
-/* Hardware Page Table Entry */
+/*
+ * Hardware Page Table Entry
+ * Note that the xpn and x bitfields are used only by processors that
+ * support extended addressing; otherwise, those bits are reserved.
+ */
struct hash_pte {
unsigned long v:1; /* Entry is valid */
unsigned long vsid:24; /* Virtual segment identifier */
unsigned long h:1; /* Hash algorithm indicator */
unsigned long api:6; /* Abbreviated page index */
unsigned long rpn:20; /* Real (physical) page number */
- unsigned long :3; /* Unused */
+ unsigned long xpn:3; /* Real page number bits 0-2, optional */
unsigned long r:1; /* Referenced */
unsigned long c:1; /* Changed */
unsigned long w:1; /* Write-thru cache mode */
unsigned long i:1; /* Cache inhibited */
unsigned long m:1; /* Memory coherence */
unsigned long g:1; /* Guarded */
- unsigned long :1; /* Unused */
+ unsigned long x:1; /* Real page number bit 3, optional */
unsigned long pp:2; /* Page protection */
};
@@ -80,4 +84,10 @@ typedef struct {
#endif /* !__ASSEMBLY__ */
+/* We happily ignore the smaller BATs on 601, we don't actually use
+ * those definitions on hash32 at the moment anyway
+ */
+#define mmu_virtual_psize MMU_PAGE_4K
+#define mmu_linear_psize MMU_PAGE_256M
+
#endif /* _ASM_POWERPC_MMU_HASH32_H_ */
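The new xpn/x fields carry the high physical address bits on parts with 36-bit extended addressing. A hypothetical sketch of reassembling the physical address, assuming (per the field comments above, IBM bit numbering) xpn supplies PA bits 0-2 and x supplies PA bit 3:

/* Sketch only: 36-bit physical address from a hash_pte; assumes the
 * struct hash_pte definition above and a 4K page (12-bit offset). */
#include <stdint.h>

static uint64_t demo_hpte_phys(const struct hash_pte *h, uint32_t offset)
{
	return ((uint64_t)h->xpn << 33) |	/* PA bits 0-2 */
	       ((uint64_t)h->x   << 32) |	/* PA bit 3 */
	       ((uint64_t)h->rpn << 12) |	/* PA bits 4-23 */
	       (offset & 0xfff);		/* page offset */
}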
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 98c104a09961..bebe31c2e907 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -41,6 +41,7 @@ extern char initial_stab[];
#define SLB_NUM_BOLTED 3
#define SLB_CACHE_ENTRIES 8
+#define SLB_MIN_SIZE 32
/* Bits in the SLB ESID word */
#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */
@@ -139,26 +140,6 @@ struct mmu_psize_def
#endif /* __ASSEMBLY__ */
/*
- * The kernel use the constants below to index in the page sizes array.
- * The use of fixed constants for this purpose is better for performances
- * of the low level hash refill handlers.
- *
- * A non supported page size has a "shift" field set to 0
- *
- * Any new page size being implemented can get a new entry in here. Whether
- * the kernel will use it or not is a different matter though. The actual page
- * size used by hugetlbfs is not defined here and may be made variable
- */
-
-#define MMU_PAGE_4K 0 /* 4K */
-#define MMU_PAGE_64K 1 /* 64K */
-#define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */
-#define MMU_PAGE_1M 3 /* 1M */
-#define MMU_PAGE_16M 4 /* 16M */
-#define MMU_PAGE_16G 5 /* 16G */
-#define MMU_PAGE_COUNT 6
-
-/*
* Segment sizes.
* These are the values used by hardware in the B field of
* SLB entries and the first dword of MMU hashtable entries.
@@ -296,6 +277,7 @@ extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);
extern void slb_vmalloc_update(void);
+extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index fb57ded592f9..7ffbb65ff7a9 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -17,6 +17,7 @@
#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
+#define MMU_FTR_TYPE_3E ASM_CONST(0x00000020)
/*
* This is individual features
@@ -57,6 +58,15 @@
*/
#define MMU_FTR_TLBIE_206 ASM_CONST(0x00400000)
+/* Enable use of TLB reservation. Processor should support tlbsrx.
+ * instruction and MAS0[WQ].
+ */
+#define MMU_FTR_USE_TLBRSRV ASM_CONST(0x00800000)
+
+/* Use paired MAS registers (MAS7||MAS3, etc.)
+ */
+#define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000)
+
#ifndef __ASSEMBLY__
#include <asm/cputable.h>
@@ -73,6 +83,41 @@ extern void early_init_mmu_secondary(void);
#endif /* !__ASSEMBLY__ */
+/* The kernel uses the constants below to index into the page sizes array.
+ * The use of fixed constants for this purpose is better for performance
+ * of the low level hash refill handlers.
+ *
+ * A non-supported page size has a "shift" field set to 0
+ *
+ * Any new page size being implemented can get a new entry in here. Whether
+ * the kernel will use it or not is a different matter though. The actual page
+ * size used by hugetlbfs is not defined here and may be made variable
+ *
+ * Note: This array ended up being a false good idea as it's growing to the
+ * point where I wonder if we should replace it with something different,
+ * to think about, feedback welcome. --BenH.
+ */
+
+/* These are #defines as they have to be used in assembly
+ *
+ * WARNING: If you change this list, make sure to update the array of
+ * names currently in arch/powerpc/mm/hugetlbpage.c or bad things will
+ * happen
+ */
+#define MMU_PAGE_4K 0
+#define MMU_PAGE_16K 1
+#define MMU_PAGE_64K 2
+#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */
+#define MMU_PAGE_256K 4
+#define MMU_PAGE_1M 5
+#define MMU_PAGE_8M 6
+#define MMU_PAGE_16M 7
+#define MMU_PAGE_256M 8
+#define MMU_PAGE_1G 9
+#define MMU_PAGE_16G 10
+#define MMU_PAGE_64G 11
+#define MMU_PAGE_COUNT 12
+
#if defined(CONFIG_PPC_STD_MMU_64)
/* 64-bit classic hash table MMU */
@@ -94,5 +139,6 @@ extern void early_init_mmu_secondary(void);
# include <asm/mmu-8xx.h>
#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */
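The MMU_PAGE_* constants are array indices, typically into a per-size descriptor table such as the mmu_psize_defs[] declared for hash64 and Book3E. A small sketch of that indexing scheme, with the MMU_PAGE_* constants above in scope (demo_* names are illustrative; the real tables carry more per-size data):

/* Sketch only: shift == 0 marks an unsupported size, as noted above. */
struct demo_psize_def { unsigned int shift; };

static struct demo_psize_def demo_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K]  = { .shift = 12 },
	[MMU_PAGE_64K] = { .shift = 16 },
	[MMU_PAGE_16M] = { .shift = 24 },
	/* entries left zero-initialized are not supported */
};

static unsigned long demo_page_size(int psize)
{
	return demo_psize_defs[psize].shift ?
	       1UL << demo_psize_defs[psize].shift : 0;
}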
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b7063669f972..b34e94d94435 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -14,7 +14,6 @@
/*
* Most of the context management is out of line
*/
-extern void mmu_context_init(void);
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
@@ -23,6 +22,12 @@ extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void mmu_context_init(void) { }
+#else
+extern void mmu_context_init(void);
+#endif
+
/*
* switch_mm is the entry point called from the architecture independent
* code in kernel/sched.c
@@ -38,6 +43,10 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */
+ /* 64-bit Book3E keeps track of current PGD in the PACA */
+#ifdef CONFIG_PPC_BOOK3E_64
+ get_paca()->pgd = next->pgd;
+#endif
/* Nothing else to do if we aren't actually switching */
if (prev == next)
return;
@@ -84,6 +93,10 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
+ /* 64-bit Book3E keeps track of current PGD in the PACA */
+#ifdef CONFIG_PPC_BOOK3E_64
+ get_paca()->pgd = NULL;
+#endif
}
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index efde5ac82f7b..6c587eddee59 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -107,6 +107,9 @@ extern void pmac_xpram_write(int xpaddr, u8 data);
/* Synchronize NVRAM */
extern void nvram_sync(void);
+/* Determine NVRAM size */
+extern ssize_t nvram_get_size(void);
+
/* Normal access to NVRAM */
extern unsigned char nvram_read_byte(int i);
extern void nvram_write_byte(unsigned char c, int i);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index c8a3cbfe02ff..b634456ea893 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -14,9 +14,11 @@
#define _ASM_POWERPC_PACA_H
#ifdef __KERNEL__
-#include <asm/types.h>
-#include <asm/lppaca.h>
-#include <asm/mmu.h>
+#include <asm/types.h>
+#include <asm/lppaca.h>
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/exception-64e.h>
register struct paca_struct *local_paca asm("r13");
@@ -91,6 +93,21 @@ struct paca_struct {
u16 slb_cache[SLB_CACHE_ENTRIES];
#endif /* CONFIG_PPC_STD_MMU_64 */
+#ifdef CONFIG_PPC_BOOK3E
+ pgd_t *pgd; /* Current PGD */
+ pgd_t *kernel_pgd; /* Kernel PGD */
+ u64 exgen[8] __attribute__((aligned(0x80)));
+ u64 extlb[EX_TLB_SIZE*3] __attribute__((aligned(0x80)));
+ u64 exmc[8]; /* used for machine checks */
+ u64 excrit[8]; /* used for crit interrupts */
+ u64 exdbg[8]; /* used for debug interrupts */
+
+ /* Kernel stack pointers for use by special exceptions */
+ void *mc_kstack;
+ void *crit_kstack;
+ void *dbg_kstack;
+#endif /* CONFIG_PPC_BOOK3E */
+
mm_context_t context;
/*
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 4940662ee87e..ff24254990e1 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -139,7 +139,11 @@ extern phys_addr_t kernstart_addr;
* Don't compare things with KERNELBASE or PAGE_OFFSET to test for
* "kernelness", use is_kernel_addr() - it should do what you want.
*/
+#ifdef CONFIG_PPC_BOOK3E_64
+#define is_kernel_addr(x) ((x) >= 0x8000000000000000ul)
+#else
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+#endif
#ifndef __ASSEMBLY__
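On 64-bit Book3E, kernel addresses occupy the entire upper half of the address space, so is_kernel_addr() tests the top address bit rather than comparing against PAGE_OFFSET. A one-line sketch of the equivalent test:

/* Sketch only: Book3E-64 kernel addresses all have the top bit set,
 * so the check reduces to a sign test on the address. */
#include <stdbool.h>
#include <stdint.h>

static bool demo_is_kernel_addr(uint64_t x)
{
	return x >= 0x8000000000000000ull;	/* i.e. (int64_t)x < 0 */
}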
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 5817a3b747e5..3f17b83f55a1 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -135,12 +135,22 @@ extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
#endif /* __ASSEMBLY__ */
#else
#define slice_init()
+#ifdef CONFIG_PPC_STD_MMU_64
#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
#define slice_set_user_psize(mm, psize) \
do { \
(mm)->context.user_psize = (psize); \
(mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
} while (0)
+#else /* CONFIG_PPC_STD_MMU_64 */
+#ifdef CONFIG_PPC_64K_PAGES
+#define get_slice_psize(mm, addr) MMU_PAGE_64K
+#else /* CONFIG_PPC_64K_PAGES */
+#define get_slice_psize(mm, addr) MMU_PAGE_4K
+#endif /* !CONFIG_PPC_64K_PAGES */
+#define slice_set_user_psize(mm, psize) do { BUG(); } while(0)
+#endif /* !CONFIG_PPC_STD_MMU_64 */
+
#define slice_set_range_psize(mm, start, len, psize) \
slice_set_user_psize((mm), (psize))
#define slice_mm_new_context(mm) 1
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 4c61fa0b8d75..76e1f313a58e 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -77,9 +77,7 @@ struct pci_controller {
int first_busno;
int last_busno;
-#ifndef CONFIG_PPC64
int self_busno;
-#endif
void __iomem *io_base_virt;
#ifdef CONFIG_PPC64
@@ -104,7 +102,6 @@ struct pci_controller {
unsigned int __iomem *cfg_addr;
void __iomem *cfg_data;
-#ifndef CONFIG_PPC64
/*
* Used for variants of PCI indirect handling and possible quirks:
* SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
@@ -128,7 +125,6 @@ struct pci_controller {
#define PPC_INDIRECT_TYPE_BIG_ENDIAN 0x00000010
#define PPC_INDIRECT_TYPE_BROKEN_MRM 0x00000020
u32 indirect_type;
-#endif /* !CONFIG_PPC64 */
/* Currently, we limit ourselves to 1 IO range and 3 mem
* ranges since the common pci_bus structure can't handle more
*/
@@ -146,21 +142,6 @@ struct pci_controller {
#endif /* CONFIG_PPC64 */
};
-#ifndef CONFIG_PPC64
-
-static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
-{
- return bus->sysdata;
-}
-
-static inline int isa_vaddr_is_ioport(void __iomem *address)
-{
- /* No specific ISA handling on ppc32 at this stage, it
- * all goes through PCI
- */
- return 0;
-}
-
/* These are used for config access before all the PCI probing
has been done. */
extern int early_read_config_byte(struct pci_controller *hose, int bus,
@@ -182,6 +163,22 @@ extern int early_find_capability(struct pci_controller *hose, int bus,
extern void setup_indirect_pci(struct pci_controller* hose,
resource_size_t cfg_addr,
resource_size_t cfg_data, u32 flags);
+
+#ifndef CONFIG_PPC64
+
+static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
+{
+ return bus->sysdata;
+}
+
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+ /* No specific ISA handling on ppc32 at this stage, it
+ * all goes through PCI
+ */
+ return 0;
+}
+
#else /* CONFIG_PPC64 */
/*
@@ -284,11 +281,6 @@ static inline int isa_vaddr_is_ioport(void __iomem *address)
extern int pcibios_unmap_io_space(struct pci_bus *bus);
extern int pcibios_map_io_space(struct pci_bus *bus);
-/* Return values for ppc_md.pci_probe_mode function */
-#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
-#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
-#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
-
#ifdef CONFIG_NUMA
#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = (NODE))
#else
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index d9483c504d2d..7aca4839387b 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -22,6 +22,11 @@
#include <asm-generic/pci-dma-compat.h>
+/* Return values for ppc_md.pci_probe_mode function */
+#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
+#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
+#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
+
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
@@ -61,8 +66,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#ifdef CONFIG_PCI
-extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
-extern struct dma_mapping_ops *get_pci_dma_ops(void);
+extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
+extern struct dma_map_ops *get_pci_dma_ops(void);
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#define get_pci_dma_ops() NULL
@@ -228,6 +233,8 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
extern void pcibios_setup_bus_devices(struct pci_bus *bus);
extern void pcibios_setup_bus_self(struct pci_bus *bus);
+extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
+extern void pcibios_scan_phb(struct pci_controller *hose, void *sysdata);
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_PCI_H */
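
Note: moving the PCI_PROBE_* constants into pci.h makes them usable by 32-bit
platforms as well. A hypothetical platform hook using them (the hose->dn
device-node field is assumed present, as on existing controllers):

	/* Sketch: prefer device-tree probing when the PHB has a DT node */
	static int myplat_pci_probe_mode(struct pci_bus *bus)
	{
		struct pci_controller *hose = pci_bus_to_host(bus);

		return hose->dn ? PCI_PROBE_DEVTREE : PCI_PROBE_NORMAL;
	}
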
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 1730e5e298d6..f2e812de7c3c 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -4,6 +4,15 @@
#include <linux/mm.h>
+#ifdef CONFIG_PPC_BOOK3E
+extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
+#else /* CONFIG_PPC_BOOK3E */
+static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
+ unsigned long address)
+{
+}
+#endif /* !CONFIG_PPC_BOOK3E */
+
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
@@ -19,7 +28,12 @@ typedef struct pgtable_free {
unsigned long val;
} pgtable_free_t;
-#define PGF_CACHENUM_MASK 0x7
+/* This needs to be big enough to allow for MMU_PAGE_COUNT + 2 to be stored
+ * and small enough to fit in the low bits of any naturally aligned page
+ * table cache entry. Arbitrarily set to 0x1f; that should give us some
+ * room to grow.
+ */
+#define PGF_CACHENUM_MASK 0x1f
static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
unsigned long mask)
@@ -35,19 +49,27 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
#include <asm/pgalloc-32.h>
#endif
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
#ifdef CONFIG_SMP
-#define __pte_free_tlb(tlb,ptepage,address) \
-do { \
- pgtable_page_dtor(ptepage); \
- pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
- PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
-} while (0)
-#else
-#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, (pte))
-#endif
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+extern void pte_free_finish(void);
+#else /* CONFIG_SMP */
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+{
+ pgtable_free(pgf);
+}
+static inline void pte_free_finish(void) { }
+#endif /* !CONFIG_SMP */
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
+ unsigned long address)
+{
+ pgtable_free_t pgf = pgtable_free_cache(page_address(ptepage),
+ PTE_NONCACHE_NUM,
+ PTE_TABLE_SIZE-1);
+ tlb_flush_pgtable(tlb, address);
+ pgtable_page_dtor(ptepage);
+ pgtable_free_tlb(tlb, pgf);
+}
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index c9ff9d75990e..55646adfa843 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -111,6 +111,8 @@ extern int icache_44x_need_flush;
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
+#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
+#include <asm/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
@@ -186,7 +188,7 @@ static inline unsigned long pte_update(pte_t *p,
#endif /* !PTE_ATOMIC_UPDATES */
#ifdef CONFIG_44x
- if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
+ if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
icache_44x_need_flush = 1;
#endif
return old;
@@ -217,7 +219,7 @@ static inline unsigned long long pte_update(pte_t *p,
#endif /* !PTE_ATOMIC_UPDATES */
#ifdef CONFIG_44x
- if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
+ if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
icache_44x_need_flush = 1;
#endif
return old;
@@ -267,8 +269,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
unsigned long bits = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
- _PAGE_HWEXEC | _PAGE_EXEC);
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
pte_update(ptep, 0, bits);
}
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index 6cc085b945a5..90533ddcd703 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -10,10 +10,10 @@
#define PGD_INDEX_SIZE 4
#ifndef __ASSEMBLY__
-
#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
@@ -32,8 +32,6 @@
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#endif /* __ASSEMBLY__ */
-
/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS 0x1ff
/* Bits to mask out from a PGD/PUD to get to the PMD page */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 8cd083c61503..806abe7a3fa5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -5,11 +5,6 @@
* the ppc64 hashed page table.
*/
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <asm/tlbflush.h>
-#endif /* __ASSEMBLY__ */
-
#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-ppc64-64k.h>
#else
@@ -38,26 +33,47 @@
#endif
/*
- * Define the address range of the vmalloc VM area.
+ * Define the address range of the kernel non-linear virtual area
+ */
+
+#ifdef CONFIG_PPC_BOOK3E
+#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
+#else
+#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
+#endif
+#define KERN_VIRT_SIZE PGTABLE_RANGE
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * (we keep a quarter for the virtual memmap)
*/
-#define VMALLOC_START ASM_CONST(0xD000000000000000)
-#define VMALLOC_SIZE (PGTABLE_RANGE >> 1)
-#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
+#define VMALLOC_START KERN_VIRT_START
+#ifdef CONFIG_PPC_BOOK3E
+#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 2)
+#else
+#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
+#endif
+#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
/*
- * Define the address ranges for MMIO and IO space :
+ * The second half of the kernel virtual space is used for IO mappings;
+ * it is itself carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
*
- * ISA_IO_BASE = VMALLOC_END, 64K reserved area
+ * ISA_IO_BASE = KERN_IO_START, 64K reserved area
* PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
* IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
*/
+#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE 0x80000000ul
-#define ISA_IO_BASE (VMALLOC_END)
-#define ISA_IO_END (VMALLOC_END + 0x10000ul)
+#define ISA_IO_BASE (KERN_IO_START)
+#define ISA_IO_END (KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE (ISA_IO_END)
-#define PHB_IO_END (VMALLOC_END + FULL_IO_SIZE)
+#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
-#define IOREMAP_END (VMALLOC_START + PGTABLE_RANGE)
+#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
+
/*
* Region IDs
@@ -68,23 +84,32 @@
#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
-#define VMEMMAP_REGION_ID (0xfUL)
+#define VMEMMAP_REGION_ID (0xfUL) /* Server only */
#define USER_REGION_ID (0UL)
/*
- * Defines the address of the vmemap area, in its own region
+ * Defines the address of the vmemmap area, in its own region on
+ * hash table CPUs and after the vmalloc space on Book3E
*/
+#ifdef CONFIG_PPC_BOOK3E
+#define VMEMMAP_BASE VMALLOC_END
+#define VMEMMAP_END KERN_IO_START
+#else
#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT)
+#endif
#define vmemmap ((struct page *)VMEMMAP_BASE)
/*
* Include the PTE bits definitions
*/
+#ifdef CONFIG_PPC_BOOK3S
#include <asm/pte-hash64.h>
+#else
+#include <asm/pte-book3e.h>
+#endif
#include <asm/pte-common.h>
-
#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
@@ -92,6 +117,9 @@
#ifndef __ASSEMBLY__
+#include <linux/stddef.h>
+#include <asm/tlbflush.h>
+
/*
* This is the default implementation of various PTE accessors, it's
* used in all cases except Book3S with 64K pages where we have a
@@ -285,8 +313,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
unsigned long bits = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
- _PAGE_EXEC | _PAGE_HWEXEC);
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
#ifdef PTE_ATOMIC_UPDATES
unsigned long old, tmp;
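
Note: with KERN_VIRT_SIZE equal to PGTABLE_RANGE (2^44 bytes, i.e. 16TB, in
this era's page table layout), the Book3E constants above work out to the
following (computed here, not taken from the patch):

	KERN_VIRT_START = 0x8000000000000000
	VMALLOC_START   = 0x8000000000000000	(first quarter: vmalloc)
	VMALLOC_END     = 0x8000040000000000
	VMEMMAP_BASE    = 0x8000040000000000	(second quarter: vmemmap)
	KERN_IO_START   = 0x8000080000000000	(second half: IO)
	ISA_IO_END      = KERN_IO_START + 64K
	PHB_IO_END      = KERN_IO_START + 2G
	IOREMAP_END     = 0x8000100000000000
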
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
index d6a616a1b3ea..ccc68b50d05d 100644
--- a/arch/powerpc/include/asm/pmc.h
+++ b/arch/powerpc/include/asm/pmc.h
@@ -27,10 +27,22 @@ extern perf_irq_t perf_irq;
int reserve_pmc_hardware(perf_irq_t new_perf_irq);
void release_pmc_hardware(void);
+void ppc_enable_pmcs(void);
#ifdef CONFIG_PPC64
-void power4_enable_pmcs(void);
-void pasemi_enable_pmcs(void);
+#include <asm/lppaca.h>
+
+static inline void ppc_set_pmu_inuse(int inuse)
+{
+ get_lppaca()->pmcregs_in_use = inuse;
+}
+
+extern void power4_enable_pmcs(void);
+
+#else /* CONFIG_PPC64 */
+
+static inline void ppc_set_pmu_inuse(int inuse) { }
+
#endif
#endif /* __KERNEL__ */
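
Note: ppc_set_pmu_inuse() flags the PMU state as live in the lppaca so a
hypervisor knows to context-switch it. A sketch of a client following the
reservation protocol (handler body hypothetical):

	static void my_pmu_interrupt(struct pt_regs *regs)
	{
		/* read PMCs, record samples, rearm ... */
	}

	static int my_pmu_start(void)
	{
		int err = reserve_pmc_hardware(my_pmu_interrupt);

		if (err)
			return err;
		ppc_set_pmu_inuse(1);
		ppc_enable_pmcs();
		return 0;
	}
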
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index b74f16d45cb4..ef9aa84cac5a 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -48,6 +48,8 @@
#define PPC_INST_TLBIE 0x7c000264
#define PPC_INST_TLBILX 0x7c000024
#define PPC_INST_WAIT 0x7c00007c
+#define PPC_INST_TLBIVAX 0x7c000624
+#define PPC_INST_TLBSRX_DOT 0x7c0006a5
/* macros to insert fields into opcodes */
#define __PPC_RA(a) (((a) & 0x1f) << 16)
@@ -76,6 +78,10 @@
__PPC_WC(w))
#define PPC_TLBIE(lp,a) stringify_in_c(.long PPC_INST_TLBIE | \
__PPC_RB(a) | __PPC_RS(lp))
+#define PPC_TLBSRX_DOT(a,b) stringify_in_c(.long PPC_INST_TLBSRX_DOT | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \
+ __PPC_RA(a) | __PPC_RB(b))
/*
* Define what the VSX XX1 form instructions will look like, then add
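
Note: these raw .long encodings exist so the kernel can emit 2.06
embedded-MMU instructions even when the assembler does not know them. In .S
files the GPR names reduce to plain numbers (via ppc_asm.h), so usage reads
like ordinary code, e.g. (illustrative only):

	/* Sketch: broadcast-invalidate the TLB entry for the EA in r4 */
	PPC_TLBIVAX(0, r4)
	sync
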
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 854ab713f56c..2828f9d0f66d 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -39,7 +39,6 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
extern void pci_devs_phb_init(void);
extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
-extern void scan_phb(struct pci_controller *hose);
/* From rtas_pci.h */
extern void init_pci_config_tokens (void);
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index f9729529c20d..498fe09263d3 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -98,13 +98,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
-#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base
+#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,base,b
#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base
+#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,base,b
#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
@@ -112,26 +112,26 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
/* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,b,base)
+#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,base,b)
#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,b,base)
+#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,base,b)
#define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
/* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
-#define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,b,base)
+#define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,base,b)
#define SAVE_2VSRSU(n,b,base) SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
#define SAVE_4VSRSU(n,b,base) SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
#define SAVE_8VSRSU(n,b,base) SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
#define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
#define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
-#define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,b,base)
+#define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,base,b)
#define REST_2VSRSU(n,b,base) REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
#define REST_4VSRSU(n,b,base) REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
#define REST_8VSRSU(n,b,base) REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
@@ -375,8 +375,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define PPC440EP_ERR42
#endif
-
-#if defined(CONFIG_BOOKE)
+/*
+ * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
+ * keep the address intact to be compatible with code shared with
+ * 32-bit classic.
+ *
+ * On the other hand, I find it useful to have them behave as expected
+ * by their name (i.e. always do the addition) on 64-bit BookE.
+ */
+#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
#define toreal(rd)
#define fromreal(rd)
@@ -426,10 +433,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
.previous
#endif
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
#define RFI rfid
#define MTMSRD(r) mtmsrd r
-
#else
#define FIX_SRR1(ra, rb)
#ifndef CONFIG_40x
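
Note on the SAVE_VR/REST_VR (and VSR) operand swap above: X-form loads and
stores compute EA = (RA|0) + (RB), where RA is read as literal zero when it
names r0. The scratch register "b" holds the offset loaded with li, so it
must sit in the RB slot; in the old order, a caller passing r0 as the scratch
would silently lose the offset:

	li   r0,THREAD_VR0+16	# offset in scratch r0
	stvx v1,r0,r9		# old order: EA = 0 + r9   (offset dropped)
	stvx v1,r9,r0		# new order: EA = r9 + r0  (correct)
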
diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/pte-40x.h
index 07630faae029..6c3e1f4378d4 100644
--- a/arch/powerpc/include/asm/pte-40x.h
+++ b/arch/powerpc/include/asm/pte-40x.h
@@ -46,7 +46,7 @@
#define _PAGE_RW 0x040 /* software: Writes permitted */
#define _PAGE_DIRTY 0x080 /* software: dirty page */
#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
-#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
+#define _PAGE_EXEC 0x200 /* hardware: EX permission */
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
diff --git a/arch/powerpc/include/asm/pte-44x.h b/arch/powerpc/include/asm/pte-44x.h
index 37e98bcf83e0..4192b9bad901 100644
--- a/arch/powerpc/include/asm/pte-44x.h
+++ b/arch/powerpc/include/asm/pte-44x.h
@@ -78,7 +78,7 @@
#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
#define _PAGE_RW 0x00000002 /* S: Write permission */
#define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */
-#define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */
+#define _PAGE_EXEC 0x00000004 /* H: Execute permission */
#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
#define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
#define _PAGE_SPECIAL 0x00000020 /* S: Special page */
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index 8c6e31251034..94e979718dcf 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -36,7 +36,6 @@
/* These five software bits must be masked out when the entry is loaded
* into the TLB.
*/
-#define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */
#define _PAGE_GUARDED 0x0010 /* software: guarded access */
#define _PAGE_DIRTY 0x0020 /* software: page changed */
#define _PAGE_RW 0x0040 /* software: user write access allowed */
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
new file mode 100644
index 000000000000..082d515930a2
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -0,0 +1,84 @@
+#ifndef _ASM_POWERPC_PTE_BOOK3E_H
+#define _ASM_POWERPC_PTE_BOOK3E_H
+#ifdef __KERNEL__
+
+/* PTE bit definitions for processors compliant with the Book3E
+ * architecture 2.06 or later. The position of the PTE bits
+ * matches the HW definition of the optional Embedded Page Table
+ * category.
+ */
+
+/* Architected bits */
+#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */
+#define _PAGE_FILE 0x000002 /* (!present only) software: pte holds file offset */
+#define _PAGE_SW1 0x000002
+#define _PAGE_BAP_SR 0x000004
+#define _PAGE_BAP_UR 0x000008
+#define _PAGE_BAP_SW 0x000010
+#define _PAGE_BAP_UW 0x000020
+#define _PAGE_BAP_SX 0x000040
+#define _PAGE_BAP_UX 0x000080
+#define _PAGE_PSIZE_MSK 0x000f00
+#define _PAGE_PSIZE_4K 0x000200
+#define _PAGE_PSIZE_8K 0x000300
+#define _PAGE_PSIZE_16K 0x000400
+#define _PAGE_PSIZE_32K 0x000500
+#define _PAGE_PSIZE_64K 0x000600
+#define _PAGE_PSIZE_128K 0x000700
+#define _PAGE_PSIZE_256K 0x000800
+#define _PAGE_PSIZE_512K 0x000900
+#define _PAGE_PSIZE_1M 0x000a00
+#define _PAGE_PSIZE_2M 0x000b00
+#define _PAGE_PSIZE_4M 0x000c00
+#define _PAGE_PSIZE_8M 0x000d00
+#define _PAGE_PSIZE_16M 0x000e00
+#define _PAGE_PSIZE_32M 0x000f00
+#define _PAGE_DIRTY 0x001000 /* C: page changed */
+#define _PAGE_SW0 0x002000
+#define _PAGE_U3 0x004000
+#define _PAGE_U2 0x008000
+#define _PAGE_U1 0x010000
+#define _PAGE_U0 0x020000
+#define _PAGE_ACCESSED 0x040000
+#define _PAGE_LENDIAN 0x080000
+#define _PAGE_GUARDED 0x100000
+#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
+#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */
+
+/* "Higher level" linux bit combinations */
+#define _PAGE_EXEC _PAGE_BAP_UX /* .. and was cache cleaned */
+#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
+#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RO (_PAGE_BAP_SR)
+#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
+#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX)
+#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
+
+#define _PAGE_HASHPTE 0
+#define _PAGE_BUSY 0
+
+#define _PAGE_SPECIAL _PAGE_SW0
+
+/* Flags to be preserved on PTE modifications */
+#define _PAGE_HPTEFLAGS _PAGE_BUSY
+
+/* Base page size */
+#ifdef CONFIG_PPC_64K_PAGES
+#define _PAGE_PSIZE _PAGE_PSIZE_64K
+#define PTE_RPN_SHIFT (28)
+#else
+#define _PAGE_PSIZE _PAGE_PSIZE_4K
+#define PTE_RPN_SHIFT (24)
+#endif
+
+/* On 32-bit, we never clear the top part of the PTE */
+#ifdef CONFIG_PPC32
+#define _PTE_NONE_MASK 0xffffffff00000000ULL
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PTE_BOOK3E_H */
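
Note: a useful property of this layout is that every flag bit sits below
PTE_RPN_SHIFT (the highest is _PAGE_WRITETHRU at bit 23, against a shift of
24 or 28), so the real page number and the permission bits never overlap. A
hypothetical accessor built on that:

	/* Sketch: extract the physical page frame number */
	static inline unsigned long book3e_pte_pfn(pte_t pte)
	{
		return pte_val(pte) >> PTE_RPN_SHIFT;
	}
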
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index a7e210b6b48c..c3b65076a263 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -13,9 +13,6 @@
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE 0
#endif
-#ifndef _PAGE_HWEXEC
-#define _PAGE_HWEXEC 0
-#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC 0
#endif
@@ -34,6 +31,9 @@
#ifndef _PAGE_4K_PFN
#define _PAGE_4K_PFN 0
#endif
+#ifndef _PAGE_SAO
+#define _PAGE_SAO 0
+#endif
#ifndef _PAGE_PSIZE
#define _PAGE_PSIZE 0
#endif
@@ -45,10 +45,16 @@
#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
#endif
#ifndef _PAGE_KERNEL_RO
-#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_RO 0
+#endif
+#ifndef _PAGE_KERNEL_ROX
+#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
#endif
#ifndef _PAGE_KERNEL_RW
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#endif
+#ifndef _PAGE_KERNEL_RWX
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
#endif
#ifndef _PAGE_HPTEFLAGS
#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
@@ -93,8 +99,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
_PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
_PAGE_USER | _PAGE_ACCESSED | \
- _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
- _PAGE_EXEC | _PAGE_HWEXEC)
+ _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
/*
* We define 2 sets of base prot bits, one for basic pages (ie,
@@ -151,11 +156,9 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
_PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
_PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC | \
- _PAGE_HWEXEC)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC | \
- _PAGE_HWEXEC)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
/* Protection used for kernel text. We want the debuggers to be able to
* set breakpoints anywhere, so don't write protect the kernel text
diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/pte-fsl-booke.h
index 10820f58acf5..2c12be5f677a 100644
--- a/arch/powerpc/include/asm/pte-fsl-booke.h
+++ b/arch/powerpc/include/asm/pte-fsl-booke.h
@@ -23,7 +23,7 @@
#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
#define _PAGE_RW 0x00004 /* S: Write permission (SW) */
#define _PAGE_DIRTY 0x00008 /* S: Page dirty */
-#define _PAGE_HWEXEC 0x00010 /* H: SX permission */
+#define _PAGE_EXEC 0x00010 /* H: SX permission */
#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
#define _PAGE_ENDIAN 0x00040 /* H: E bit */
@@ -33,13 +33,6 @@
#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
#define _PAGE_SPECIAL 0x00800 /* S: Special page */
-#ifdef CONFIG_PTE_64BIT
-/* ERPN in a PTE never gets cleared, ignore it */
-#define _PTE_NONE_MASK 0xffffffffffff0000ULL
-/* We extend the size of the PTE flags area when using 64-bit PTEs */
-#define PTE_RPN_SHIFT (PAGE_SHIFT + 8)
-#endif
-
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)
diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
index 16e571c7f9ef..4aad4132d0a8 100644
--- a/arch/powerpc/include/asm/pte-hash32.h
+++ b/arch/powerpc/include/asm/pte-hash32.h
@@ -26,7 +26,6 @@
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
-#define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
#define _PAGE_RW 0x400 /* software: user write access allowed */
#define _PAGE_SPECIAL 0x800 /* software: Special page */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1170267736d3..6315edc205d8 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -98,19 +98,15 @@
#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC_BOOK3S_64)
+/* Server variant */
#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV
#define MSR_KERNEL MSR_ | MSR_SF
-
#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
#define MSR_USER64 MSR_USER32 | MSR_SF
-
-#else /* 32-bit */
+#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx)
/* Default MSR for kernel mode. */
-#ifndef MSR_KERNEL /* reg_booke.h also defines this */
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
-#endif
-
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
#endif
@@ -646,6 +642,137 @@
#endif
/*
+ * SPRG usage:
+ *
+ * All 64-bit:
+ * - SPRG1 stores PACA pointer
+ *
+ * 64-bit server:
+ * - SPRG0 unused (reserved for HV on Power4)
+ * - SPRG2 scratch for exception vectors
+ * - SPRG3 unused (user visible)
+ *
+ * 64-bit embedded:
+ * - SPRG0 generic exception scratch
+ * - SPRG2 TLB exception stack
+ * - SPRG3 unused (user visible)
+ * - SPRG4 unused (user visible)
+ * - SPRG6 TLB miss scratch (user visible, sorry!)
+ * - SPRG7 critical exception scratch
+ * - SPRG8 machine check exception scratch
+ * - SPRG9 debug exception scratch
+ *
+ * All 32-bit:
+ * - SPRG3 current thread_info pointer
+ * (virtual on BookE, physical on others)
+ *
+ * 32-bit classic:
+ * - SPRG0 scratch for exception vectors
+ * - SPRG1 scratch for exception vectors
+ * - SPRG2 indicator that we are in RTAS
+ * - SPRG4 (603 only) pseudo TLB LRU data
+ *
+ * 32-bit 40x:
+ * - SPRG0 scratch for exception vectors
+ * - SPRG1 scratch for exception vectors
+ * - SPRG2 scratch for exception vectors
+ * - SPRG4 scratch for exception vectors (not 403)
+ * - SPRG5 scratch for exception vectors (not 403)
+ * - SPRG6 scratch for exception vectors (not 403)
+ * - SPRG7 scratch for exception vectors (not 403)
+ *
+ * 32-bit 440 and FSL BookE:
+ * - SPRG0 scratch for exception vectors
+ * - SPRG1 scratch for exception vectors (*)
+ * - SPRG2 scratch for crit interrupts handler
+ * - SPRG4 scratch for exception vectors
+ * - SPRG5 scratch for exception vectors
+ * - SPRG6 scratch for machine check handler
+ * - SPRG7 scratch for exception vectors
+ * - SPRG9 scratch for debug vectors (e500 only)
+ *
+ * Additionally, BookE separates "read" and "write"
+ * of those registers. That allows the userspace-readable
+ * variant to be used for reads, which can avoid a fault
+ * with KVM-type virtualization.
+ *
+ * (*) Under KVM, the host SPRG1 is used to point to
+ * the current VCPU data structure
+ *
+ * 32-bit 8xx:
+ * - SPRG0 scratch for exception vectors
+ * - SPRG1 scratch for exception vectors
+ * - SPRG2 apparently unused but initialized
+ *
+ */
+#ifdef CONFIG_PPC64
+#define SPRN_SPRG_PACA SPRN_SPRG1
+#else
+#define SPRN_SPRG_THREAD SPRN_SPRG3
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#define SPRN_SPRG_SCRATCH0 SPRN_SPRG2
+#endif
+
+#ifdef CONFIG_PPC_BOOK3E_64
+#define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8
+#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG7
+#define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9
+#define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2
+#define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6
+#define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
+#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
+#define SPRN_SPRG_RTAS SPRN_SPRG2
+#define SPRN_SPRG_603_LRU SPRN_SPRG4
+#endif
+
+#ifdef CONFIG_40x
+#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
+#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
+#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2
+#define SPRN_SPRG_SCRATCH3 SPRN_SPRG4
+#define SPRN_SPRG_SCRATCH4 SPRN_SPRG5
+#define SPRN_SPRG_SCRATCH5 SPRN_SPRG6
+#define SPRN_SPRG_SCRATCH6 SPRN_SPRG7
+#endif
+
+#ifdef CONFIG_BOOKE
+#define SPRN_SPRG_RSCRATCH0 SPRN_SPRG0
+#define SPRN_SPRG_WSCRATCH0 SPRN_SPRG0
+#define SPRN_SPRG_RSCRATCH1 SPRN_SPRG1
+#define SPRN_SPRG_WSCRATCH1 SPRN_SPRG1
+#define SPRN_SPRG_RSCRATCH_CRIT SPRN_SPRG2
+#define SPRN_SPRG_WSCRATCH_CRIT SPRN_SPRG2
+#define SPRN_SPRG_RSCRATCH2 SPRN_SPRG4R
+#define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W
+#define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R
+#define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W
+#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG6R
+#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG6W
+#define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R
+#define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W
+#ifdef CONFIG_E200
+#define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG6R
+#define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG6W
+#else
+#define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9
+#define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9
+#endif
+#define SPRN_SPRG_RVCPU SPRN_SPRG1
+#define SPRN_SPRG_WVCPU SPRN_SPRG1
+#endif
+
+#ifdef CONFIG_8xx
+#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
+#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
+#endif
+
+/*
* An mtfsf instruction with the L bit set. On CPUs that support this a
* full 64bits of FPSCR is restored and on other CPUs the L bit is ignored.
*
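
Note: the symbolic SPRG names let exception prologs read identically across
platforms while the table above documents the per-family assignment. A
32-bit classic prolog, for instance, becomes (illustrative):

	mtspr	SPRN_SPRG_SCRATCH0,r10	/* free two GPRs */
	mtspr	SPRN_SPRG_SCRATCH1,r11
	/* ... handle the exception ... */
	mfspr	r11,SPRN_SPRG_SCRATCH1	/* restore on the way out */
	mfspr	r10,SPRN_SPRG_SCRATCH0
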
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 6bcf364cbb2f..3bf783505528 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -18,18 +18,26 @@
#define MSR_IS MSR_IR /* Instruction Space */
#define MSR_DS MSR_DR /* Data Space */
#define MSR_PMM (1<<2) /* Performance monitor mark bit */
+#define MSR_CM (1<<31) /* Computation Mode (0=32-bit, 1=64-bit) */
-/* Default MSR for kernel mode. */
-#if defined (CONFIG_40x)
+#if defined(CONFIG_PPC_BOOK3E_64)
+#define MSR_ MSR_ME | MSR_CE
+#define MSR_KERNEL MSR_ | MSR_CM
+#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
+#define MSR_USER64 MSR_USER32 | MSR_CM
+#elif defined (CONFIG_40x)
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
-#elif defined(CONFIG_BOOKE)
+#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
+#else
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_CE)
+#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
#endif
/* Special Purpose Registers (SPRNs)*/
#define SPRN_DECAR 0x036 /* Decrementer Auto Reload Register */
#define SPRN_IVPR 0x03F /* Interrupt Vector Prefix Register */
#define SPRN_USPRG0 0x100 /* User Special Purpose Register General 0 */
+#define SPRN_SPRG3R 0x103 /* Special Purpose Register General 3 Read */
#define SPRN_SPRG4R 0x104 /* Special Purpose Register General 4 Read */
#define SPRN_SPRG5R 0x105 /* Special Purpose Register General 5 Read */
#define SPRN_SPRG6R 0x106 /* Special Purpose Register General 6 Read */
@@ -38,11 +46,18 @@
#define SPRN_SPRG5W 0x115 /* Special Purpose Register General 5 Write */
#define SPRN_SPRG6W 0x116 /* Special Purpose Register General 6 Write */
#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */
+#define SPRN_EPCR 0x133 /* Embedded Processor Control Register */
#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */
#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */
#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */
#define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */
#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */
+#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */
+#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */
+#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */
+#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */
+#define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */
+#define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */
#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */
#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */
#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */
@@ -93,6 +108,8 @@
#define SPRN_PID2 0x27A /* Process ID Register 2 */
#define SPRN_TLB0CFG 0x2B0 /* TLB 0 Config Register */
#define SPRN_TLB1CFG 0x2B1 /* TLB 1 Config Register */
+#define SPRN_TLB2CFG 0x2B2 /* TLB 2 Config Register */
+#define SPRN_TLB3CFG 0x2B3 /* TLB 3 Config Register */
#define SPRN_EPR 0x2BE /* External Proxy Register */
#define SPRN_CCR1 0x378 /* Core Configuration Register 1 */
#define SPRN_ZPR 0x3B0 /* Zone Protection Register (40x) */
@@ -415,16 +432,31 @@
#define L2CSR0_L2LOA 0x00000080 /* L2 Cache Lock Overflow Allocate */
#define L2CSR0_L2LO 0x00000020 /* L2 Cache Lock Overflow */
-/* Bit definitions for MMUCSR0 */
-#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */
-#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */
-#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */
-#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */
-
/* Bit definitions for SGR. */
#define SGR_NORMAL 0 /* Speculative fetching allowed. */
#define SGR_GUARDED 1 /* Speculative fetching disallowed. */
+/* Bit definitions for EPCR */
+#define SPRN_EPCR_EXTGS 0x80000000 /* External Input interrupt
+ * directed to Guest state */
+#define SPRN_EPCR_DTLBGS 0x40000000 /* Data TLB Error interrupt
+ * directed to guest state */
+#define SPRN_EPCR_ITLBGS 0x20000000 /* Instr. TLB error interrupt
+ * directed to guest state */
+#define SPRN_EPCR_DSIGS 0x10000000 /* Data Storage interrupt
+ * directed to guest state */
+#define SPRN_EPCR_ISIGS 0x08000000 /* Instr. Storage interrupt
+ * directed to guest state */
+#define SPRN_EPCR_DUVD 0x04000000 /* Disable Hypervisor Debug */
+#define SPRN_EPCR_ICM 0x02000000 /* Interrupt computation mode
+ * (copied to MSR:CM on intr) */
+#define SPRN_EPCR_GICM 0x01000000 /* Guest Interrupt Comp. mode */
+#define SPRN_EPCR_DGTMI 0x00800000 /* Disable TLB Guest Management
+ * instructions */
+#define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates
+ * for hypervisor */
+
/*
* The IBM-403 is an even more odd special case, as it is much
* older than the IBM-405 series. We put these down here incase someone
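
Note: MSR_CM and EPCR are the two knobs that give Book3E-64 its 64-bit
execution environment: MSR[CM] selects the mode for normal execution and
EPCR[ICM] selects it for interrupts. A boot-time sketch (register names from
the definitions above):

	/* Sketch: take interrupts in 64-bit (CM=1) mode */
	mfspr	r3,SPRN_EPCR
	oris	r3,r3,SPRN_EPCR_ICM@h
	mtspr	SPRN_EPCR,r3
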
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 817fac0a0714..dae19342f0b9 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -1,6 +1,6 @@
#ifndef _ASM_POWERPC_SETUP_H
#define _ASM_POWERPC_SETUP_H
-#define COMMAND_LINE_SIZE 512
+#include <asm-generic/setup.h>
#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index c25f73d1d842..c0d3b8af9319 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -148,6 +148,16 @@ extern struct smp_ops_t *smp_ops;
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi(cpumask_t mask);
+/* Definitions related to the secondary CPU spin loop
+ * and entry point. Not all of them exist on both 32- and
+ * 64-bit, but defining them all here does no harm.
+ */
+extern void generic_secondary_smp_init(void);
+extern void generic_secondary_thread_init(void);
+extern unsigned long __secondary_hold_spinloop;
+extern unsigned long __secondary_hold_acknowledge;
+extern char __secondary_hold;
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
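
Note: exporting the spin-loop symbols lets platform code release secondaries
held in __secondary_hold. A heavily simplified sketch; the exact release
protocol is platform specific, so everything beyond the names declared above
is assumption:

	/* Sketch: point a waiting secondary at the generic 64-bit entry */
	static void myplat_kick_cpu(int nr)
	{
		/* nr unused: single-holding-pen model releases them all */
		__secondary_hold_spinloop =
			__pa(generic_secondary_smp_init);
		smp_mb();	/* publish before the secondary polls */
	}
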
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 30891d6e2bc1..8979d4cd3d70 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -13,15 +13,13 @@
#include <linux/swiotlb.h>
-extern struct dma_mapping_ops swiotlb_dma_ops;
-extern struct dma_mapping_ops swiotlb_pci_dma_ops;
-
-int swiotlb_arch_address_needs_mapping(struct device *, dma_addr_t,
- size_t size);
+extern struct dma_map_ops swiotlb_dma_ops;
static inline void dma_mark_clean(void *addr, size_t size) {}
extern unsigned int ppc_swiotlb_enable;
int __init swiotlb_setup_bus_notifier(void);
+extern void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev);
+
#endif /* __ASM_SWIOTLB_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 370600ca2765..ed24bd92fe49 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -95,8 +95,8 @@ SYSCALL(reboot)
SYSX(sys_ni_syscall,compat_sys_old_readdir,sys_old_readdir)
SYSCALL_SPU(mmap)
SYSCALL_SPU(munmap)
-SYSCALL_SPU(truncate)
-SYSCALL_SPU(ftruncate)
+COMPAT_SYS_SPU(truncate)
+COMPAT_SYS_SPU(ftruncate)
SYSCALL_SPU(fchmod)
SYSCALL_SPU(fchown)
COMPAT_SYS_SPU(getpriority)
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e20ff7541f36..e2b428b0f7ba 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -25,57 +25,25 @@
#include <linux/pagemap.h>
-struct mmu_gather;
-
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
-#if !defined(CONFIG_PPC_STD_MMU)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-#elif defined(__powerpc64__)
-
-extern void pte_free_finish(void);
-
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
- struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
-
- /* If there's a TLB batch pending, then we must flush it because the
- * pages are going to be freed and we really don't want to have a CPU
- * access a freed page because it has a stale TLB
- */
- if (tlbbatch->index)
- __flush_tlb_pending(tlbbatch);
-
- pte_free_finish();
-}
-
-#else
-
extern void tlb_flush(struct mmu_gather *tlb);
-#endif
-
/* Get the generic bits... */
#include <asm-generic/tlb.h>
-#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
-
-#else
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
unsigned long address);
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- unsigned long address)
+ unsigned long address)
{
+#ifdef CONFIG_PPC_STD_MMU_32
if (pte_val(*ptep) & _PAGE_HASHPTE)
flush_hash_entry(tlb->mm, ptep, address);
+#endif
}
-#endif
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_TLB_H */
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index abbe3419d1dd..d50a380b2b6f 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -6,7 +6,7 @@
*
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
- * - local_flush_tlb_mm(mm) flushes the specified mm context on
+ * - local_flush_tlb_mm(mm, full) flushes the specified mm context on
* the local processor
* - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
* - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
@@ -29,7 +29,8 @@
* specific tlbie's
*/
-#include <linux/mm.h>
+struct vm_area_struct;
+struct mm_struct;
#define MMU_NO_CONTEXT ((unsigned int)-1)
@@ -40,12 +41,18 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+ int tsize, int ind);
+
#ifdef CONFIG_SMP
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+ int tsize, int ind);
#else
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
+#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i)
#endif
#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
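
Note: the new tsize/ind parameters let callers name the page size of the
entry to invalidate and whether it is an indirect (page-table) entry. An
illustrative call, assuming a hypothetical helper mapping a Linux page-size
index to the hardware tsize encoding:

	/* Sketch: flush one direct entry of the base page size */
	__flush_tlb_page(vma->vm_mm, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0 /* !ind */);
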
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index 26fc449bd989..dc0419b66f17 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -7,9 +7,8 @@
#define VDSO32_LBASE 0x100000
#define VDSO64_LBASE 0x100000
-/* Default map addresses */
+/* Default map addresses for the 32-bit vDSO */
#define VDSO32_MBASE VDSO32_LBASE
-#define VDSO64_MBASE VDSO64_LBASE
#define VDSO_VERSION_STRING LINUX_2.6.15