Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/barrier.h      |  2
-rw-r--r--  include/asm-alpha/core_mcpcia.h  |  2
-rw-r--r--  include/asm-alpha/core_t2.h      | 14
-rw-r--r--  include/asm-alpha/io.h           |  6
-rw-r--r--  include/asm-alpha/mmu_context.h  |  6
-rw-r--r--  include/asm-alpha/param.h        |  4
-rw-r--r--  include/asm-alpha/percpu.h       | 74
-rw-r--r--  include/asm-alpha/pgtable.h      | 21
-rw-r--r--  include/asm-alpha/system.h       | 10
-rw-r--r--  include/asm-alpha/vga.h          |  6
10 files changed, 119 insertions(+), 26 deletions(-)
diff --git a/include/asm-alpha/barrier.h b/include/asm-alpha/barrier.h
index 384dc08d6f53..ac78eba909bc 100644
--- a/include/asm-alpha/barrier.h
+++ b/include/asm-alpha/barrier.h
@@ -24,7 +24,7 @@ __asm__ __volatile__("mb": : :"memory")
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#define smp_read_barrier_depends() barrier()
+#define smp_read_barrier_depends() do { } while (0)
#endif
#define set_mb(var, value) \
diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h
index 525b4f6a7ace..acf55b483472 100644
--- a/include/asm-alpha/core_mcpcia.h
+++ b/include/asm-alpha/core_mcpcia.h
@@ -261,7 +261,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
}
#endif
-static inline int __mcpcia_is_mmio(unsigned long addr)
+extern inline int __mcpcia_is_mmio(unsigned long addr)
{
return (addr & 0x80000000UL) == 0;
}
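
Several files in this diff switch helpers from static inline back to extern inline. Under the GNU89 inline semantics the kernel builds with, an extern inline body is used only for inlining and never emits an out-of-line symbol, so exactly one C file must provide the external definition (alpha does this with guards such as __MMU_EXTERN_INLINE, visible in the mmu_context.h hunk further down). A minimal sketch of that pattern, with hypothetical names (widget.h, widget.c and widget_is_mmio are illustrative, not part of this commit):

    /* widget.h -- sketch only; assumes gcc's gnu89 inline semantics */
    #ifndef WIDGET_H
    #define WIDGET_H

    #ifndef __WIDGET_EXTERN_INLINE
    #define __WIDGET_EXTERN_INLINE extern inline
    #endif

    /* Used only for inlining here; no out-of-line copy is emitted at the
       point of use, so one .c file must supply the real body. */
    __WIDGET_EXTERN_INLINE int widget_is_mmio(unsigned long addr)
    {
            return (addr & 0x80000000UL) == 0;
    }

    #endif /* WIDGET_H */

    /* widget.c -- exactly one translation unit provides the body */
    #define __WIDGET_EXTERN_INLINE      /* expands to nothing, so the header */
    #include "widget.h"                 /* body becomes the one external     */
                                        /* definition                        */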
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h
index 90e6b5d6c214..46bfff58f670 100644
--- a/include/asm-alpha/core_t2.h
+++ b/include/asm-alpha/core_t2.h
@@ -356,13 +356,13 @@ struct el_t2_frame_corrected {
#define vip volatile int *
#define vuip volatile unsigned int *
-static inline u8 t2_inb(unsigned long addr)
+extern inline u8 t2_inb(unsigned long addr)
{
long result = *(vip) ((addr << 5) + T2_IO + 0x00);
return __kernel_extbl(result, addr & 3);
}
-static inline void t2_outb(u8 b, unsigned long addr)
+extern inline void t2_outb(u8 b, unsigned long addr)
{
unsigned long w;
@@ -371,13 +371,13 @@ static inline void t2_outb(u8 b, unsigned long addr)
mb();
}
-static inline u16 t2_inw(unsigned long addr)
+extern inline u16 t2_inw(unsigned long addr)
{
long result = *(vip) ((addr << 5) + T2_IO + 0x08);
return __kernel_extwl(result, addr & 3);
}
-static inline void t2_outw(u16 b, unsigned long addr)
+extern inline void t2_outw(u16 b, unsigned long addr)
{
unsigned long w;
@@ -386,12 +386,12 @@ static inline void t2_outw(u16 b, unsigned long addr)
mb();
}
-static inline u32 t2_inl(unsigned long addr)
+extern inline u32 t2_inl(unsigned long addr)
{
return *(vuip) ((addr << 5) + T2_IO + 0x18);
}
-static inline void t2_outl(u32 b, unsigned long addr)
+extern inline void t2_outl(u32 b, unsigned long addr)
{
*(vuip) ((addr << 5) + T2_IO + 0x18) = b;
mb();
@@ -435,7 +435,7 @@ static inline void t2_outl(u32 b, unsigned long addr)
set_hae(msb); \
}
-static DEFINE_SPINLOCK(t2_hae_lock);
+extern spinlock_t t2_hae_lock;
/*
* NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
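
The t2_hae_lock hunk above replaces a per-includer lock definition with a declaration, so the spinlock is defined exactly once. A sketch of the resulting split, assuming the definition moves into the matching arch C file (the .c path is an assumption, not shown in this diff):

    /* include/asm-alpha/core_t2.h: declare only */
    extern spinlock_t t2_hae_lock;

    /* e.g. arch/alpha/kernel/core_t2.c: the single definition */
    DEFINE_SPINLOCK(t2_hae_lock);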
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index 38f18cf18c9d..e971ab000f95 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -35,7 +35,7 @@
* register not being up-to-date with respect to the hardware
* value.
*/
-static inline void __set_hae(unsigned long new_hae)
+extern inline void __set_hae(unsigned long new_hae)
{
unsigned long flags;
local_irq_save(flags);
@@ -49,7 +49,7 @@ static inline void __set_hae(unsigned long new_hae)
local_irq_restore(flags);
}
-static inline void set_hae(unsigned long new_hae)
+extern inline void set_hae(unsigned long new_hae)
{
if (new_hae != alpha_mv.hae_cache)
__set_hae(new_hae);
@@ -176,7 +176,7 @@ REMAP2(u64, writeq, volatile)
#undef REMAP1
#undef REMAP2
-static inline void __iomem *generic_ioportmap(unsigned long a)
+extern inline void __iomem *generic_ioportmap(unsigned long a)
{
return alpha_mv.mv_ioportmap(a);
}
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 6a5be1f7debf..86c08a02d239 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -23,7 +23,7 @@
#endif
-extern inline unsigned long
+static inline unsigned long
__reload_thread(struct pcb_struct *pcb)
{
register unsigned long a0 __asm__("$16");
@@ -114,7 +114,7 @@ extern unsigned long last_asn;
#define __MMU_EXTERN_INLINE
#endif
-static inline unsigned long
+extern inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
unsigned long asn = cpu_last_asn(cpu);
@@ -226,7 +226,7 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
# endif
#endif
-extern inline int
+static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int i;
diff --git a/include/asm-alpha/param.h b/include/asm-alpha/param.h
index 0982f1d39499..e691ecfedb2c 100644
--- a/include/asm-alpha/param.h
+++ b/include/asm-alpha/param.h
@@ -5,8 +5,12 @@
hardware ignores reprogramming. We also need userland buy-in to the
change in HZ, since this is visible in the wait4 resources etc. */
+#ifdef __KERNEL__
#define HZ CONFIG_HZ
#define USER_HZ HZ
+#else
+#define HZ 1024
+#endif
#define EXEC_PAGESIZE 8192
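
With this change the kernel ticks at CONFIG_HZ while userland continues to see the traditional alpha value of 1024; portable programs should hard-code neither constant. A hedged userspace sketch of the usual conversion via sysconf(_SC_CLK_TCK):

    /* userspace sketch: convert clock ticks using USER_HZ, not CONFIG_HZ */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/times.h>

    int main(void)
    {
            struct tms t;
            clock_t ticks = times(&t);              /* ticks since an arbitrary point */
            long user_hz = sysconf(_SC_CLK_TCK);    /* 1024 on alpha */

            printf("%.2f seconds (%ld ticks at %ld Hz)\n",
                   (double)ticks / user_hz, (long)ticks, user_hz);
            return 0;
    }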
diff --git a/include/asm-alpha/percpu.h b/include/asm-alpha/percpu.h
index 48348fe34c19..3495e8e00d70 100644
--- a/include/asm-alpha/percpu.h
+++ b/include/asm-alpha/percpu.h
@@ -1,6 +1,78 @@
#ifndef __ALPHA_PERCPU_H
#define __ALPHA_PERCPU_H
+#include <linux/compiler.h>
+#include <linux/threads.h>
-#include <asm-generic/percpu.h>
+/*
+ * Determine the real variable name from the name visible in the
+ * kernel sources.
+ */
+#define per_cpu_var(var) per_cpu__##var
+
+#ifdef CONFIG_SMP
+
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ */
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+#define per_cpu_offset(x) (__per_cpu_offset[x])
+
+#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#ifdef CONFIG_DEBUG_PREEMPT
+#define my_cpu_offset per_cpu_offset(smp_processor_id())
+#else
+#define my_cpu_offset __my_cpu_offset
+#endif
+
+#ifndef MODULE
+#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
+#define PER_CPU_ATTRIBUTES
+#else
+/*
+ * To calculate addresses of locally defined variables, GCC uses a 32-bit
+ * displacement from the GP, which doesn't work for per-cpu variables in
+ * modules, as the offset to the kernel per-cpu area is way above 4G.
+ *
+ * This forces allocation of a GOT entry for per cpu variable using
+ * ldq instruction with a 'literal' relocation.
+ */
+#define SHIFT_PERCPU_PTR(var, offset) ({ \
+ extern int simple_identifier_##var(void); \
+ unsigned long __ptr, tmp_gp; \
+ asm ( "br %1, 1f \n\
+ 1: ldgp %1, 0(%1) \n\
+ ldq %0, per_cpu__" #var"(%1)\t!literal" \
+ : "=&r"(__ptr), "=&r"(tmp_gp)); \
+ (typeof(&per_cpu_var(var)))(__ptr + (offset)); })
+
+#define PER_CPU_ATTRIBUTES __used
+
+#endif /* MODULE */
+
+/*
+ * A percpu variable may point to a discarded region. The following are
+ * established ways to produce a usable pointer from the percpu variable
+ * offset.
+ */
+#define per_cpu(var, cpu) \
+ (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
+#define __get_cpu_var(var) \
+ (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
+#define __raw_get_cpu_var(var) \
+ (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
+
+#else /* ! SMP */
+
+#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
+#define __get_cpu_var(var) per_cpu_var(var)
+#define __raw_get_cpu_var(var) per_cpu_var(var)
+
+#define PER_CPU_ATTRIBUTES
+
+#endif /* SMP */
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
#endif /* __ALPHA_PERCPU_H */
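
For context, the macros above sit underneath the generic per-cpu API from linux/percpu.h; ordinary code does not call them directly. A hedged usage sketch (the counter name is illustrative):

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, my_hits);  /* illustrative variable */

    static void note_hit(void)
    {
            get_cpu_var(my_hits)++;         /* resolves through SHIFT_PERCPU_PTR
                                               with my_cpu_offset on SMP */
            put_cpu_var(my_hits);
    }

    static unsigned long total_hits(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu(my_hits, cpu);   /* adds per_cpu_offset(cpu) */
            return sum;
    }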
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 05ce5fba43e3..3f0c59f6d8aa 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -287,17 +287,34 @@ extern inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+/*
+ * The smp_read_barrier_depends() in the following functions are required to
+ * order the load of *dir (the pointer in the top level page table) with any
+ * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
+ *
+ * If this ordering is not enforced, the CPU might load an older value of
+ * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
+ * more details.
+ *
+ * Note that we never change the mm->pgd pointer after the task is running, so
+ * pgd_offset does not require such a barrier.
+ */
+
/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
- return (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+ pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+ smp_read_barrier_depends(); /* see above */
+ return ret;
}
/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
- return (pte_t *) pmd_page_vaddr(*dir)
+ pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
+ smp_read_barrier_depends(); /* see above */
+ return ret;
}
#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
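
The pairing the new comment relies on is: the writer fully initializes the lower-level table, issues a write barrier, and only then stores the pointer into *dir; the reader's smp_read_barrier_depends() (a real barrier only on Alpha, and after the barrier.h hunk a pure no-op on UP) orders the dependent load that follows. A simplified sketch of that publish/consume pattern (names are illustrative, not the actual mm/ code):

    /* writer side (publication), sketch */
    new = alloc_zeroed_table();
    smp_wmb();                      /* initialize the table before publishing */
    *dir = table_to_entry(new);     /* publish the pointer */

    /* reader side (lookup), sketch */
    entry = entry_to_table(*dir);   /* load the published pointer */
    smp_read_barrier_depends();     /* order the dependent load below */
    val = entry[idx];               /* data-dependent load sees initialized data */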
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index ed221d6408fc..afe20fa58c99 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -184,7 +184,7 @@ enum amask_enum {
__amask; })
#define __CALL_PAL_R0(NAME, TYPE) \
-static inline TYPE NAME(void) \
+extern inline TYPE NAME(void) \
{ \
register TYPE __r0 __asm__("$0"); \
__asm__ __volatile__( \
@@ -196,7 +196,7 @@ static inline TYPE NAME(void) \
}
#define __CALL_PAL_W1(NAME, TYPE0) \
-static inline void NAME(TYPE0 arg0) \
+extern inline void NAME(TYPE0 arg0) \
{ \
register TYPE0 __r16 __asm__("$16") = arg0; \
__asm__ __volatile__( \
@@ -207,7 +207,7 @@ static inline void NAME(TYPE0 arg0) \
}
#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \
-static inline void NAME(TYPE0 arg0, TYPE1 arg1) \
+extern inline void NAME(TYPE0 arg0, TYPE1 arg1) \
{ \
register TYPE0 __r16 __asm__("$16") = arg0; \
register TYPE1 __r17 __asm__("$17") = arg1; \
@@ -219,7 +219,7 @@ static inline void NAME(TYPE0 arg0, TYPE1 arg1) \
}
#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \
-static inline RTYPE NAME(TYPE0 arg0) \
+extern inline RTYPE NAME(TYPE0 arg0) \
{ \
register RTYPE __r0 __asm__("$0"); \
register TYPE0 __r16 __asm__("$16") = arg0; \
@@ -232,7 +232,7 @@ static inline RTYPE NAME(TYPE0 arg0) \
}
#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \
-static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
+extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
{ \
register RTYPE __r0 __asm__("$0"); \
register TYPE0 __r16 __asm__("$16") = arg0; \
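
Each __CALL_PAL_* invocation stamps out a tiny PALcode wrapper, which is why the generated functions get the same extern inline treatment as the other helpers in this commit. As an approximate illustration (the asm constraints and clobbers are abridged from memory, not quoted from this hunk), __CALL_PAL_W1(wrusp, unsigned long) expands to roughly:

    extern inline void wrusp(unsigned long arg0)
    {
            register unsigned long __r16 __asm__("$16") = arg0;    /* PAL argument */
            __asm__ __volatile__(
                    "call_pal %1 # wrusp"
                    : "=r"(__r16)
                    : "i"(PAL_wrusp), "0"(__r16)
                    : "$1", "$22", "$23", "$24", "$25");
    }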
diff --git a/include/asm-alpha/vga.h b/include/asm-alpha/vga.h
index e8df1e7aae6b..c00106bac521 100644
--- a/include/asm-alpha/vga.h
+++ b/include/asm-alpha/vga.h
@@ -13,7 +13,7 @@
#define VT_BUF_HAVE_MEMSETW
#define VT_BUF_HAVE_MEMCPYW
-extern inline void scr_writew(u16 val, volatile u16 *addr)
+static inline void scr_writew(u16 val, volatile u16 *addr)
{
if (__is_ioaddr(addr))
__raw_writew(val, (volatile u16 __iomem *) addr);
@@ -21,7 +21,7 @@ extern inline void scr_writew(u16 val, volatile u16 *addr)
*addr = val;
}
-extern inline u16 scr_readw(volatile const u16 *addr)
+static inline u16 scr_readw(volatile const u16 *addr)
{
if (__is_ioaddr(addr))
return __raw_readw((volatile const u16 __iomem *) addr);
@@ -29,7 +29,7 @@ extern inline u16 scr_readw(volatile const u16 *addr)
return *addr;
}
-extern inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
+static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
{
if (__is_ioaddr(s))
memsetw_io((u16 __iomem *) s, c, count);