Diffstat (limited to 'include/asm-mips')
 -rw-r--r--  include/asm-mips/atomic.h | 12
 -rw-r--r--  include/asm-mips/io.h     | 18
 -rw-r--r--  include/asm-mips/smp.h    | 11
 -rw-r--r--  include/asm-mips/system.h |  8
 -rw-r--r--  include/asm-mips/vga.h    |  3

 5 files changed, 46 insertions(+), 6 deletions(-)
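The bulk of the change is in atomic.h: all four hunks in that file below fix the same problem. sc/scd overwrite %0, the register holding the freshly computed result, with the store-conditional success flag, so atomic_sub_if_positive() and atomic64_sub_if_positive() could return that flag rather than the updated counter value. The added subu/dsubu in the branch delay slot recomputes the value, and the .set noreorder/.set reorder pair keeps the assembler from filling that slot with a nop. A plain-C sketch of the intended return contract, written with atomic_cmpxchg() purely for illustration (the real code stays with ll/sc as shown in the hunks below):

/*
 * Illustrative sketch only, not the kernel implementation: what
 * atomic_sub_if_positive() is expected to return.  The new value is
 * returned on success; if the subtraction would go negative, the counter
 * is left untouched and the (negative) would-be result is returned.
 */
static inline int atomic_sub_if_positive_sketch(int i, atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);
		new = old - i;
		if (new < 0)
			return new;	/* would go negative: leave *v alone */
	} while (atomic_cmpxchg(v, old, new) != old);

	return new;			/* the new value, not the sc flag */
}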
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 654b97d3e13a..2c8b853376c9 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -250,7 +250,10 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
" sc %0, %2 \n"
+ " .set noreorder \n"
" beqzl %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set reorder \n"
" sync \n"
"1: \n"
" .set mips0 \n"
@@ -266,7 +269,10 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
" sc %0, %2 \n"
+ " .set noreorder \n"
" beqz %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set reorder \n"
" sync \n"
"1: \n"
" .set mips0 \n"
@@ -598,7 +604,10 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
" scd %0, %2 \n"
+ " .set noreorder \n"
" beqzl %0, 1b \n"
+ " dsubu %0, %1, %3 \n"
+ " .set reorder \n"
" sync \n"
"1: \n"
" .set mips0 \n"
@@ -614,7 +623,10 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
" scd %0, %2 \n"
+ " .set noreorder \n"
" beqz %0, 1b \n"
+ " dsubu %0, %1, %3 \n"
+ " .set reorder \n"
" sync \n"
"1: \n"
" .set mips0 \n"
diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index 5a4c8a54b8f4..8c011aa61afa 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -283,6 +283,24 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
__ioremap_mode((offset), (size), _CACHE_UNCACHED)
/*
+ * ioremap_cachable - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked cachable by
+ * the CPU. Also enables full write-combining. Useful for some
+ * memory-like regions on I/O busses.
+ */
+#define ioremap_cachable(offset, size) \
+ __ioremap_mode((offset), (size), PAGE_CACHABLE_DEFAULT)
+
+/*
 * These two are MIPS specific ioremap variants. ioremap_cacheable_cow
* requests a cachable mapping, ioremap_uncached_accelerated requests a
* mapping using the uncached accelerated mode which isn't supported on
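The new ioremap_cachable() slots in next to ioremap()/ioremap_nocache(). A hedged usage sketch, with a made-up bus address and size, mapping a memory-like region cacheably and touching it through the mmio helpers:

/*
 * Usage sketch only; FB_PHYS_BASE and FB_SIZE are hypothetical values
 * for a memory-like region on an I/O bus (e.g. a frame buffer aperture).
 */
#define FB_PHYS_BASE	0x1c000000UL	/* hypothetical bus address */
#define FB_SIZE		0x00100000UL	/* hypothetical size, 1 MiB */

static void __iomem *fb_base;

static int __init fb_map_example(void)
{
	fb_base = ioremap_cachable(FB_PHYS_BASE, FB_SIZE);
	if (!fb_base)
		return -ENOMEM;

	writel(0, fb_base);	/* access through the mmio helpers */
	return 0;
}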
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 5618f1e12f40..75c6fe7c2126 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -58,7 +58,9 @@ static inline int num_booting_cpus(void)
return cpus_weight(cpu_callout_map);
}
-/* These are defined by the board-specific code. */
+/*
+ * These are defined by the board-specific code.
+ */
/*
* Cause the function described by call_data to be executed on the passed
@@ -79,7 +81,12 @@ extern void prom_boot_secondary(int cpu, struct task_struct *idle);
extern void prom_init_secondary(void);
/*
- * Detect available CPUs, populate phys_cpu_present_map before smp_init
+ * Populate cpu_possible_map before smp_init, called from setup_arch.
+ */
+extern void plat_smp_setup(void);
+
+/*
+ * Called after init_IRQ but before __cpu_up.
*/
extern void prom_prepare_cpus(unsigned int max_cpus);
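plat_smp_setup() is the new early hook: it fills cpu_possible_map from setup_arch(), before prom_prepare_cpus() runs after init_IRQ. A minimal board-side sketch, assuming a hypothetical board_probe_ncpus() firmware query (not a real kernel interface):

/*
 * Sketch of a board's plat_smp_setup().  board_probe_ncpus() stands in
 * for whatever CPU discovery the platform firmware really provides.
 */
void __init plat_smp_setup(void)
{
	int i, ncpus = board_probe_ncpus();	/* hypothetical */

	for (i = 0; i < ncpus && i < NR_CPUS; i++)
		cpu_set(i, cpu_possible_map);	/* populate cpu_possible_map */
}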
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index e8e5d4143377..ddae9bae31af 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -322,7 +322,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
#endif
"2: \n"
" .set pop \n"
- : "=&r" (retval), "=m" (*m)
+ : "=&r" (retval), "=R" (*m)
: "R" (*m), "Jr" (old), "Jr" (new)
: "memory");
} else if (cpu_has_llsc) {
@@ -342,7 +342,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
#endif
"2: \n"
" .set pop \n"
- : "=&r" (retval), "=m" (*m)
+ : "=&r" (retval), "=R" (*m)
: "R" (*m), "Jr" (old), "Jr" (new)
: "memory");
} else {
@@ -379,7 +379,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
#endif
"2: \n"
" .set pop \n"
- : "=&r" (retval), "=m" (*m)
+ : "=&r" (retval), "=R" (*m)
: "R" (*m), "Jr" (old), "Jr" (new)
: "memory");
} else if (cpu_has_llsc) {
@@ -397,7 +397,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
#endif
"2: \n"
" .set pop \n"
- : "=&r" (retval), "=m" (*m)
+ : "=&r" (retval), "=R" (*m)
: "R" (*m), "Jr" (old), "Jr" (new)
: "memory");
} else {
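The system.h hunks tighten the output operand of the cmpxchg asm from "=m" to "=R". GCC's MIPS "R" constraint means an address usable by a non-macro load or store, so the output operand now matches the existing "R" input and cannot be satisfied by an addressing form the ll/sc (or lld/scd) sequence cannot encode. A minimal sketch of the same constraint pairing, assuming a 32-bit LL/SC-capable CPU; this is an illustration, not the header's __cmpxchg_u32():

/*
 * Illustration of the "=R"/"R" pairing on an ll/sc sequence: a bare
 * atomic swap for demonstration purposes only.
 */
static inline unsigned int llsc_swap_sketch(volatile int *m, unsigned int val)
{
	unsigned int old, tmp;

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	mips2				\n"
	"1:	ll	%0, %3		# load linked	\n"
	"	move	%2, %4				\n"
	"	sc	%2, %1		# store cond.	\n"
	"	beqz	%2, 1b		# retry on fail	\n"
	"	.set	pop				\n"
	: "=&r" (old), "=R" (*m), "=&r" (tmp)
	: "R" (*m), "r" (val)
	: "memory");

	return old;
}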
diff --git a/include/asm-mips/vga.h b/include/asm-mips/vga.h
index ca5cec97e167..34755c0a6398 100644
--- a/include/asm-mips/vga.h
+++ b/include/asm-mips/vga.h
@@ -26,6 +26,9 @@
* <linux/vt_buffer.h> has already done the right job for us.
*/
+#undef scr_writew
+#undef scr_readw
+
static inline void scr_writew(u16 val, volatile u16 *addr)
{
*addr = cpu_to_le16(val);
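The two #undef lines matter because <linux/vt_buffer.h> may already have provided scr_writew()/scr_readw() as plain macros; without removing those first, the inline definitions that follow (which convert to little-endian, the byte order VGA text memory expects) would be mangled by macro expansion. A contrived illustration of the clash:

/* Illustration only: a pre-existing macro form of scr_writew() ... */
#define scr_writew(val, addr)	(*(addr) = (val))

/* ... must be removed before a function of the same name can be defined. */
#undef scr_writew

static inline void scr_writew(u16 val, volatile u16 *addr)
{
	*addr = cpu_to_le16(val);	/* VGA text RAM is little-endian */
}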