Diffstat (limited to 'arch/sparc/include')
-rw-r--r--  arch/sparc/include/asm/btext.h           |   6
-rw-r--r--  arch/sparc/include/asm/cacheflush_32.h   |   1
-rw-r--r--  arch/sparc/include/asm/cacheflush_64.h   |   1
-rw-r--r--  arch/sparc/include/asm/fcntl.h           |  19
-rw-r--r--  arch/sparc/include/asm/leon.h            |  45
-rw-r--r--  arch/sparc/include/asm/pci_64.h          |   2
-rw-r--r--  arch/sparc/include/asm/prom.h            |  55
-rw-r--r--  arch/sparc/include/asm/rwsem.h           |   4
-rw-r--r--  arch/sparc/include/asm/smp_32.h          |   9
-rw-r--r--  arch/sparc/include/asm/socket.h          |   2
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h     |  62
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h     |  54
-rw-r--r--  arch/sparc/include/asm/spinlock_types.h  |   8
-rw-r--r--  arch/sparc/include/asm/string_32.h       |  78
-rw-r--r--  arch/sparc/include/asm/string_64.h       |  25
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h  |   2
-rw-r--r--  arch/sparc/include/asm/uaccess_32.h      |  15
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h      |  23
-rw-r--r--  arch/sparc/include/asm/unistd.h          |   3
19 files changed, 182 insertions(+), 232 deletions(-)
diff --git a/arch/sparc/include/asm/btext.h b/arch/sparc/include/asm/btext.h
new file mode 100644
index 000000000000..9b2bc6b6ed0a
--- /dev/null
+++ b/arch/sparc/include/asm/btext.h
@@ -0,0 +1,6 @@
+#ifndef _SPARC_BTEXT_H
+#define _SPARC_BTEXT_H
+
+extern int btext_find_display(void);
+
+#endif /* _SPARC_BTEXT_H */
diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index 68ac10910271..2e468773f250 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -75,6 +75,7 @@ BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
extern void sparc_flush_page_to_ram(struct page *page);
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index c43321729b3b..b95384033e89 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -37,6 +37,7 @@ extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#endif
extern void __flush_dcache_range(unsigned long start, unsigned long end);
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_icache_page(vma, pg) do { } while(0)
diff --git a/arch/sparc/include/asm/fcntl.h b/arch/sparc/include/asm/fcntl.h
index d4d9c9d852c3..3b9cfb39175e 100644
--- a/arch/sparc/include/asm/fcntl.h
+++ b/arch/sparc/include/asm/fcntl.h
@@ -1,14 +1,12 @@
#ifndef _SPARC_FCNTL_H
#define _SPARC_FCNTL_H
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
- located on an ext2 file system */
#define O_APPEND 0x0008
#define FASYNC 0x0040 /* fcntl, for BSD compatibility */
#define O_CREAT 0x0200 /* not fcntl */
#define O_TRUNC 0x0400 /* not fcntl */
#define O_EXCL 0x0800 /* not fcntl */
-#define O_SYNC 0x2000
+#define O_DSYNC 0x2000 /* used to be O_SYNC, see below */
#define O_NONBLOCK 0x4000
#if defined(__sparc__) && defined(__arch64__)
#define O_NDELAY 0x0004
@@ -20,6 +18,21 @@
#define O_DIRECT 0x100000 /* direct disk access hint */
#define O_NOATIME 0x200000
#define O_CLOEXEC 0x400000
+/*
+ * Before Linux 2.6.32 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true Posix O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#define __O_SYNC 0x800000
+#define O_SYNC (__O_SYNC|O_DSYNC)
#define F_GETOWN 5 /* for sockets. */
#define F_SETOWN 6 /* for sockets. */
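A minimal user-space sketch (not part of the patch; the helper and flag combinations are illustrative) of the point made in the fcntl.h comment above: because O_SYNC is defined as __O_SYNC|O_DSYNC, code that only needs data-integrity sync can test the O_DSYNC bit and it will match files opened with either flag.

#include <fcntl.h>
#include <stdio.h>

/* Returns nonzero when the caller asked for at least data-integrity sync. */
static int wants_datasync(int open_flags)
{
	return (open_flags & O_DSYNC) != 0;
}

int main(void)
{
	printf("%d %d\n",
	       wants_datasync(O_WRONLY | O_DSYNC),  /* 1 */
	       wants_datasync(O_WRONLY | O_SYNC));  /* 1: O_SYNC includes O_DSYNC */
	return 0;
}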
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 28a42b73f64f..3ea5964c43b4 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -148,7 +148,7 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
return retval;
}
-extern inline void leon_srmmu_disabletlb(void)
+static inline void leon_srmmu_disabletlb(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
@@ -158,7 +158,7 @@ extern inline void leon_srmmu_disabletlb(void)
"i"(ASI_LEON_MMUREGS) : "memory");
}
-extern inline void leon_srmmu_enabletlb(void)
+static inline void leon_srmmu_enabletlb(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
@@ -190,7 +190,7 @@ extern void leon_init_IRQ(void);
extern unsigned long last_valid_pfn;
-extern inline unsigned long sparc_leon3_get_dcachecfg(void)
+static inline unsigned long sparc_leon3_get_dcachecfg(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
@@ -201,7 +201,7 @@ extern inline unsigned long sparc_leon3_get_dcachecfg(void)
}
/* enable snooping */
-extern inline void sparc_leon3_enable_snooping(void)
+static inline void sparc_leon3_enable_snooping(void)
{
__asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t"
"set 0x800000, %%l2\n\t"
@@ -209,7 +209,14 @@ extern inline void sparc_leon3_enable_snooping(void)
"sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2");
};
-extern inline void sparc_leon3_disable_cache(void)
+static inline int sparc_leon3_snooping_enabled(void)
+{
+ u32 cctrl;
+ __asm__ __volatile__("lda [%%g0] 2, %0\n\t" : "=r"(cctrl));
+ return (cctrl >> 23) & 1;
+};
+
+static inline void sparc_leon3_disable_cache(void)
{
__asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t"
"set 0x00000f, %%l2\n\t"
@@ -340,6 +347,30 @@ extern int leon_flush_needed(void);
extern void leon_switch_mm(void);
extern int srmmu_swprobe_trace;
+#ifdef CONFIG_SMP
+extern int leon_smp_nrcpus(void);
+extern void leon_clear_profile_irq(int cpu);
+extern void leon_smp_done(void);
+extern void leon_boot_cpus(void);
+extern int leon_boot_one_cpu(int i);
+void leon_init_smp(void);
+extern void cpu_probe(void);
+extern void cpu_idle(void);
+extern void init_IRQ(void);
+extern void cpu_panic(void);
+extern int __leon_processor_id(void);
+void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
+
+extern unsigned int real_irq_entry[], smpleon_ticker[];
+extern unsigned int patchme_maybe_smp_msg[];
+extern unsigned long trapbase_cpu1[];
+extern unsigned long trapbase_cpu2[];
+extern unsigned long trapbase_cpu3[];
+extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
+extern unsigned int linux_trap_ipi15_sun4m[];
+
+#endif /* CONFIG_SMP */
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
@@ -356,6 +387,10 @@ extern int srmmu_swprobe_trace;
#define leon_switch_mm() do {} while (0)
#define leon_init_IRQ() do {} while (0)
#define init_leon() do {} while (0)
+#define leon_smp_done() do {} while (0)
+#define leon_boot_cpus() do {} while (0)
+#define leon_boot_one_cpu(i) 1
+#define leon_init_smp() do {} while (0)
#endif /* !defined(CONFIG_SPARC_LEON) */
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index b63e51c3c3ee..b0576df6ec83 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -16,8 +16,6 @@
#define PCI_IRQ_NONE 0xffffffff
-#define PCI_CACHE_LINE_BYTES 64
-
static inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 82a190d7efc1..f845828ca4c6 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -1,3 +1,4 @@
+#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
#ifndef _SPARC_PROM_H
#define _SPARC_PROM_H
#ifdef __KERNEL__
@@ -28,50 +29,11 @@
#define of_prop_cmp(s1, s2) strcasecmp((s1), (s2))
#define of_node_cmp(s1, s2) strcmp((s1), (s2))
-typedef u32 phandle;
-typedef u32 ihandle;
-
-struct property {
- char *name;
- int length;
- void *value;
- struct property *next;
- unsigned long _flags;
- unsigned int unique_id;
-};
-
-struct of_irq_controller;
-struct device_node {
- const char *name;
- const char *type;
- phandle node;
- char *path_component_name;
- char *full_name;
-
- struct property *properties;
- struct property *deadprops; /* removed properties */
- struct device_node *parent;
- struct device_node *child;
- struct device_node *sibling;
- struct device_node *next; /* next device of same type */
- struct device_node *allnext; /* next in list of all nodes */
- struct proc_dir_entry *pde; /* this node's proc directory */
- struct kref kref;
- unsigned long _flags;
- void *data;
- unsigned int unique_id;
-
- struct of_irq_controller *irq_trans;
-};
-
struct of_irq_controller {
unsigned int (*irq_build)(struct device_node *, unsigned int, void *);
void *data;
};
-#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
-#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
-
extern struct device_node *of_find_node_by_cpuid(int cpuid);
extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
extern struct mutex of_set_property_mutex;
@@ -89,15 +51,6 @@ extern void prom_build_devicetree(void);
extern void of_populate_present_mask(void);
extern void of_fill_in_cpu_data(void);
-/* Dummy ref counting routines - to be implemented later */
-static inline struct device_node *of_node_get(struct device_node *node)
-{
- return node;
-}
-static inline void of_node_put(struct device_node *node)
-{
-}
-
/* These routines are here to provide compatibility with how powerpc
* handles IRQ mapping for OF device nodes. We precompute and permanently
* register them in the of_device objects, whereas powerpc computes them
@@ -108,12 +61,6 @@ static inline void irq_dispose_mapping(unsigned int virq)
{
}
-/*
- * NB: This is here while we transition from using asm/prom.h
- * to linux/of.h
- */
-#include <linux/of.h>
-
extern struct device_node *of_console_device;
extern char *of_console_path;
extern char *of_console_options;
diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h
index 1dc129ac2feb..6e5621006f85 100644
--- a/arch/sparc/include/asm/rwsem.h
+++ b/arch/sparc/include/asm/rwsem.h
@@ -35,8 +35,8 @@ struct rw_semaphore {
#endif
#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEP_MAP_INIT(name) }
+{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index 58101dc70493..841905c10215 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -106,6 +106,15 @@ static inline int hard_smp4d_processor_id(void)
return cpuid;
}
+extern inline int hard_smpleon_processor_id(void)
+{
+ int cpuid;
+ __asm__ __volatile__("rd %%asr17,%0\n\t"
+ "srl %0,28,%0" :
+ "=&r" (cpuid) : );
+ return cpuid;
+}
+
#ifndef MODULE
static inline int hard_smp_processor_id(void)
{
diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h
index 3a5ae3d12088..9d3fefcff2f5 100644
--- a/arch/sparc/include/asm/socket.h
+++ b/arch/sparc/include/asm/socket.h
@@ -56,6 +56,8 @@
#define SO_TIMESTAMPING 0x0023
#define SCM_TIMESTAMPING SO_TIMESTAMPING
+#define SO_RXQ_OVFL 0x0024
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 857630cff636..7f9b9dba38a6 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
#include <asm/psr.h>
-#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
: "g2", "memory", "cc");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int result;
__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (result == 0);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
* Sort of like atomic_t's on Sparc, but even more clever.
*
* ------------------------------------
- * | 24-bit counter | wlock | raw_rwlock_t
+ * | 24-bit counter | wlock | arch_rwlock_t
* ------------------------------------
* 31 8 7 0
*
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw)
: "g2", "g4", "memory", "cc");
}
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- arch_read_lock(lock); \
+ __arch_read_lock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw)
: "g2", "g4", "memory", "cc");
}
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- arch_read_unlock(lock); \
+ __arch_read_unlock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
*(volatile __u32 *)&lp->lock = ~0U;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val;
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return (val == 0);
}
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
register int res asm("o0");
lp = rw;
__asm__ __volatile__(
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
return res;
}
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
({ unsigned long flags; \
int res; \
local_irq_save(flags); \
- res = arch_read_trylock(lock); \
+ res = __arch_read_trylock(lock); \
local_irq_restore(flags); \
res; \
})
-#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
#endif /* !(__ASSEMBLY__) */
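An illustrative sketch (macro names and values are mine, not from the patch) of the word layout described in the rwlock comment above: the low byte of the 32-bit word is the writer lock, the upper 24 bits hold the reader count, and arch_write_lock() stores ~0U, so both "can lock" tests fail while a writer holds the lock.

#include <stdio.h>

/* Hypothetical decoding of the 32-bit arch_rwlock_t word. */
#define RW_READERS(x)     ((x) >> 8)      /* bits 31..8: reader count */
#define RW_WRITE_BYTE(x)  ((x) & 0xffu)   /* bits  7..0: wlock byte   */

int main(void)
{
	unsigned int lock = 3u << 8;          /* three readers, no writer */
	printf("readers=%u write_byte=%u\n",
	       RW_READERS(lock), RW_WRITE_BYTE(lock));

	lock = ~0u;                           /* what arch_write_lock() stores */
	printf("read_can_lock=%d write_can_lock=%d\n",
	       !(lock & 0xff), !lock);        /* 0 0: held by a writer */
	return 0;
}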
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 43e514783582..073936a8b275 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
* the spinner sections must be pre-V9 branches.
*/
-#define __raw_spin_is_locked(lp) ((lp)->lock != 0)
+#define arch_spin_is_locked(lp) ((lp)->lock != 0)
-#define __raw_spin_unlock_wait(lp) \
+#define arch_spin_unlock_wait(lp) \
do { rmb(); \
} while((lp)->lock)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
: "memory");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long result;
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (result == 0UL);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
" stb %%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
: "memory");
}
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long tmp1, tmp2;
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
: "memory");
}
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
return tmp1;
}
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
: "memory");
}
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
@@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
return result;
}
-#define __raw_read_lock(p) arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p) arch_read_trylock(p)
-#define __raw_read_unlock(p) arch_read_unlock(p)
-#define __raw_write_lock(p) arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p) arch_write_unlock(p)
-#define __raw_write_trylock(p) arch_write_trylock(p)
-
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
-
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_read_lock(p) arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p) arch_read_trylock(p)
+#define arch_read_unlock(p) arch_read_unlock(p)
+#define arch_write_lock(p) arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p) arch_write_unlock(p)
+#define arch_write_trylock(p) arch_write_trylock(p)
+
+#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw) (!(rw)->lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 37cbe01c585b..9c454fdeaad8 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 6c5fddb7e6b5..edf196ee4ef8 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -16,8 +16,6 @@
#ifdef __KERNEL__
extern void __memmove(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memset(void *,int,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS
@@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t);
})
#define __HAVE_ARCH_MEMCPY
-
-static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
- extern void __copy_1page(void *, const void *);
-
- if(n <= 32) {
- __builtin_memcpy(to, from, n);
- } else if (((unsigned int) to & 7) != 0) {
- /* Destination is not aligned on the double-word boundary */
- __memcpy(to, from, n);
- } else {
- switch(n) {
- case PAGE_SIZE:
- __copy_1page(to, from);
- break;
- default:
- __memcpy(to, from, n);
- break;
- }
- }
- return to;
-}
-
-static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
- __memcpy(to, from, n);
- return to;
-}
-
-#undef memcpy
-#define memcpy(t, f, n) \
-(__builtin_constant_p(n) ? \
- __constant_memcpy((t),(f),(n)) : \
- __nonconstant_memcpy((t),(f),(n)))
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
-
-static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
-{
- extern void bzero_1page(void *);
- extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
- if(!c) {
- if(count == PAGE_SIZE)
- bzero_1page(s);
- else
- __bzero(s, count);
- } else {
- __memset(s, c, count);
- }
- return s;
-}
-
-static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
-{
- extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
- if(!c)
- __bzero(s, count);
- else
- __memset(s, c, count);
- return s;
-}
-
-static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
-{
- __memset(s, c, count);
- return s;
-}
-
-#undef memset
-#define memset(s, c, count) \
-(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
- __constant_c_and_count_memset((s), (c), (count)) : \
- __constant_c_memset((s), (c), (count))) \
- : __nonconstant_memset((s), (c), (count)))
+#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 43161f2d17eb..9623bc213158 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -15,8 +15,6 @@
#include <asm/asi.h>
-extern void *__memset(void *,int,__kernel_size_t);
-
#ifndef EXPORT_SYMTAB_STROPS
/* First the mem*() things. */
@@ -24,29 +22,10 @@ extern void *__memset(void *,int,__kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMCPY
-extern void *memcpy(void *, const void *, __kernel_size_t);
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
-extern void *__builtin_memset(void *,int,__kernel_size_t);
-
-static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
-{
- extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
- if (!c) {
- __bzero(s, count);
- return s;
- } else
- return __memset(s, c, count);
-}
-
-#undef memset
-#define memset(s, c, count) \
-((__builtin_constant_p(count) && (count) <= 32) ? \
- __builtin_memset((s), (c), (count)) : \
- (__builtin_constant_p(c) ? \
- __constant_memset((s), (c), (count)) : \
- __memset((s), (c), (count))))
+#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 1b45a7bbe407..7257ebb8f394 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -227,6 +227,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
/* flag bit 8 is available */
#define TIF_SECCOMP 9 /* secure computing */
#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
/* flag bit 11 is available */
/* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as
@@ -246,6 +247,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_FREEZE (1<<TIF_FREEZE)
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 8303ac481034..489d2ba92bcb 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
return __copy_user(to, (__force void __user *) from, n);
}
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+ __compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ int sz = __compiletime_object_size(to);
+
+ if (unlikely(sz != -1 && sz < n)) {
+ copy_from_user_overflow();
+ return -EFAULT;
+ }
+
if (n && __access_ok((unsigned long) from, n))
return __copy_user((__force void __user *) to, from, n);
else
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 9ea271e19c70..dbc141660994 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -6,6 +6,7 @@
*/
#ifdef __KERNEL__
+#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -204,6 +205,14 @@ __asm__ __volatile__( \
extern int __get_user_bad(void);
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+ __compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
extern unsigned long __must_check ___copy_from_user(void *to,
const void __user *from,
unsigned long size);
@@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long ret = ___copy_from_user(to, from, size);
-
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
+ unsigned long ret = (unsigned long) -EFAULT;
+ int sz = __compiletime_object_size(to);
+
+ if (likely(sz == -1 || sz >= size)) {
+ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+ } else {
+ copy_from_user_overflow();
+ }
return ret;
}
#define __copy_from_user copy_from_user
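A hedged sketch (the struct and handler are hypothetical) of what the __compiletime_object_size() checks added to both uaccess headers catch: when the destination object's size is known at compile time and is smaller than the requested copy, copy_from_user_overflow() is referenced, which is a build error under CONFIG_DEBUG_STRICT_USER_COPY_CHECKS and a warning otherwise.

#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_args {                       /* hypothetical ioctl argument */
	int a;
	int b;
};

static long demo_ioctl(void __user *argp)
{
	struct demo_args args;

	/* sizeof(args) fits the destination object: compiles cleanly. */
	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	/*
	 * copy_from_user(&args, argp, 2 * sizeof(args)) would instead be
	 * resolved at compile time and emit the copy_from_user_overflow()
	 * diagnostic.
	 */
	return 0;
}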
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 42f2316c3eaa..cb4b9bfd0d87 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -396,8 +396,9 @@
#define __NR_pwritev 325
#define __NR_rt_tgsigqueueinfo 326
#define __NR_perf_event_open 327
+#define __NR_recvmmsg 328
-#define NR_SYSCALLS 328
+#define NR_syscalls 329
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,