Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/bitops/atomic.h   10
-rw-r--r--   include/asm-generic/bug.h               8
-rw-r--r--   include/asm-generic/cacheflush.h        1
-rw-r--r--   include/asm-generic/fcntl.h            25
-rw-r--r--   include/asm-generic/gpio.h              9
-rw-r--r--   include/asm-generic/memory_model.h      2
-rw-r--r--   include/asm-generic/mman-common.h       6
-rw-r--r--   include/asm-generic/percpu.h            5
-rw-r--r--   include/asm-generic/socket.h            1
-rw-r--r--   include/asm-generic/unistd.h           12
-rw-r--r--   include/asm-generic/vmlinux.lds.h       8
11 files changed, 67 insertions, 20 deletions
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index c8946465e63a..ecc44a8e2b44 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,19 +15,19 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
- __raw_spin_lock(s); \
+ arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
- __raw_spin_unlock(s); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
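
These hashed arch_spinlock_t locks are what the generic atomic bitops fall back on when an architecture has no native atomic instructions. A minimal sketch of how set_bit() uses the pair above (simplified; BIT_MASK() and BIT_WORD() come from linux/bitops.h, not from this header):

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	/* hash the word address to one of the ATOMIC_HASH_SIZE locks,
	 * take it with interrupts disabled, do the read-modify-write, release */
	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
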
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 4b6755984d24..18c435d7c082 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -113,22 +113,22 @@ extern void warn_slowpath_null(const char *file, const int line);
#endif
#define WARN_ON_ONCE(condition) ({ \
- static int __warned; \
+ static bool __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN_ON(!__warned)) \
- __warned = 1; \
+ __warned = true; \
unlikely(__ret_warn_once); \
})
#define WARN_ONCE(condition, format...) ({ \
- static int __warned; \
+ static bool __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN(!__warned, format)) \
- __warned = 1; \
+ __warned = true; \
unlikely(__ret_warn_once); \
})
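
The switch from int to bool only shrinks the static once-flag; callers are unchanged. A hedged sketch of typical use (the surrounding function, struct and limit are made up for illustration):

static void check_widget(struct widget *w)
{
	/* warns at most once per boot, even if the condition keeps recurring */
	WARN_ON_ONCE(w->refcount < 0);

	/* same idea, with a printf-style message */
	WARN_ONCE(w->len > WIDGET_MAX_LEN,
		  "widget %d oversized: len=%zu\n", w->id, w->len);
}
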
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index ba4ec39a1131..57b5c3c82e86 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -13,6 +13,7 @@
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index 495dc8af4044..fcd268ce0674 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -3,8 +3,6 @@
#include <linux/types.h>
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
- located on an ext2 file system */
#define O_ACCMODE 00000003
#define O_RDONLY 00000000
#define O_WRONLY 00000001
@@ -27,8 +25,8 @@
#ifndef O_NONBLOCK
#define O_NONBLOCK 00004000
#endif
-#ifndef O_SYNC
-#define O_SYNC 00010000
+#ifndef O_DSYNC
+#define O_DSYNC 00010000 /* used to be O_SYNC, see below */
#endif
#ifndef FASYNC
#define FASYNC 00020000 /* fcntl, for BSD compatibility */
@@ -51,6 +49,25 @@
#ifndef O_CLOEXEC
#define O_CLOEXEC 02000000 /* set close_on_exec */
#endif
+
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true Posix O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#ifndef O_SYNC
+#define __O_SYNC 04000000
+#define O_SYNC (__O_SYNC|O_DSYNC)
+#endif
+
#ifndef O_NDELAY
#define O_NDELAY O_NONBLOCK
#endif
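
The effect of the comment block above: O_SYNC is now two bits (__O_SYNC|O_DSYNC), so a binary built against these headers still gets at least O_DSYNC semantics on a kernel that only understands the old single bit, and code that does not care about the difference can simply test O_DSYNC. A small userspace sketch (the helper name and mode are illustrative):

#include <fcntl.h>

int open_journal(const char *path, int want_full_sync)
{
	/* O_DSYNC: each write is durable for the data itself;
	 * O_SYNC additionally syncs file metadata such as timestamps. */
	int flags = O_WRONLY | O_CREAT | (want_full_sync ? O_SYNC : O_DSYNC);

	return open(path, flags, 0600);
}
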
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 66d6106a2067..485eeb6c4ef3 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -28,6 +28,7 @@ static inline int gpio_is_valid(int number)
return ((unsigned)number) < ARCH_NR_GPIOS;
}
+struct device;
struct seq_file;
struct module;
@@ -144,6 +145,7 @@ extern int __gpio_to_irq(unsigned gpio);
extern int gpio_export(unsigned gpio, bool direction_may_change);
extern int gpio_export_link(struct device *dev, const char *name,
unsigned gpio);
+extern int gpio_sysfs_set_active_low(unsigned gpio, int value);
extern void gpio_unexport(unsigned gpio);
#endif /* CONFIG_GPIO_SYSFS */
@@ -181,6 +183,8 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
#ifndef CONFIG_GPIO_SYSFS
+struct device;
+
/* sysfs support is only available with gpiolib, where it's optional */
static inline int gpio_export(unsigned gpio, bool direction_may_change)
@@ -194,6 +198,11 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return -ENOSYS;
}
+static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ return -ENOSYS;
+}
+
static inline void gpio_unexport(unsigned gpio)
{
}
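
gpio_sysfs_set_active_low() gets a real declaration under CONFIG_GPIO_SYSFS and an -ENOSYS stub otherwise, so callers need no #ifdef. A hedged sketch of a caller (the helper name and the active-low "power good" line are illustrative):

/* Export a line whose electrical level is inverted relative to its
 * logical meaning, so its sysfs "value" file reads 1 when the pin is low. */
static int export_power_good(unsigned gpio)
{
	int err;

	err = gpio_export(gpio, false);	/* don't allow direction changes */
	if (err)
		return err;

	return gpio_sysfs_set_active_low(gpio, 1);
}
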
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 4c8d0afae711..fb2d63f13f4c 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -47,7 +47,7 @@
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
-/* memmap is virtually contigious. */
+/* memmap is virtually contiguous. */
#define __pfn_to_page(pfn) (vmemmap + (pfn))
#define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
index 5ee13b2fd223..3da9e2742fa0 100644
--- a/include/asm-generic/mman-common.h
+++ b/include/asm-generic/mman-common.h
@@ -19,6 +19,11 @@
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
+#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
+# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
+#else
+# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
+#endif
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_INVALIDATE 2 /* invalidate the caches */
@@ -35,6 +40,7 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
#define MADV_HWPOISON 100 /* poison a page for testing */
+#define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */
#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
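
MAP_UNINITIALIZED is only non-zero when the kernel was built with CONFIG_MMAP_ALLOW_UNINITIALIZED (intended for no-MMU systems where zero-filling anonymous mappings is pure overhead); everywhere else it expands to 0 and is silently ignored. A minimal userspace sketch (the helper name is illustrative):

#include <stddef.h>
#include <sys/mman.h>

/* Ask for anonymous memory without the usual zero fill; degrades to an
 * ordinary zeroed mapping wherever MAP_UNINITIALIZED is defined as 0. */
void *fast_scratch(size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
}
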
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 90079c373f1c..8087b90d4673 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define __raw_get_cpu_var(var) \
(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void);
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
#define __get_cpu_var(var) per_cpu_var(var)
#define __raw_get_cpu_var(var) per_cpu_var(var)
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
#endif /* SMP */
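
this_cpu_ptr()/__this_cpu_ptr() turn a per-cpu pointer into the instance belonging to the executing CPU; on UP builds they simply collapse to per_cpu_ptr(ptr, 0). A hedged sketch of typical use with a dynamically allocated counter (the variable and helper are illustrative; allocation via alloc_percpu() is assumed to have happened earlier):

static int *hit_count;	/* assumed set up with alloc_percpu(int) */

static void note_hit(void)
{
	preempt_disable();	/* stay on one CPU while touching its copy */
	(*this_cpu_ptr(hit_count))++;
	preempt_enable();
}
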
diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h
index 538991cef6f0..9a6115e7cf63 100644
--- a/include/asm-generic/socket.h
+++ b/include/asm-generic/socket.h
@@ -63,4 +63,5 @@
#define SO_PROTOCOL 38
#define SO_DOMAIN 39
+#define SO_RXQ_OVFL 40
#endif /* __ASM_GENERIC_SOCKET_H */
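
SO_RXQ_OVFL asks the kernel to attach the number of datagrams dropped because of a full receive queue as ancillary data on each received packet. A hedged sketch of the enabling step only (reading the counter back requires a recvmsg() cmsg loop, omitted here; the helper name is illustrative):

#include <sys/socket.h>

int enable_drop_counter(int sock)
{
	int one = 1;

	/* the per-packet drop count then arrives as a cmsg on recvmsg() */
	return setsockopt(sock, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));
}
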
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index d76b66acea95..6a0b30f78a62 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -622,16 +622,20 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
#define __NR_perf_event_open 241
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
+#define __NR_accept4 242
+__SYSCALL(__NR_accept4, sys_accept4)
+#define __NR_recvmmsg 243
+__SYSCALL(__NR_recvmmsg, sys_recvmmsg)
#undef __NR_syscalls
-#define __NR_syscalls 242
+#define __NR_syscalls 244
/*
* All syscalls below here should go away really,
* these are provided for both review and as a porting
* help for the C library version.
*
- * Last chance: are any of these important enought to
+ * Last chance: are any of these important enough to
* enable by default?
*/
#ifdef __ARCH_WANT_SYSCALL_NO_AT
@@ -802,7 +806,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall)
#define __NR_statfs __NR3264_statfs
#define __NR_fstatfs __NR3264_fstatfs
#define __NR_truncate __NR3264_truncate
-#define __NR_ftruncate __NR3264_truncate
+#define __NR_ftruncate __NR3264_ftruncate
#define __NR_lseek __NR3264_lseek
#define __NR_sendfile __NR3264_sendfile
#define __NR_newfstatat __NR3264_fstatat
@@ -818,7 +822,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall)
#define __NR_statfs64 __NR3264_statfs
#define __NR_fstatfs64 __NR3264_fstatfs
#define __NR_truncate64 __NR3264_truncate
-#define __NR_ftruncate64 __NR3264_truncate
+#define __NR_ftruncate64 __NR3264_ftruncate
#define __NR_llseek __NR3264_lseek
#define __NR_sendfile64 __NR3264_sendfile
#define __NR_fstatat64 __NR3264_fstatat
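
Besides renumbering __NR_syscalls, the hunk also fixes __NR_ftruncate and __NR_ftruncate64, which previously aliased the truncate numbers. recvmmsg(), wired up as syscall 243 above, receives several datagrams in one call; a hedged userspace sketch follows (batch size and flags are illustrative, and a C library of the same vintage may not yet provide the wrapper):

#define _GNU_SOURCE
#include <sys/socket.h>

/* Drain up to 8 already-queued datagrams; returns how many arrived,
 * or -1 on error. */
int drain_some(int sock, struct mmsghdr *msgs)
{
	return recvmmsg(sock, msgs, 8, MSG_DONTWAIT, NULL);
}
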
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index b6e818f4b247..67e652068e0e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -52,8 +52,12 @@
#define LOAD_OFFSET 0
#endif
-#ifndef VMLINUX_SYMBOL
-#define VMLINUX_SYMBOL(_sym_) _sym_
+#ifndef SYMBOL_PREFIX
+#define VMLINUX_SYMBOL(sym) sym
+#else
+#define PASTE2(x,y) x##y
+#define PASTE(x,y) PASTE2(x,y)
+#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
#endif
/* Align . to a 8 byte boundary equals to maximum function alignment. */
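
The rewritten VMLINUX_SYMBOL() prepends SYMBOL_PREFIX to symbol names used in the linker script on architectures whose toolchain prefixes C symbols; the PASTE()/PASTE2() indirection makes the preprocessor expand SYMBOL_PREFIX before the ## pasting happens. A hedged illustration (the underscore prefix is an assumption for the example):

#define SYMBOL_PREFIX _		/* what such an architecture would define */
#define PASTE2(x,y) x##y
#define PASTE(x,y) PASTE2(x,y)
#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)

/* VMLINUX_SYMBOL(jiffies) now expands to _jiffies; with a single-level
 * macro, x##y would paste the literal token SYMBOL_PREFIX instead. */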