Diffstat (limited to 'arch/x86/entry')
-rw-r--r--  arch/x86/entry/entry_32.S               |  2
-rw-r--r--  arch/x86/entry/entry_64.S               |  4
-rw-r--r--  arch/x86/entry/syscalls/syscall_32.tbl  |  3
-rw-r--r--  arch/x86/entry/syscalls/syscall_64.tbl  |  3
-rw-r--r--  arch/x86/entry/syscalls/syscalltbl.sh   | 15
-rw-r--r--  arch/x86/entry/thunk_32.S               |  3
-rw-r--r--  arch/x86/entry/thunk_64.S               |  3
-rw-r--r--  arch/x86/entry/vdso/vclock_gettime.c    |  2
8 files changed, 27 insertions(+), 8 deletions(-)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index b75a8bcd2d23..21b352a11b49 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/export.h>
.section .entry.text, "ax"
@@ -991,6 +992,7 @@ trace:
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
+EXPORT_SYMBOL(mcount)
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
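The EXPORT_SYMBOL() used in these assembly files comes from the newly included <asm/export.h>, which lets a .S file export a symbol right next to its definition instead of listing it in a separate C file. As a point of comparison only (not part of this patch, and my_helper is a made-up name), the equivalent C pattern looks like:

/* C-side analogue of what asm/export.h enables in .S files:
 * the export sits immediately after the definition. */
#include <linux/export.h>

void my_helper(void)            /* hypothetical function, for illustration */
{
	/* ... */
}
EXPORT_SYMBOL(my_helper);       /* makes the symbol usable from modules */
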
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index fee1d95902b5..ef766a358b37 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -35,6 +35,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
+#include <asm/export.h>
#include <linux/err.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
@@ -875,6 +876,7 @@ ENTRY(native_load_gs_index)
popfq
ret
END(native_load_gs_index)
+EXPORT_SYMBOL(native_load_gs_index)
_ASM_EXTABLE(.Lgs_change, bad_gs)
.section .fixup, "ax"
@@ -1148,7 +1150,7 @@ END(error_entry)
/*
- * On entry, EBS is a "return to kernel mode" flag:
+ * On entry, EBX is a "return to kernel mode" flag:
* 1: already in kernel mode, don't need SWAPGS
* 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
*/
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index f848572169ea..2b3618542544 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -386,3 +386,6 @@
377 i386 copy_file_range sys_copy_file_range
378 i386 preadv2 sys_preadv2 compat_sys_preadv2
379 i386 pwritev2 sys_pwritev2 compat_sys_pwritev2
+380 i386 pkey_mprotect sys_pkey_mprotect
+381 i386 pkey_alloc sys_pkey_alloc
+382 i386 pkey_free sys_pkey_free
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index e9ce9c7c39b4..e93ef0b38db8 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -335,6 +335,9 @@
326 common copy_file_range sys_copy_file_range
327 64 preadv2 sys_preadv2
328 64 pwritev2 sys_pwritev2
+329 common pkey_mprotect sys_pkey_mprotect
+330 common pkey_alloc sys_pkey_alloc
+331 common pkey_free sys_pkey_free
#
# x32-specific system call numbers start at 512 to avoid cache impact
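Together with the 32-bit table above, this wires up the three memory-protection-key syscalls (380-382 on i386, 329-331 on x86-64). A minimal user-space sketch of how they might be invoked -- assuming raw syscall(2) because libc wrappers may not exist yet, with the x86-64 numbers from the table used only as a fallback -- could look like this:

/* Illustrative sketch, not part of the patch: exercising the new
 * pkey_* syscalls through syscall(2) on x86-64. */
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#ifndef SYS_pkey_alloc                  /* older headers may lack these */
#define SYS_pkey_mprotect 329
#define SYS_pkey_alloc    330
#define SYS_pkey_free     331
#endif

int main(void)
{
	long pkey = syscall(SYS_pkey_alloc, 0UL, 0UL);  /* flags, initial access rights */
	if (pkey < 0) {
		perror("pkey_alloc");           /* e.g. ENOSYS on older kernels */
		return 1;
	}

	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* tag the mapping with the allocated protection key */
	if (syscall(SYS_pkey_mprotect, p, 4096UL, PROT_READ | PROT_WRITE, pkey) < 0)
		perror("pkey_mprotect");

	syscall(SYS_pkey_free, pkey);
	return 0;
}
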
diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
index cd3d3015d7df..751d1f992630 100644
--- a/arch/x86/entry/syscalls/syscalltbl.sh
+++ b/arch/x86/entry/syscalls/syscalltbl.sh
@@ -10,8 +10,11 @@ syscall_macro() {
# Entry can be either just a function name or "function/qualifier"
real_entry="${entry%%/*}"
- qualifier="${entry:${#real_entry}}" # Strip the function name
- qualifier="${qualifier:1}" # Strip the slash, if any
+ if [ "$entry" = "$real_entry" ]; then
+ qualifier=
+ else
+ qualifier=${entry#*/}
+ fi
echo "__SYSCALL_${abi}($nr, $real_entry, $qualifier)"
}
@@ -22,7 +25,7 @@ emit() {
entry="$3"
compat="$4"
- if [ "$abi" == "64" -a -n "$compat" ]; then
+ if [ "$abi" = "64" -a -n "$compat" ]; then
echo "a compat entry for a 64-bit syscall makes no sense" >&2
exit 1
fi
@@ -45,17 +48,17 @@ emit() {
grep '^[0-9]' "$in" | sort -n | (
while read nr abi name entry compat; do
abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
- if [ "$abi" == "COMMON" -o "$abi" == "64" ]; then
+ if [ "$abi" = "COMMON" -o "$abi" = "64" ]; then
# COMMON is the same as 64, except that we don't expect X32
# programs to use it. Our expectation has nothing to do with
# any generated code, so treat them the same.
emit 64 "$nr" "$entry" "$compat"
- elif [ "$abi" == "X32" ]; then
+ elif [ "$abi" = "X32" ]; then
# X32 is equivalent to 64 on an X32-compatible kernel.
echo "#ifdef CONFIG_X86_X32_ABI"
emit 64 "$nr" "$entry" "$compat"
echo "#endif"
- elif [ "$abi" == "I386" ]; then
+ elif [ "$abi" = "I386" ]; then
emit "$abi" "$nr" "$entry" "$compat"
else
echo "Unknown abi $abi" >&2
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index e5a17114a8c4..fee6bc79b987 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <asm/asm.h>
+#include <asm/export.h>
/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
@@ -36,5 +37,7 @@
#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+ EXPORT_SYMBOL(___preempt_schedule)
+ EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index 627ecbcb2e62..be36bf4e0957 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include "calling.h"
#include <asm/asm.h>
+#include <asm/export.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
@@ -49,6 +50,8 @@
#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+ EXPORT_SYMBOL(___preempt_schedule)
+ EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) \
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 94d54d0defa7..02223cb4bcfd 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -129,7 +129,7 @@ static notrace cycle_t vread_pvclock(int *mode)
return 0;
}
- ret = __pvclock_read_cycles(pvti);
+ ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
} while (pvclock_read_retry(pvti, version));
/* refer to vread_tsc() comment for rationale */
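The changed call above sits inside a version-check-and-retry loop: the version is sampled before the read, and the read is redone if the host updated the time info concurrently. Purely as an illustration of that pattern -- the structure, field names and helpers below are invented for the sketch and are not the kernel's pvclock types -- the shape is roughly:

/* Generic version/retry read loop, illustrating the construct the
 * changed line lives in.  Everything here is invented for the sketch. */
#include <stdint.h>

struct sample {
	volatile uint32_t version;      /* odd while the writer is mid-update */
	uint64_t          value;        /* datum being published */
};

static inline void barrier_sketch(void)
{
	__asm__ __volatile__("" ::: "memory");  /* compiler barrier stand-in */
}

static uint64_t read_sample(const struct sample *s)
{
	uint32_t version;
	uint64_t v;

	do {
		version = s->version;   /* snapshot the version first */
		barrier_sketch();
		v = s->value;           /* kernel: __pvclock_read_cycles(pvti, rdtsc_ordered()) */
		barrier_sketch();
		/* retry if the writer was active or finished an update meanwhile */
	} while ((version & 1) || version != s->version);

	return v;
}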