Diffstat (limited to 'arch/sparc/vdso')
-rw-r--r--   arch/sparc/vdso/Makefile               33
-rw-r--r--   arch/sparc/vdso/checkundef.sh          10
-rw-r--r--   arch/sparc/vdso/vclock_gettime.c      221
-rw-r--r--   arch/sparc/vdso/vdso-layout.lds.S       6
-rw-r--r--   arch/sparc/vdso/vdso.lds.S              2
-rw-r--r--   arch/sparc/vdso/vdso2c.c                6
-rw-r--r--   arch/sparc/vdso/vdso2c.h                1
-rw-r--r--   arch/sparc/vdso/vdso32/vdso32.lds.S     2
-rw-r--r--   arch/sparc/vdso/vma.c                 237
9 files changed, 409 insertions(+), 109 deletions(-)
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index dc85570d8839..a6e18ca4cc18 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -33,10 +33,8 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
CPPFLAGS_vdso.lds += -P -C
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
- -Wl,--no-undefined \
- -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
- $(DISABLE_LTO)
+VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 --no-undefined \
+ -z max-page-size=8192 -z common-page-size=8192
$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
@@ -54,13 +52,14 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
# Don't omit frame pointers for ease of userspace debugging, but do
# optimize sibling calls.
#
-CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables \
- -m64 -ffixed-g2 -ffixed-g3 -fcall-used-g4 -fcall-used-g5 -ffixed-g6 \
- -ffixed-g7 $(filter -g%,$(KBUILD_CFLAGS)) \
- $(call cc-option, -fno-stack-protector) -fno-omit-frame-pointer \
- -foptimize-sibling-calls -DBUILD_VDSO
+CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+ $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+ -fno-omit-frame-pointer -foptimize-sibling-calls \
+ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
-$(vobjs): KBUILD_CFLAGS += $(CFL)
+SPARC_REG_CFLAGS = -ffixed-g4 -ffixed-g5 -fcall-used-g5 -fcall-used-g7
+
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -73,7 +72,7 @@ $(obj)/%.so: $(obj)/%.so.dbg
$(call if_changed,objcopy)
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf32_sparc,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf32_sparc -soname linux-gate.so.1
#This makes sure the $(obj) subdirectory exists even though vdso32/
#is not a kbuild sub-make subdirectory
@@ -91,7 +90,8 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 := $(filter-out -mcmodel=medlow,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic -mno-app-regs -ffixed-g7
+KBUILD_CFLAGS_32 := $(filter-out $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic
KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
@@ -109,12 +109,13 @@ $(obj)/vdso32.so.dbg: FORCE \
# The DSO images are built using a special linker script.
#
quiet_cmd_vdso = VDSO $@
- cmd_vdso = $(CC) -nostdlib -o $@ \
+ cmd_vdso = $(LD) -nostdlib -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
+ -T $(filter %.lds,$^) $(filter %.o,$^) && \
+ sh $(srctree)/$(src)/checkundef.sh '$(OBJDUMP)' '$@'
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
- $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic
+VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
+ $(call ld-option, --build-id) -Bsymbolic
GCOV_PROFILE := n
#
diff --git a/arch/sparc/vdso/checkundef.sh b/arch/sparc/vdso/checkundef.sh
new file mode 100644
index 000000000000..2d85876ffc32
--- /dev/null
+++ b/arch/sparc/vdso/checkundef.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+objdump="$1"
+file="$2"
+$objdump -t "$file" | grep '*UND*' | grep -v '#scratch' > /dev/null 2>&1
+if [ $? -eq 1 ]; then
+ exit 0
+else
+ echo "$file: undefined symbols found" >&2
+ exit 1
+fi
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
index 3feb3d960ca5..55662c3b4513 100644
--- a/arch/sparc/vdso/vclock_gettime.c
+++ b/arch/sparc/vdso/vclock_gettime.c
@@ -12,11 +12,6 @@
* Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
*/
-/* Disable profiling for userspace code: */
-#ifndef DISABLE_BRANCH_PROFILING
-#define DISABLE_BRANCH_PROFILING
-#endif
-
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/string.h>
@@ -26,16 +21,19 @@
#include <asm/clocksource.h>
#include <asm/vvar.h>
-#undef TICK_PRIV_BIT
#ifdef CONFIG_SPARC64
-#define TICK_PRIV_BIT (1UL << 63)
-#else
-#define TICK_PRIV_BIT (1ULL << 63)
-#endif
-
#define SYSCALL_STRING \
"ta 0x6d;" \
- "sub %%g0, %%o0, %%o0;" \
+ "bcs,a 1f;" \
+ " sub %%g0, %%o0, %%o0;" \
+ "1:"
+#else
+#define SYSCALL_STRING \
+ "ta 0x10;" \
+ "bcs,a 1f;" \
+ " sub %%g0, %%o0, %%o0;" \
+ "1:"
+#endif
#define SYSCALL_CLOBBERS \
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
@@ -50,24 +48,22 @@
* Compute the vvar page's address in the process address space, and return it
* as a pointer to the vvar_data.
*/
-static notrace noinline struct vvar_data *
-get_vvar_data(void)
+notrace static __always_inline struct vvar_data *get_vvar_data(void)
{
unsigned long ret;
/*
- * vdso data page is the first vDSO page so grab the return address
+ * vdso data page is the first vDSO page so grab the PC
* and move up a page to get to the data page.
*/
- ret = (unsigned long)__builtin_return_address(0);
+ __asm__("rd %%pc, %0" : "=r" (ret));
ret &= ~(8192 - 1);
ret -= 8192;
return (struct vvar_data *) ret;
}
-static notrace long
-vdso_fallback_gettime(long clock, struct timespec *ts)
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
register long num __asm__("g1") = __NR_clock_gettime;
register long o0 __asm__("o0") = clock;
@@ -78,8 +74,7 @@ vdso_fallback_gettime(long clock, struct timespec *ts)
return o0;
}
-static notrace __always_inline long
-vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+notrace static long vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
{
register long num __asm__("g1") = __NR_gettimeofday;
register long o0 __asm__("o0") = (long) tv;
@@ -91,38 +86,44 @@ vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
}
#ifdef CONFIG_SPARC64
-static notrace noinline u64
-vread_tick(void) {
+notrace static __always_inline u64 vread_tick(void)
+{
+ u64 ret;
+
+ __asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
+ return ret;
+}
+
+notrace static __always_inline u64 vread_tick_stick(void)
+{
u64 ret;
- __asm__ __volatile__("rd %%asr24, %0 \n"
- ".section .vread_tick_patch, \"ax\" \n"
- "rd %%tick, %0 \n"
- ".previous \n"
- : "=&r" (ret));
- return ret & ~TICK_PRIV_BIT;
+ __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
+ return ret;
}
#else
-static notrace noinline u64
-vread_tick(void)
+notrace static __always_inline u64 vread_tick(void)
{
- unsigned int lo, hi;
-
- __asm__ __volatile__("rd %%asr24, %%g1\n\t"
- "srlx %%g1, 32, %1\n\t"
- "srl %%g1, 0, %0\n"
- ".section .vread_tick_patch, \"ax\" \n"
- "rd %%tick, %%g1\n"
- ".previous \n"
- : "=&r" (lo), "=&r" (hi)
- :
- : "g1");
- return lo | ((u64)hi << 32);
+ register unsigned long long ret asm("o4");
+
+ __asm__ __volatile__("rd %%tick, %L0\n\t"
+ "srlx %L0, 32, %H0"
+ : "=r" (ret));
+ return ret;
+}
+
+notrace static __always_inline u64 vread_tick_stick(void)
+{
+ register unsigned long long ret asm("o4");
+
+ __asm__ __volatile__("rd %%asr24, %L0\n\t"
+ "srlx %L0, 32, %H0"
+ : "=r" (ret));
+ return ret;
}
#endif
-static notrace inline u64
-vgetsns(struct vvar_data *vvar)
+notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
{
u64 v;
u64 cycles;
@@ -132,13 +133,22 @@ vgetsns(struct vvar_data *vvar)
return v * vvar->clock.mult;
}
-static notrace noinline int
-do_realtime(struct vvar_data *vvar, struct timespec *ts)
+notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar)
+{
+ u64 v;
+ u64 cycles;
+
+ cycles = vread_tick_stick();
+ v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
+ return v * vvar->clock.mult;
+}
+
+notrace static __always_inline int do_realtime(struct vvar_data *vvar,
+ struct timespec *ts)
{
unsigned long seq;
u64 ns;
- ts->tv_nsec = 0;
do {
seq = vvar_read_begin(vvar);
ts->tv_sec = vvar->wall_time_sec;
@@ -147,18 +157,38 @@ do_realtime(struct vvar_data *vvar, struct timespec *ts)
ns >>= vvar->clock.shift;
} while (unlikely(vvar_read_retry(vvar, seq)));
- timespec_add_ns(ts, ns);
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
return 0;
}
-static notrace noinline int
-do_monotonic(struct vvar_data *vvar, struct timespec *ts)
+notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
+ struct timespec *ts)
+{
+ unsigned long seq;
+ u64 ns;
+
+ do {
+ seq = vvar_read_begin(vvar);
+ ts->tv_sec = vvar->wall_time_sec;
+ ns = vvar->wall_time_snsec;
+ ns += vgetsns_stick(vvar);
+ ns >>= vvar->clock.shift;
+ } while (unlikely(vvar_read_retry(vvar, seq)));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
+ struct timespec *ts)
{
unsigned long seq;
u64 ns;
- ts->tv_nsec = 0;
do {
seq = vvar_read_begin(vvar);
ts->tv_sec = vvar->monotonic_time_sec;
@@ -167,13 +197,34 @@ do_monotonic(struct vvar_data *vvar, struct timespec *ts)
ns >>= vvar->clock.shift;
} while (unlikely(vvar_read_retry(vvar, seq)));
- timespec_add_ns(ts, ns);
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
+ struct timespec *ts)
+{
+ unsigned long seq;
+ u64 ns;
+
+ do {
+ seq = vvar_read_begin(vvar);
+ ts->tv_sec = vvar->monotonic_time_sec;
+ ns = vvar->monotonic_time_snsec;
+ ns += vgetsns_stick(vvar);
+ ns >>= vvar->clock.shift;
+ } while (unlikely(vvar_read_retry(vvar, seq)));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
return 0;
}
-static notrace noinline int
-do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
+notrace static int do_realtime_coarse(struct vvar_data *vvar,
+ struct timespec *ts)
{
unsigned long seq;
@@ -185,8 +236,8 @@ do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
return 0;
}
-static notrace noinline int
-do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts)
+notrace static int do_monotonic_coarse(struct vvar_data *vvar,
+ struct timespec *ts)
{
unsigned long seq;
@@ -228,6 +279,31 @@ clock_gettime(clockid_t, struct timespec *)
__attribute__((weak, alias("__vdso_clock_gettime")));
notrace int
+__vdso_clock_gettime_stick(clockid_t clock, struct timespec *ts)
+{
+ struct vvar_data *vvd = get_vvar_data();
+
+ switch (clock) {
+ case CLOCK_REALTIME:
+ if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+ break;
+ return do_realtime_stick(vvd, ts);
+ case CLOCK_MONOTONIC:
+ if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+ break;
+ return do_monotonic_stick(vvd, ts);
+ case CLOCK_REALTIME_COARSE:
+ return do_realtime_coarse(vvd, ts);
+ case CLOCK_MONOTONIC_COARSE:
+ return do_monotonic_coarse(vvd, ts);
+ }
+ /*
+ * Unknown clock ID? Fall back to the syscall.
+ */
+ return vdso_fallback_gettime(clock, ts);
+}
+
+notrace int
__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
struct vvar_data *vvd = get_vvar_data();
@@ -262,3 +338,36 @@ __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
int
gettimeofday(struct timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
+
+notrace int
+__vdso_gettimeofday_stick(struct timeval *tv, struct timezone *tz)
+{
+ struct vvar_data *vvd = get_vvar_data();
+
+ if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
+ if (likely(tv != NULL)) {
+ union tstv_t {
+ struct timespec ts;
+ struct timeval tv;
+ } *tstv = (union tstv_t *) tv;
+ do_realtime_stick(vvd, &tstv->ts);
+ /*
+ * Assign before dividing to ensure that the division is
+ * done in the type of tv_usec, not tv_nsec.
+ *
+ * There cannot be > 1 billion nsec in a second:
+ * do_realtime() has already distributed such overflow
+ * into tv_sec. So we can assign it to an int safely.
+ */
+ tstv->tv.tv_usec = tstv->ts.tv_nsec;
+ tstv->tv.tv_usec /= 1000;
+ }
+ if (unlikely(tz != NULL)) {
+ /* Avoid memcpy. Some old compilers fail to inline it */
+ tz->tz_minuteswest = vvd->tz_minuteswest;
+ tz->tz_dsttime = vvd->tz_dsttime;
+ }
+ return 0;
+ }
+ return vdso_fallback_gettimeofday(tv, tz);
+}
diff --git a/arch/sparc/vdso/vdso-layout.lds.S b/arch/sparc/vdso/vdso-layout.lds.S
index f2c83abaca12..d31e57e8a3bb 100644
--- a/arch/sparc/vdso/vdso-layout.lds.S
+++ b/arch/sparc/vdso/vdso-layout.lds.S
@@ -73,12 +73,6 @@ SECTIONS
.text : { *(.text*) } :text =0x90909090,
- .vread_tick_patch : {
- vread_tick_patch_start = .;
- *(.vread_tick_patch)
- vread_tick_patch_end = .;
- }
-
/DISCARD/ : {
*(.discard)
*(.discard.*)
diff --git a/arch/sparc/vdso/vdso.lds.S b/arch/sparc/vdso/vdso.lds.S
index f3caa29a331c..629ab6900df7 100644
--- a/arch/sparc/vdso/vdso.lds.S
+++ b/arch/sparc/vdso/vdso.lds.S
@@ -18,8 +18,10 @@ VERSION {
global:
clock_gettime;
__vdso_clock_gettime;
+ __vdso_clock_gettime_stick;
gettimeofday;
__vdso_gettimeofday;
+ __vdso_gettimeofday_stick;
local: *;
};
}
diff --git a/arch/sparc/vdso/vdso2c.c b/arch/sparc/vdso/vdso2c.c
index 9f5b1cd6d51d..ab7504176a7f 100644
--- a/arch/sparc/vdso/vdso2c.c
+++ b/arch/sparc/vdso/vdso2c.c
@@ -63,9 +63,6 @@ enum {
sym_vvar_start,
sym_VDSO_FAKE_SECTION_TABLE_START,
sym_VDSO_FAKE_SECTION_TABLE_END,
- sym_vread_tick,
- sym_vread_tick_patch_start,
- sym_vread_tick_patch_end
};
struct vdso_sym {
@@ -81,9 +78,6 @@ struct vdso_sym required_syms[] = {
[sym_VDSO_FAKE_SECTION_TABLE_END] = {
"VDSO_FAKE_SECTION_TABLE_END", 0
},
- [sym_vread_tick] = {"vread_tick", 1},
- [sym_vread_tick_patch_start] = {"vread_tick_patch_start", 1},
- [sym_vread_tick_patch_end] = {"vread_tick_patch_end", 1}
};
__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
diff --git a/arch/sparc/vdso/vdso2c.h b/arch/sparc/vdso/vdso2c.h
index 808decb0f7be..60d69acc748f 100644
--- a/arch/sparc/vdso/vdso2c.h
+++ b/arch/sparc/vdso/vdso2c.h
@@ -17,7 +17,6 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
unsigned long mapping_size;
int i;
unsigned long j;
-
ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr;
ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
ELF(Dyn) *dyn = 0, *dyn_end = 0;
diff --git a/arch/sparc/vdso/vdso32/vdso32.lds.S b/arch/sparc/vdso/vdso32/vdso32.lds.S
index 53575ee154c4..218930fdff03 100644
--- a/arch/sparc/vdso/vdso32/vdso32.lds.S
+++ b/arch/sparc/vdso/vdso32/vdso32.lds.S
@@ -17,8 +17,10 @@ VERSION {
global:
clock_gettime;
__vdso_clock_gettime;
+ __vdso_clock_gettime_stick;
gettimeofday;
__vdso_gettimeofday;
+ __vdso_gettimeofday_stick;
local: *;
};
}
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
index f51595f861b8..154fe8adc090 100644
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -16,6 +16,8 @@
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
+#include <asm/cacheflush.h>
+#include <asm/spitfire.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
@@ -40,20 +42,221 @@ static struct vm_special_mapping vdso_mapping32 = {
struct vvar_data *vvar_data;
-#define SAVE_INSTR_SIZE 4
+struct vdso_elfinfo32 {
+ Elf32_Ehdr *hdr;
+ Elf32_Sym *dynsym;
+ unsigned long dynsymsize;
+ const char *dynstr;
+ unsigned long text;
+};
+
+struct vdso_elfinfo64 {
+ Elf64_Ehdr *hdr;
+ Elf64_Sym *dynsym;
+ unsigned long dynsymsize;
+ const char *dynstr;
+ unsigned long text;
+};
+
+struct vdso_elfinfo {
+ union {
+ struct vdso_elfinfo32 elf32;
+ struct vdso_elfinfo64 elf64;
+ } u;
+};
+
+static void *one_section64(struct vdso_elfinfo64 *e, const char *name,
+ unsigned long *size)
+{
+ const char *snames;
+ Elf64_Shdr *shdrs;
+ unsigned int i;
+
+ shdrs = (void *)e->hdr + e->hdr->e_shoff;
+ snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < e->hdr->e_shnum; i++) {
+ if (!strcmp(snames+shdrs[i].sh_name, name)) {
+ if (size)
+ *size = shdrs[i].sh_size;
+ return (void *)e->hdr + shdrs[i].sh_offset;
+ }
+ }
+ return NULL;
+}
+
+static int find_sections64(const struct vdso_image *image, struct vdso_elfinfo *_e)
+{
+ struct vdso_elfinfo64 *e = &_e->u.elf64;
+
+ e->hdr = image->data;
+ e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize);
+ e->dynstr = one_section64(e, ".dynstr", NULL);
+
+ if (!e->dynsym || !e->dynstr) {
+ pr_err("VDSO64: Missing symbol sections.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) {
+ Elf64_Sym *s = &e->dynsym[i];
+ if (s->st_name == 0)
+ continue;
+ if (!strcmp(e->dynstr + s->st_name, name))
+ return s;
+ }
+ return NULL;
+}
+
+static int patchsym64(struct vdso_elfinfo *_e, const char *orig,
+ const char *new)
+{
+ struct vdso_elfinfo64 *e = &_e->u.elf64;
+ Elf64_Sym *osym = find_sym64(e, orig);
+ Elf64_Sym *nsym = find_sym64(e, new);
+
+ if (!nsym || !osym) {
+ pr_err("VDSO64: Missing symbols.\n");
+ return -ENODEV;
+ }
+ osym->st_value = nsym->st_value;
+ osym->st_size = nsym->st_size;
+ osym->st_info = nsym->st_info;
+ osym->st_other = nsym->st_other;
+ osym->st_shndx = nsym->st_shndx;
+
+ return 0;
+}
+
+static void *one_section32(struct vdso_elfinfo32 *e, const char *name,
+ unsigned long *size)
+{
+ const char *snames;
+ Elf32_Shdr *shdrs;
+ unsigned int i;
+
+ shdrs = (void *)e->hdr + e->hdr->e_shoff;
+ snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < e->hdr->e_shnum; i++) {
+ if (!strcmp(snames+shdrs[i].sh_name, name)) {
+ if (size)
+ *size = shdrs[i].sh_size;
+ return (void *)e->hdr + shdrs[i].sh_offset;
+ }
+ }
+ return NULL;
+}
+
+static int find_sections32(const struct vdso_image *image, struct vdso_elfinfo *_e)
+{
+ struct vdso_elfinfo32 *e = &_e->u.elf32;
+
+ e->hdr = image->data;
+ e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize);
+ e->dynstr = one_section32(e, ".dynstr", NULL);
+
+ if (!e->dynsym || !e->dynstr) {
+ pr_err("VDSO32: Missing symbol sections.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) {
+ Elf32_Sym *s = &e->dynsym[i];
+ if (s->st_name == 0)
+ continue;
+ if (!strcmp(e->dynstr + s->st_name, name))
+ return s;
+ }
+ return NULL;
+}
+
+static int patchsym32(struct vdso_elfinfo *_e, const char *orig,
+ const char *new)
+{
+ struct vdso_elfinfo32 *e = &_e->u.elf32;
+ Elf32_Sym *osym = find_sym32(e, orig);
+ Elf32_Sym *nsym = find_sym32(e, new);
+
+ if (!nsym || !osym) {
+ pr_err("VDSO32: Missing symbols.\n");
+ return -ENODEV;
+ }
+ osym->st_value = nsym->st_value;
+ osym->st_size = nsym->st_size;
+ osym->st_info = nsym->st_info;
+ osym->st_other = nsym->st_other;
+ osym->st_shndx = nsym->st_shndx;
+
+ return 0;
+}
+
+static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e,
+ bool elf64)
+{
+ if (elf64)
+ return find_sections64(image, e);
+ else
+ return find_sections32(image, e);
+}
+
+static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig,
+ const char *new_target, bool elf64)
+{
+ if (elf64)
+ return patchsym64(e, orig, new_target);
+ else
+ return patchsym32(e, orig, new_target);
+}
+
+static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64)
+{
+ int err;
+
+ err = find_sections(image, e, elf64);
+ if (err)
+ return err;
+
+ err = patch_one_symbol(e,
+ "__vdso_gettimeofday",
+ "__vdso_gettimeofday_stick", elf64);
+ if (err)
+ return err;
+
+ return patch_one_symbol(e,
+ "__vdso_clock_gettime",
+ "__vdso_clock_gettime_stick", elf64);
+}
/*
* Allocate pages for the vdso and vvar, and copy in the vdso text from the
* kernel image.
*/
int __init init_vdso_image(const struct vdso_image *image,
- struct vm_special_mapping *vdso_mapping)
+ struct vm_special_mapping *vdso_mapping, bool elf64)
{
- int i;
+ int cnpages = (image->size) / PAGE_SIZE;
struct page *dp, **dpp = NULL;
- int dnpages = 0;
struct page *cp, **cpp = NULL;
- int cnpages = (image->size) / PAGE_SIZE;
+ struct vdso_elfinfo ei;
+ int i, dnpages = 0;
+
+ if (tlb_type != spitfire) {
+ int err = stick_patch(image, &ei, elf64);
+ if (err)
+ return err;
+ }
/*
* First, the vdso text. This is initialized data, an integral number of
@@ -68,22 +271,6 @@ int __init init_vdso_image(const struct vdso_image *image,
if (!cpp)
goto oom;
- if (vdso_fix_stick) {
- /*
- * If the system uses %tick instead of %stick, patch the VDSO
- * with instruction reading %tick instead of %stick.
- */
- unsigned int j, k = SAVE_INSTR_SIZE;
- unsigned char *data = image->data;
-
- for (j = image->sym_vread_tick_patch_start;
- j < image->sym_vread_tick_patch_end; j++) {
-
- data[image->sym_vread_tick + k] = data[j];
- k++;
- }
- }
-
for (i = 0; i < cnpages; i++) {
cp = alloc_page(GFP_KERNEL);
if (!cp)
@@ -146,13 +333,13 @@ static int __init init_vdso(void)
{
int err = 0;
#ifdef CONFIG_SPARC64
- err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
+ err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64, true);
if (err)
return err;
#endif
#ifdef CONFIG_COMPAT
- err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
+ err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32, false);
#endif
return err;
@@ -262,7 +449,9 @@ static __init int vdso_setup(char *s)
unsigned long val;
err = kstrtoul(s, 10, &val);
+ if (err)
+ return err;
vdso_enabled = val;
- return err;
+ return 0;
}
__setup("vdso=", vdso_setup);