Diffstat (limited to 'arch/x86')
 arch/x86/crypto/sha512_ssse3_glue.c       |   2
 arch/x86/include/asm/kvm_host.h           |   4
 arch/x86/include/asm/ptrace.h             |  16
 arch/x86/kernel/cpu/perf_event_intel.c    |   9
 arch/x86/kernel/entry_32.S                |  10
 arch/x86/kernel/signal.c                  |   2
 arch/x86/kernel/tsc.c                     |   4
 arch/x86/kvm/svm.c                        |   1
 arch/x86/kvm/x86.c                        |   2
 arch/x86/vdso/Makefile                    |  24
 arch/x86/vdso/vclock_gettime.c            |   3
 arch/x86/vdso/vdso-fakesections.c         |  41
 arch/x86/vdso/vdso-layout.lds.S           |  64
 arch/x86/vdso/vdso.lds.S                  |   2
 arch/x86/vdso/vdso2c.c                    |  73
 arch/x86/vdso/vdso2c.h                    | 202
 arch/x86/vdso/vdso32/vdso-fakesections.c  |   1
 arch/x86/vdso/vdsox32.lds.S               |   2
 arch/x86/vdso/vma.c                       |   4
 19 files changed, 336 insertions(+), 130 deletions(-)
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index f30cd10293f0..8626b03e83b7 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
 
 	/* save number of bits */
 	bits[1] = cpu_to_be64(sctx->count[0] << 3);
-	bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61;
+	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
 
 	/* Pad out to 112 mod 128 and append length */
 	index = sctx->count[0] & 0x7f;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 49314155b66c..49205d01b9ad 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 8
+#define KVM_NR_VAR_MTRR 10
 
 #define ASYNC_PF_PER_VCPU 64
 
@@ -461,7 +461,7 @@ struct kvm_vcpu_arch {
 	bool nmi_injected;    /* Trying to inject an NMI this entry */
 
 	struct mtrr_state_type mtrr_state;
-	u32 pat;
+	u64 pat;
 
 	unsigned switch_db_regs;
 	unsigned long db[KVM_NR_DB_REGS];
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 14fd6fd75a19..6205f0c434db 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -231,6 +231,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
 
 #define ARCH_HAS_USER_SINGLE_STEP_INFO
 
+/*
+ * When hitting ptrace_stop(), we cannot return using SYSRET because
+ * that does not restore the full CPU state, only a minimal set.  The
+ * ptracer can change arbitrary register values, which is usually okay
+ * because the usual ptrace stops run off the signal delivery path which
+ * forces IRET; however, ptrace_event() stops happen in arbitrary places
+ * in the kernel and don't force IRET path.
+ *
+ * So force IRET path after a ptrace stop.
+ */
+#define arch_ptrace_stop_needed(code, info)				\
+({									\
+	set_thread_flag(TIF_NOTIFY_RESUME);				\
+	false;								\
+})
+
 struct user_desc;
 extern int do_get_thread_area(struct task_struct *p, int idx,
 			      struct user_desc __user *info);
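The sha512_ssse3 hunk above fixes a subtle endianness bug: the three bits that carry from count[0] into the high word of the 128-bit bit count were previously OR'd in after cpu_to_be64(), merging a native-endian value into an already byte-swapped one. A standalone sketch of the corrected computation (plain C; the kernel types and the cpu_to_be64() conversion are omitted here, the sample count is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* byte count kept as a 128-bit value in two 64-bit words */
            uint64_t count[2] = { 0x2000000000000001ULL, 0 };  /* low, high */

            /* bit count = byte count << 3; the top three bits of count[0]
             * must carry into the high word *before* any byte swapping */
            uint64_t hi = count[1] << 3 | count[0] >> 61;
            uint64_t lo = count[0] << 3;

            printf("bit count = 0x%016llx%016llx\n",
                   (unsigned long long)hi, (unsigned long long)lo);
            return 0;
    }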
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index adb02aa62af5..07846d738bdb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1382,6 +1382,15 @@ again:
 	intel_pmu_lbr_read();
 
 	/*
+	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
+	 * and clear the bit.
+	 */
+	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+		if (!status)
+			goto done;
+	}
+
+	/*
 	 * PEBS overflow sets bit 62 in the global status register
 	 */
 	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f0da82b8e634..dbaa23e78b36 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -423,9 +423,10 @@ sysenter_past_esp:
 	jnz sysenter_audit
 sysenter_do_call:
 	cmpl $(NR_syscalls), %eax
-	jae syscall_badsys
+	jae sysenter_badsys
 	call *sys_call_table(,%eax,4)
 	movl %eax,PT_EAX(%esp)
+sysenter_after_call:
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
@@ -675,7 +676,12 @@ END(syscall_fault)
 
 syscall_badsys:
 	movl $-ENOSYS,PT_EAX(%esp)
-	jmp resume_userspace
+	jmp syscall_exit
+END(syscall_badsys)
+
+sysenter_badsys:
+	movl $-ENOSYS,PT_EAX(%esp)
+	jmp sysenter_after_call
 END(syscall_badsys)
 	CFI_ENDPROC
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index a0da58db43a8..2851d63c1202 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -363,7 +363,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
 		/* Set up to return from userspace.  */
 		restorer = current->mm->context.vdso +
-			selected_vdso32->sym___kernel_sigreturn;
+			selected_vdso32->sym___kernel_rt_sigreturn;
 		if (ksig->ka.sa.sa_flags & SA_RESTORER)
 			restorer = ksig->ka.sa.sa_restorer;
 		put_user_ex(restorer, &frame->pretcode);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 57e5ce126d5a..ea030319b321 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
 			mark_tsc_unstable("cpufreq changes");
-	}
 
-	set_cyc2ns_scale(tsc_khz, freq->cpu);
+		set_cyc2ns_scale(tsc_khz, freq->cpu);
+	}
 
 	return 0;
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ec8366c5cfea..b5e994ad0135 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1462,6 +1462,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 		 */
 		if (var->unusable)
 			var->db = 0;
+		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
 		break;
 	}
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f32a02578c0d..f6449334ec45 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1898,7 +1898,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
 			break;
 		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-		if (kvm_write_guest(kvm, data,
+		if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
 				&tsc_ref, sizeof(tsc_ref)))
 			return 1;
 		mark_page_dirty(kvm, gfn);
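In the kvm/x86.c hunk just above, note what the old code passed as the guest physical write address: the raw MSR value, whose low bits hold the enable flag rather than address bits. A minimal illustration of why the gfn must be shifted back up (macro names and values as in the kernel's asm/hyperv.h; the guest frame number is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define HV_X64_MSR_TSC_REFERENCE_ENABLE        0x00000001ULL
    #define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12

    int main(void)
    {
            /* guest enables the TSC reference page at gfn 0x12345 */
            uint64_t data = (0x12345ULL << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT)
                            | HV_X64_MSR_TSC_REFERENCE_ENABLE;
            uint64_t gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;

            /* The old code handed 'data' itself to kvm_write_guest(), so
             * the enable bit skewed the write one byte past the page start. */
            printf("raw msr value: %#llx\n", (unsigned long long)data);
            printf("actual gpa:    %#llx\n",
                   (unsigned long long)(gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT));
            return 0;
    }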
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 3c0809a0631f..61b04fe36e66 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -11,7 +11,6 @@ VDSO32-$(CONFIG_COMPAT) := y
 
 # files to link into the vdso
 vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o
-vobjs-nox32 := vdso-fakesections.o
 
 # files to link into kernel
 obj-y += vma.o
@@ -67,7 +66,8 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso2c FORCE
 #
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-       -fno-omit-frame-pointer -foptimize-sibling-calls
+       -fno-omit-frame-pointer -foptimize-sibling-calls \
+       -DDISABLE_BRANCH_PROFILING
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
@@ -134,7 +134,7 @@ override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
 
 targets += vdso32/vdso32.lds
 targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
-targets += vdso32/vclock_gettime.o
+targets += vdso32/vclock_gettime.o vdso32/vdso-fakesections.o
 
 $(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
 
@@ -150,11 +150,13 @@ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
 KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
 $(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
 				 $(obj)/vdso32/vdso32.lds \
 				 $(obj)/vdso32/vclock_gettime.o \
+				 $(obj)/vdso32/vdso-fakesections.o \
 				 $(obj)/vdso32/note.o \
 				 $(obj)/vdso32/%.o
 	$(call if_changed,vdso)
@@ -169,14 +171,24 @@ quiet_cmd_vdso = VDSO    $@
 		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-	-Wl,-Bsymbolic $(LTO_CFLAGS)
+	$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
 GCOV_PROFILE := n
 
 #
-# Install the unstripped copies of vdso*.so.
+# Install the unstripped copies of vdso*.so.  If our toolchain supports
+# build-id, install .build-id links as well.
 #
 quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
-      cmd_vdso_install = cp $< $(MODLIB)/vdso/$(@:install_%=%)
+define cmd_vdso_install
+	cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
+	if readelf -n $< |grep -q 'Build ID'; then \
+	  buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+	  first=`echo $$buildid | cut -b-2`; \
+	  last=`echo $$buildid | cut -b3-`; \
+	  mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+	  ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+	fi
+endef
 
 vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index b2e4f493e5b0..9793322751e0 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -11,9 +11,6 @@
  * Check with readelf after changing.
  */
 
-/* Disable profiling for userspace code: */
-#define DISABLE_BRANCH_PROFILING
-
 #include <uapi/linux/time.h>
 #include <asm/vgtod.h>
 #include <asm/hpet.h>
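With -Wl,--build-id added to VDSO_LDFLAGS, the new install rule also populates a .build-id tree: the hex ID is split after its first two characters (the cut -b-2 / cut -b3- pair), so an image whose build ID were, say, abcdef1234 would get a symlink $(MODLIB)/vdso/.build-id/ab/cdef1234.debug pointing back at the installed image two directories up. That is the same two-level layout debuggers already search under /usr/lib/debug/.build-id for separate debug info.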
diff --git a/arch/x86/vdso/vdso-fakesections.c b/arch/x86/vdso/vdso-fakesections.c
index cb8a8d72c24b..aa5fbfab20a5 100644
--- a/arch/x86/vdso/vdso-fakesections.c
+++ b/arch/x86/vdso/vdso-fakesections.c
@@ -2,31 +2,20 @@
  * Copyright 2014 Andy Lutomirski
  * Subject to the GNU Public License, v.2
  *
- * Hack to keep broken Go programs working.
- *
- * The Go runtime had a couple of bugs: it would read the section table to try
- * to figure out how many dynamic symbols there were (it shouldn't have looked
- * at the section table at all) and, if there were no SHT_SYNDYM section table
- * entry, it would use an uninitialized value for the number of symbols.  As a
- * workaround, we supply a minimal section table.  vdso2c will adjust the
- * in-memory image so that "vdso_fake_sections" becomes the section table.
- *
- * The bug was introduced by:
- * https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31)
- * and is being addressed in the Go runtime in this issue:
- * https://code.google.com/p/go/issues/detail?id=8197
+ * String table for loadable section headers.  See vdso2c.h for why
+ * this exists.
  */
 
-#ifndef __x86_64__
-#error This hack is specific to the 64-bit vDSO
-#endif
-
-#include <linux/elf.h>
-
-extern const __visible struct elf64_shdr vdso_fake_sections[];
-const __visible struct elf64_shdr vdso_fake_sections[] = {
-	{
-		.sh_type = SHT_DYNSYM,
-		.sh_entsize = sizeof(Elf64_Sym),
-	}
-};
+const char fake_shstrtab[] __attribute__((section(".fake_shstrtab"))) =
+	".hash\0"
+	".dynsym\0"
+	".dynstr\0"
+	".gnu.version\0"
+	".gnu.version_d\0"
+	".dynamic\0"
+	".rodata\0"
+	".fake_shstrtab\0"  /* Yay, self-referential code. */
+	".note\0"
+	".eh_frame_hdr\0"
+	".eh_frame\0"
+	".text";
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
index 2ec72f651ebf..9197544eea9a 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/vdso/vdso-layout.lds.S
@@ -6,6 +6,16 @@
  * This script controls its layout.
  */
 
+#if defined(BUILD_VDSO64)
+# define SHDR_SIZE 64
+#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
+# define SHDR_SIZE 40
+#else
+# error unknown VDSO target
+#endif
+
+#define NUM_FAKE_SHDRS 13
+
 SECTIONS
 {
 	. = SIZEOF_HEADERS;
@@ -18,36 +28,53 @@ SECTIONS
 	.gnu.version_d	: { *(.gnu.version_d) }
 	.gnu.version_r	: { *(.gnu.version_r) }
 
+	.dynamic	: { *(.dynamic) }		:text	:dynamic
+
+	.rodata		: {
+		*(.rodata*)
+		*(.data*)
+		*(.sdata*)
+		*(.got.plt) *(.got)
+		*(.gnu.linkonce.d.*)
+		*(.bss*)
+		*(.dynbss*)
+		*(.gnu.linkonce.b.*)
+
+		/*
+		 * Ideally this would live in a C file, but that won't
+		 * work cleanly for x32 until we start building the x32
+		 * C code using an x32 toolchain.
+		 */
+		VDSO_FAKE_SECTION_TABLE_START = .;
+		. = . + NUM_FAKE_SHDRS * SHDR_SIZE;
+		VDSO_FAKE_SECTION_TABLE_END = .;
+	}						:text
+
+	.fake_shstrtab	: { *(.fake_shstrtab) }		:text
+
+
 	.note		: { *(.note.*) }		:text	:note
 
 	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
 	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
 
-	.dynamic	: { *(.dynamic) }		:text	:dynamic
-
-	.rodata		: { *(.rodata*) }		:text
-	.data		: {
-	      *(.data*)
-	      *(.sdata*)
-	      *(.got.plt) *(.got)
-	      *(.gnu.linkonce.d.*)
-	      *(.bss*)
-	      *(.dynbss*)
-	      *(.gnu.linkonce.b.*)
-	}
-
-	.altinstructions	: { *(.altinstructions) }
-	.altinstr_replacement	: { *(.altinstr_replacement) }
-
 	/*
-	 * Align the actual code well away from the non-instruction data.
-	 * This is the best thing for the I-cache.
+	 * Text is well-separated from actual data: there's plenty of
+	 * stuff that isn't used at runtime in between.
 	 */
-	. = ALIGN(0x100);
 	.text		: { *(.text*) }			:text	=0x90909090,
 
 	/*
+	 * At the end so that eu-elflint stays happy when vdso2c strips
+	 * these.  A better implementation would avoid allocating space
+	 * for these.
+	 */
+	.altinstructions	: { *(.altinstructions) }	:text
+	.altinstr_replacement	: { *(.altinstr_replacement) }	:text
+
+	/*
 	 * The remainder of the vDSO consists of special pages that are
 	 * shared between the kernel and userspace.  It needs to be at the
 	 * end so that it doesn't overlap the mapping of the actual
@@ -75,6 +102,7 @@ SECTIONS
 	/DISCARD/ : {
 		*(.discard)
 		*(.discard.*)
+		*(__bug_table)
 	}
 }
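A linker script cannot say sizeof(Elf64_Shdr), so SHDR_SIZE is hard-coded: 64 and 40 are the section-header sizes of the 64-bit and 32-bit ELF classes, and NUM_FAKE_SHDRS is 13 because the reserved table holds the mandatory SHT_NULL entry plus one header per name in fake_shstrtab (twelve names). A compile-time cross-check one could keep next to vdso2c, illustrative only and not part of the patch:

    /* Verify the hard-coded SHDR_SIZE values against the real ELF
     * section header sizes (requires C11 _Static_assert). */
    #include <elf.h>

    _Static_assert(sizeof(Elf64_Shdr) == 64, "SHDR_SIZE for BUILD_VDSO64");
    _Static_assert(sizeof(Elf32_Shdr) == 40, "SHDR_SIZE for BUILD_VDSO32/X32");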
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
index 75e3404c83b1..6807932643c2 100644
--- a/arch/x86/vdso/vdso.lds.S
+++ b/arch/x86/vdso/vdso.lds.S
@@ -6,6 +6,8 @@
  * the DSO.
  */
 
+#define BUILD_VDSO64
+
 #include "vdso-layout.lds.S"
 
 /*
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
index 7a6bf50f9165..238dbe82776e 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/vdso/vdso2c.c
@@ -23,6 +23,8 @@ enum {
 	sym_vvar_page,
 	sym_hpet_page,
 	sym_end_mapping,
+	sym_VDSO_FAKE_SECTION_TABLE_START,
+	sym_VDSO_FAKE_SECTION_TABLE_END,
 };
 
 const int special_pages[] = {
@@ -30,15 +32,26 @@ const int special_pages[] = {
 	sym_hpet_page,
 };
 
-char const * const required_syms[] = {
-	[sym_vvar_page] = "vvar_page",
-	[sym_hpet_page] = "hpet_page",
-	[sym_end_mapping] = "end_mapping",
-	"VDSO32_NOTE_MASK",
-	"VDSO32_SYSENTER_RETURN",
-	"__kernel_vsyscall",
-	"__kernel_sigreturn",
-	"__kernel_rt_sigreturn",
+struct vdso_sym {
+	const char *name;
+	bool export;
+};
+
+struct vdso_sym required_syms[] = {
+	[sym_vvar_page] = {"vvar_page", true},
+	[sym_hpet_page] = {"hpet_page", true},
+	[sym_end_mapping] = {"end_mapping", true},
+	[sym_VDSO_FAKE_SECTION_TABLE_START] = {
+		"VDSO_FAKE_SECTION_TABLE_START", false
+	},
+	[sym_VDSO_FAKE_SECTION_TABLE_END] = {
+		"VDSO_FAKE_SECTION_TABLE_END", false
+	},
+	{"VDSO32_NOTE_MASK", true},
+	{"VDSO32_SYSENTER_RETURN", true},
+	{"__kernel_vsyscall", true},
+	{"__kernel_sigreturn", true},
+	{"__kernel_rt_sigreturn", true},
 };
 
 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
@@ -83,37 +96,21 @@ extern void bad_put_le(void);
 
 #define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
 
-#define BITS 64
-#define GOFUNC go64
-#define Elf_Ehdr Elf64_Ehdr
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Phdr Elf64_Phdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Dyn Elf64_Dyn
+#define BITSFUNC3(name, bits) name##bits
+#define BITSFUNC2(name, bits) BITSFUNC3(name, bits)
+#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS)
+
+#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
+#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
+#define ELF_BITS 64
 #include "vdso2c.h"
-#undef BITS
-#undef GOFUNC
-#undef Elf_Ehdr
-#undef Elf_Shdr
-#undef Elf_Phdr
-#undef Elf_Sym
-#undef Elf_Dyn
-
-#define BITS 32
-#define GOFUNC go32
-#define Elf_Ehdr Elf32_Ehdr
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Phdr Elf32_Phdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Dyn Elf32_Dyn
+#undef ELF_BITS
+
+#define ELF_BITS 32
 #include "vdso2c.h"
-#undef BITS
-#undef GOFUNC
-#undef Elf_Ehdr
-#undef Elf_Shdr
-#undef Elf_Phdr
-#undef Elf_Sym
-#undef Elf_Dyn
+#undef ELF_BITS
 
 static void go(void *addr, size_t len, FILE *outfile, const char *name)
 {
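vdso2c.c now stamps out its worker once per ELF class by defining ELF_BITS and re-including vdso2c.h: BITSFUNC() pastes the width onto a function name, ELF() onto a type name. A toy reproduction of the name-pasting outside vdso2c (the puts() bodies are mine, for demonstration):

    #include <stdio.h>

    #define BITSFUNC3(name, bits) name##bits
    #define BITSFUNC2(name, bits) BITSFUNC3(name, bits)
    #define BITSFUNC(name) BITSFUNC2(name, ELF_BITS)

    #define ELF_BITS 64
    static void BITSFUNC(go)(void) { puts("go64"); }  /* expands to go64() */
    #undef ELF_BITS

    #define ELF_BITS 32
    static void BITSFUNC(go)(void) { puts("go32"); }  /* expands to go32() */
    #undef ELF_BITS

    int main(void)
    {
            go64();
            go32();
            return 0;
    }

The two-level BITSFUNC2/BITSFUNC3 indirection is what forces ELF_BITS to be expanded to its value before the ## paste happens.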
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index c6eefaf389b9..11b65d4f9414 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -4,23 +4,139 @@
  * are built for 32-bit userspace.
  */
 
-static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
+/*
+ * We're writing a section table for a few reasons:
+ *
+ * The Go runtime had a couple of bugs: it would read the section
+ * table to try to figure out how many dynamic symbols there were (it
+ * shouldn't have looked at the section table at all) and, if there
+ * were no SHT_SYNDYM section table entry, it would use an
+ * uninitialized value for the number of symbols.  An empty DYNSYM
+ * table would work, but I see no reason not to write a valid one (and
+ * keep full performance for old Go programs).  This hack is only
+ * needed on x86_64.
+ *
+ * The bug was introduced on 2012-08-31 by:
+ * https://code.google.com/p/go/source/detail?r=56ea40aac72b
+ * and was fixed on 2014-06-13 by:
+ * https://code.google.com/p/go/source/detail?r=fc1cd5e12595
+ *
+ * Binutils has issues debugging the vDSO: it reads the section table to
+ * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
+ * would break build-id if we removed the section table.  Binutils
+ * also requires that shstrndx != 0.  See:
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
+ *
+ * elfutils might not look for PT_NOTE if there is a section table at
+ * all.  I don't know whether this matters for any practical purpose.
+ *
+ * For simplicity, rather than hacking up a partial section table, we
+ * just write a mostly complete one.  We omit non-dynamic symbols,
+ * though, since they're rather large.
+ *
+ * Once binutils gets fixed, we might be able to drop this for all but
+ * the 64-bit vdso, since build-id only works in kernel RPMs, and
+ * systems that update to new enough kernel RPMs will likely update
+ * binutils in sync.  build-id has never worked for home-built kernel
+ * RPMs without manual symlinking, and I suspect that no one ever does
+ * that.
+ */
+struct BITSFUNC(fake_sections)
+{
+	ELF(Shdr) *table;
+	unsigned long table_offset;
+	int count, max_count;
+
+	int in_shstrndx;
+	unsigned long shstr_offset;
+	const char *shstrtab;
+	size_t shstrtab_len;
+
+	int out_shstrndx;
+};
+
+static unsigned int BITSFUNC(find_shname)(struct BITSFUNC(fake_sections) *out,
+					  const char *name)
+{
+	const char *outname = out->shstrtab;
+	while (outname - out->shstrtab < out->shstrtab_len) {
+		if (!strcmp(name, outname))
+			return (outname - out->shstrtab) + out->shstr_offset;
+		outname += strlen(outname) + 1;
+	}
+
+	if (*name)
+		printf("Warning: could not find output name \"%s\"\n", name);
+	return out->shstr_offset + out->shstrtab_len - 1;  /* Use a null. */
+}
+
+static void BITSFUNC(init_sections)(struct BITSFUNC(fake_sections) *out)
+{
+	if (!out->in_shstrndx)
+		fail("didn't find the fake shstrndx\n");
+
+	memset(out->table, 0, out->max_count * sizeof(ELF(Shdr)));
+
+	if (out->max_count < 1)
+		fail("we need at least two fake output sections\n");
+
+	PUT_LE(&out->table[0].sh_type, SHT_NULL);
+	PUT_LE(&out->table[0].sh_name, BITSFUNC(find_shname)(out, ""));
+
+	out->count = 1;
+}
+
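find_shname() walks the NUL-separated name blob compiled into .fake_shstrtab and returns the offset that belongs in sh_name, falling back to the trailing NUL so an unknown name resolves to the empty string. A minimal standalone model of that scan (names and table contents invented for the demo):

    #include <stdio.h>
    #include <string.h>

    static unsigned int find_name(const char *tab, size_t len, const char *name)
    {
            const char *p = tab;

            /* entries sit back-to-back, each NUL-terminated */
            while ((size_t)(p - tab) < len) {
                    if (!strcmp(name, p))
                            return p - tab;
                    p += strlen(p) + 1;
            }
            return len - 1;  /* point at the trailing NUL */
    }

    int main(void)
    {
            static const char tab[] = ".hash\0.dynsym\0.text";

            printf(".dynsym is at offset %u\n",
                   find_name(tab, sizeof(tab), ".dynsym"));
            return 0;
    }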
+static void BITSFUNC(copy_section)(struct BITSFUNC(fake_sections) *out,
+				   int in_idx, const ELF(Shdr) *in,
+				   const char *name)
+{
+	uint64_t flags = GET_LE(&in->sh_flags);
+
+	bool copy = flags & SHF_ALLOC &&
+		(GET_LE(&in->sh_size) ||
+		 (GET_LE(&in->sh_type) != SHT_RELA &&
+		  GET_LE(&in->sh_type) != SHT_REL)) &&
+		strcmp(name, ".altinstructions") &&
+		strcmp(name, ".altinstr_replacement");
+
+	if (!copy)
+		return;
+
+	if (out->count >= out->max_count)
+		fail("too many copied sections (max = %d)\n", out->max_count);
+
+	if (in_idx == out->in_shstrndx)
+		out->out_shstrndx = out->count;
+
+	out->table[out->count] = *in;
+	PUT_LE(&out->table[out->count].sh_name,
+	       BITSFUNC(find_shname)(out, name));
+
+	/* elfutils requires that a strtab have the correct type. */
+	if (!strcmp(name, ".fake_shstrtab"))
+		PUT_LE(&out->table[out->count].sh_type, SHT_STRTAB);
+
+	out->count++;
+}
+
+static void BITSFUNC(go)(void *addr, size_t len,
+			 FILE *outfile, const char *name)
 {
 	int found_load = 0;
 	unsigned long load_size = -1;  /* Work around bogus warning */
 	unsigned long data_size;
-	Elf_Ehdr *hdr = (Elf_Ehdr *)addr;
+	ELF(Ehdr) *hdr = (ELF(Ehdr) *)addr;
 	int i;
 	unsigned long j;
-	Elf_Shdr *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
 		*alt_sec = NULL;
-	Elf_Dyn *dyn = 0, *dyn_end = 0;
+	ELF(Dyn) *dyn = 0, *dyn_end = 0;
 	const char *secstrings;
 	uint64_t syms[NSYMS] = {};
-	uint64_t fake_sections_value = 0, fake_sections_size = 0;
+	struct BITSFUNC(fake_sections) fake_sections = {};
 
-	Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff));
+	ELF(Phdr) *pt = (ELF(Phdr) *)(addr + GET_LE(&hdr->e_phoff));
 
 	/* Walk the segment table. */
 	for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
@@ -51,7 +167,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 	for (i = 0; dyn + i < dyn_end &&
 		     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
 		typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
-		if (tag == DT_REL || tag == DT_RELSZ ||
+		if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
 		    tag == DT_RELENT || tag == DT_TEXTREL)
 			fail("vdso image contains dynamic relocations\n");
 	}
@@ -61,7 +177,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 		GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
 	secstrings = addr + GET_LE(&secstrings_hdr->sh_offset);
 	for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
-		Elf_Shdr *sh = addr + GET_LE(&hdr->e_shoff) +
+		ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
 			GET_LE(&hdr->e_shentsize) * i;
 		if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
 			symtab_hdr = sh;
@@ -82,29 +198,63 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 	     i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
 	     i++) {
 		int k;
-		Elf_Sym *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
+		ELF(Sym) *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
 			GET_LE(&symtab_hdr->sh_entsize) * i;
 		const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
 			GET_LE(&sym->st_name);
 
 		for (k = 0; k < NSYMS; k++) {
-			if (!strcmp(name, required_syms[k])) {
+			if (!strcmp(name, required_syms[k].name)) {
 				if (syms[k]) {
 					fail("duplicate symbol %s\n",
-					     required_syms[k]);
+					     required_syms[k].name);
 				}
 				syms[k] = GET_LE(&sym->st_value);
 			}
 		}
 
-		if (!strcmp(name, "vdso_fake_sections")) {
-			if (fake_sections_value)
-				fail("duplicate vdso_fake_sections\n");
-			fake_sections_value = GET_LE(&sym->st_value);
-			fake_sections_size = GET_LE(&sym->st_size);
+		if (!strcmp(name, "fake_shstrtab")) {
+			ELF(Shdr) *sh;
+
+			fake_sections.in_shstrndx = GET_LE(&sym->st_shndx);
+			fake_sections.shstrtab = addr + GET_LE(&sym->st_value);
+			fake_sections.shstrtab_len = GET_LE(&sym->st_size);
+			sh = addr + GET_LE(&hdr->e_shoff) +
+				GET_LE(&hdr->e_shentsize) *
+				fake_sections.in_shstrndx;
+			fake_sections.shstr_offset = GET_LE(&sym->st_value) -
+				GET_LE(&sh->sh_addr);
 		}
 	}
 
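copy_section() above is the whole section-stripping policy in one predicate: keep a section only if it is SHF_ALLOC, skip empty SHT_REL/SHT_RELA leftovers, and drop .altinstructions/.altinstr_replacement, which vdso2c records separately through alt_sec. Everything that survives gets its sh_name rewritten to point into .fake_shstrtab, and the string table itself is retyped to SHT_STRTAB on the way through so elfutils accepts it as a valid shstrtab.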
+	/* Build the output section table. */
+	if (!syms[sym_VDSO_FAKE_SECTION_TABLE_START] ||
+	    !syms[sym_VDSO_FAKE_SECTION_TABLE_END])
+		fail("couldn't find fake section table\n");
+	if ((syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
+	     syms[sym_VDSO_FAKE_SECTION_TABLE_START]) % sizeof(ELF(Shdr)))
+		fail("fake section table size isn't a multiple of sizeof(Shdr)\n");
+	fake_sections.table = addr + syms[sym_VDSO_FAKE_SECTION_TABLE_START];
+	fake_sections.table_offset = syms[sym_VDSO_FAKE_SECTION_TABLE_START];
+	fake_sections.max_count = (syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
+				   syms[sym_VDSO_FAKE_SECTION_TABLE_START]) /
+		sizeof(ELF(Shdr));
+
+	BITSFUNC(init_sections)(&fake_sections);
+	for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
+		ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
+			GET_LE(&hdr->e_shentsize) * i;
+		BITSFUNC(copy_section)(&fake_sections, i, sh,
+				       secstrings + GET_LE(&sh->sh_name));
+	}
+	if (!fake_sections.out_shstrndx)
+		fail("didn't generate shstrndx?!?\n");
+
+	PUT_LE(&hdr->e_shoff, fake_sections.table_offset);
+	PUT_LE(&hdr->e_shentsize, sizeof(ELF(Shdr)));
+	PUT_LE(&hdr->e_shnum, fake_sections.count);
+	PUT_LE(&hdr->e_shstrndx, fake_sections.out_shstrndx);
+
 	/* Validate mapping addresses. */
 	for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
 		if (!syms[i])
@@ -112,25 +262,17 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 
 		if (syms[i] % 4096)
 			fail("%s must be a multiple of 4096\n",
-			     required_syms[i]);
+			     required_syms[i].name);
 		if (syms[i] < data_size)
 			fail("%s must be after the text mapping\n",
-			     required_syms[i]);
+			     required_syms[i].name);
 		if (syms[sym_end_mapping] < syms[i] + 4096)
-			fail("%s overruns end_mapping\n", required_syms[i]);
+			fail("%s overruns end_mapping\n",
+			     required_syms[i].name);
 	}
 	if (syms[sym_end_mapping] % 4096)
 		fail("end_mapping must be a multiple of 4096\n");
 
-	/* Remove sections or use fakes */
-	if (fake_sections_size % sizeof(Elf_Shdr))
-		fail("vdso_fake_sections size is not a multiple of %ld\n",
-		     (long)sizeof(Elf_Shdr));
-	PUT_LE(&hdr->e_shoff, fake_sections_value);
-	PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
-	PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
-	PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);
-
 	if (!name) {
 		fwrite(addr, load_size, 1, outfile);
 		return;
@@ -168,9 +310,9 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 		       (unsigned long)GET_LE(&alt_sec->sh_size));
 	}
 	for (i = 0; i < NSYMS; i++) {
-		if (syms[i])
+		if (required_syms[i].export && syms[i])
 			fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n",
-				required_syms[i], syms[i]);
+				required_syms[i].name, syms[i]);
 	}
 	fprintf(outfile, "};\n");
 }
diff --git a/arch/x86/vdso/vdso32/vdso-fakesections.c b/arch/x86/vdso/vdso32/vdso-fakesections.c
new file mode 100644
index 000000000000..541468e25265
--- /dev/null
+++ b/arch/x86/vdso/vdso32/vdso-fakesections.c
@@ -0,0 +1 @@
+#include "../vdso-fakesections.c"
diff --git a/arch/x86/vdso/vdsox32.lds.S b/arch/x86/vdso/vdsox32.lds.S
index 46b991b578a8..697c11ece90c 100644
--- a/arch/x86/vdso/vdsox32.lds.S
+++ b/arch/x86/vdso/vdsox32.lds.S
@@ -6,6 +6,8 @@
  * the DSO.
  */
 
+#define BUILD_VDSOX32
+
 #include "vdso-layout.lds.S"
 
 /*
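Once the PUT_LE() calls rewrite e_shoff, e_shentsize, e_shnum and e_shstrndx, the only section table left in the image is the fake one, which now lives inside a PT_LOAD segment instead of trailing the file. Running readelf -S on the patched image should, with a reasonably current binutils, list just the SHT_NULL entry plus the copied SHF_ALLOC sections, with names resolved through the .fake_shstrtab string table.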
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index e1513c47872a..5a5176de8d0a 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -62,6 +62,9 @@ struct linux_binprm;
    Only used for the 64-bit and x32 vdsos. */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
+#ifdef CONFIG_X86_32
+	return 0;
+#else
 	unsigned long addr, end;
 	unsigned offset;
 	end = (start + PMD_SIZE - 1) & PMD_MASK;
@@ -83,6 +86,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 
 	addr = align_vdso_addr(addr);
 
 	return addr;
+#endif
 }
 
 static int map_vdso(const struct vdso_image *image, bool calculate_addr)
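vdso_addr() itself is unchanged apart from the new CONFIG_X86_32 stub: it starts from the process stack top and rounds up to a PMD boundary before picking a randomized address below it. A standalone sketch of the rounding step, assuming the usual 2 MiB PMD and a made-up stack top:

    #include <stdint.h>
    #include <stdio.h>

    #define PMD_SIZE (2ULL << 20)           /* 2 MiB */
    #define PMD_MASK (~(PMD_SIZE - 1))

    int main(void)
    {
            uint64_t start = 0x7ffd12340000ULL;  /* hypothetical stack top */
            uint64_t end = (start + PMD_SIZE - 1) & PMD_MASK;

            printf("stack top %#llx rounds up to %#llx\n",
                   (unsigned long long)start, (unsigned long long)end);
            return 0;
    }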