path: root/llvm/lib
author    Alexey Samsonov <vonosmas@gmail.com>    2014-08-27 23:06:08 +0000
committer Alexey Samsonov <vonosmas@gmail.com>    2014-08-27 23:06:08 +0000
commit    a8d2f819ad8c3137e67ff2c1b23fc3281b92c156 (patch)
tree      f8a7b19fc4c586555094b93f24f98fac4e64045f /llvm/lib
parent    76e0fc9884adbcb1e797cfd60ff3d772801717ba (diff)
Fix unaligned reads/writes in X86JIT and RuntimeDyldELF.
Summary:
Introduce support::ulittleX_t::ref type to Support/Endian.h and use it in
x86 JIT to enforce correct endianness and fix unaligned accesses.

Test Plan: regression test suite

Reviewers: lhames

Subscribers: ributzka, llvm-commits

Differential Revision: http://reviews.llvm.org/D5011

llvm-svn: 216631
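For context, a minimal sketch of the write/read pattern this patch switches to, assuming the support::ulittle32_t::ref proxy it introduces; the helper names writeLE32/readLE32 are illustrative and not part of the patch:

#include "llvm/Support/Endian.h"
#include <cstdint>

using namespace llvm;

// Store a 32-bit value at a possibly unaligned address, always as
// little-endian bytes, regardless of host endianness or alignment.
static void writeLE32(uint8_t *Addr, uint32_t Value) {
  support::ulittle32_t::ref(Addr) = Value;
}

// Load it back; the proxy converts to a native uint32_t on read.
static uint32_t readLE32(uint8_t *Addr) {
  return support::ulittle32_t::ref(Addr);
}

Assigning through the ref proxy replaces the raw reinterpret_cast<uint32_t *> stores in the old code, which were both unaligned and host-endian-dependent.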
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp  48
-rw-r--r--  llvm/lib/Target/X86/X86JITInfo.cpp                       62
2 files changed, 61 insertions, 49 deletions
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index 9aa04119724..adea5ad2241 100644
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -23,6 +23,7 @@
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ELF.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/MemoryBuffer.h"
using namespace llvm;
@@ -260,10 +261,9 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
llvm_unreachable("Relocation type not implemented yet!");
break;
case ELF::R_X86_64_64: {
- uint64_t *Target = reinterpret_cast<uint64_t *>(Section.Address + Offset);
- *Target = Value + Addend;
+ support::ulittle64_t::ref(Section.Address + Offset) = Value + Addend;
DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
- << format("%p\n", Target));
+ << format("%p\n", Section.Address + Offset));
break;
}
case ELF::R_X86_64_32:
@@ -273,17 +273,15 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
(Type == ELF::R_X86_64_32S &&
((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
- uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
- *Target = TruncatedAddr;
+ support::ulittle32_t::ref(Section.Address + Offset) = TruncatedAddr;
DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
- << format("%p\n", Target));
+ << format("%p\n", Section.Address + Offset));
break;
}
case ELF::R_X86_64_GOTPCREL: {
// findGOTEntry returns the 'G + GOT' part of the relocation calculation
// based on the load/target address of the GOT (not the current/local addr).
uint64_t GOTAddr = findGOTEntry(Value, SymOffset);
- uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
uint64_t FinalAddress = Section.LoadAddress + Offset;
// The processRelocationRef method combines the symbol offset and the addend
// and in most cases that's what we want. For this relocation type, we need
@@ -291,30 +289,29 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
int64_t RealOffset = GOTAddr + Addend - SymOffset - FinalAddress;
assert(RealOffset <= INT32_MAX && RealOffset >= INT32_MIN);
int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
- *Target = TruncOffset;
+ support::ulittle32_t::ref(Section.Address + Offset) = TruncOffset;
break;
}
case ELF::R_X86_64_PC32: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint32_t *Placeholder =
- reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
- uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
+ support::ulittle32_t::ref Placeholder(
+ (void *)(Section.ObjAddress + Offset));
uint64_t FinalAddress = Section.LoadAddress + Offset;
- int64_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
+ int64_t RealOffset = Placeholder + Value + Addend - FinalAddress;
assert(RealOffset <= INT32_MAX && RealOffset >= INT32_MIN);
int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
- *Target = TruncOffset;
+ support::ulittle32_t::ref(Section.Address + Offset) = TruncOffset;
break;
}
case ELF::R_X86_64_PC64: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint64_t *Placeholder =
- reinterpret_cast<uint64_t *>(Section.ObjAddress + Offset);
- uint64_t *Target = reinterpret_cast<uint64_t *>(Section.Address + Offset);
+ support::ulittle64_t::ref Placeholder(
+ (void *)(Section.ObjAddress + Offset));
uint64_t FinalAddress = Section.LoadAddress + Offset;
- *Target = *Placeholder + Value + Addend - FinalAddress;
+ support::ulittle64_t::ref(Section.Address + Offset) =
+ Placeholder + Value + Addend - FinalAddress;
break;
}
}
@@ -327,21 +324,20 @@ void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
case ELF::R_386_32: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint32_t *Placeholder =
- reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
- uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
- *Target = *Placeholder + Value + Addend;
+ support::ulittle32_t::ref Placeholder(
+ (void *)(Section.ObjAddress + Offset));
+ support::ulittle32_t::ref(Section.Address + Offset) =
+ Placeholder + Value + Addend;
break;
}
case ELF::R_386_PC32: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint32_t *Placeholder =
- reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
- uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
+ support::ulittle32_t::ref Placeholder(
+ (void *)(Section.ObjAddress + Offset));
uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
- uint32_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
- *Target = RealOffset;
+ uint32_t RealOffset = Placeholder + Value + Addend - FinalAddress;
+ support::ulittle32_t::ref(Section.Address + Offset) = RealOffset;
break;
}
default:
diff --git a/llvm/lib/Target/X86/X86JITInfo.cpp b/llvm/lib/Target/X86/X86JITInfo.cpp
index a082c4f8b0e..32551930aee 100644
--- a/llvm/lib/Target/X86/X86JITInfo.cpp
+++ b/llvm/lib/Target/X86/X86JITInfo.cpp
@@ -17,6 +17,7 @@
#include "X86TargetMachine.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Valgrind.h"
#include <cstdlib>
@@ -32,13 +33,24 @@ using namespace llvm;
# define X86_32_JIT
#endif
+// x86 is little-endian, and we can do unaligned memory accesses.
+template<typename value_type>
+static value_type read_x86(const void *memory) {
+ return support::endian::read<value_type, support::little, 1>(memory);
+}
+
+template<typename value_type>
+static void write_x86(void *memory, value_type value) {
+ support::endian::write<value_type, support::little, 1>(memory, value);
+}
+
void X86JITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
- unsigned char *OldByte = (unsigned char *)Old;
- *OldByte++ = 0xE9; // Emit JMP opcode.
- unsigned *OldWord = (unsigned *)OldByte;
+ unsigned char *OldPtr = static_cast<unsigned char*>(Old);
+ write_x86<unsigned char>(OldPtr++, 0xE9); // Emit JMP opcode.
unsigned NewAddr = (intptr_t)New;
- unsigned OldAddr = (intptr_t)OldWord;
- *OldWord = NewAddr - OldAddr - 4; // Emit PC-relative addr of New code.
+ unsigned OldAddr = (intptr_t)OldPtr;
+ write_x86<unsigned>(
+ OldPtr, NewAddr - OldAddr - 4); // Emit PC-relative addr of New code.
// X86 doesn't need to invalidate the processor cache, so just invalidate
// Valgrind's cache directly.
@@ -351,31 +363,35 @@ LLVM_LIBRARY_VISIBILITY void LLVMX86CompilationCallback2(intptr_t *StackPtr,
"Could not find return address on the stack!");
// It's a stub if there is an interrupt marker after the call.
- bool isStub = ((unsigned char*)RetAddr)[0] == 0xCE;
+ unsigned char *RetAddrPtr = (unsigned char*)RetAddr;
+ bool isStub = read_x86<unsigned char>(RetAddrPtr) == 0xCE;
// The call instruction should have pushed the return value onto the stack...
#if defined (X86_64_JIT)
- RetAddr--; // Backtrack to the reference itself...
+ RetAddrPtr--; // Backtrack to the reference itself...
#else
- RetAddr -= 4; // Backtrack to the reference itself...
+ RetAddrPtr -= 4; // Backtrack to the reference itself...
#endif
#if 0
- DEBUG(dbgs() << "In callback! Addr=" << (void*)RetAddr
+ DEBUG(dbgs() << "In callback! Addr=" << RetAddrPtr
<< " ESP=" << (void*)StackPtr
<< ": Resolving call to function: "
- << TheVM->getFunctionReferencedName((void*)RetAddr) << "\n");
+ << TheVM->getFunctionReferencedName(RetAddrPtr) << "\n");
#endif
// Sanity check to make sure this really is a call instruction.
#if defined (X86_64_JIT)
- assert(((unsigned char*)RetAddr)[-2] == 0x41 &&"Not a call instr!");
- assert(((unsigned char*)RetAddr)[-1] == 0xFF &&"Not a call instr!");
+ assert(read_x86<unsigned char>(RetAddrPtr - 2) == 0x41 &&
+ "Not a call instr!");
+ assert(read_x86<unsigned char>(RetAddrPtr - 1) == 0xFF &&
+ "Not a call instr!");
#else
- assert(((unsigned char*)RetAddr)[-1] == 0xE8 &&"Not a call instr!");
+ assert(read_x86<unsigned char>(RetAddrPtr - 1) == 0xE8 &&
+ "Not a call instr!");
#endif
- intptr_t NewVal = (intptr_t)JITCompilerFunction((void*)RetAddr);
+ intptr_t NewVal = (intptr_t)JITCompilerFunction(RetAddrPtr);
// Rewrite the call target... so that we don't end up here every time we
// execute the call.
@@ -384,7 +400,7 @@ LLVM_LIBRARY_VISIBILITY void LLVMX86CompilationCallback2(intptr_t *StackPtr,
"X86-64 doesn't support rewriting non-stub lazy compilation calls:"
" the call instruction varies too much.");
#else
- *(intptr_t *)RetAddr = (intptr_t)(NewVal-RetAddr-4);
+ write_x86<intptr_t>(RetAddrPtr, NewVal - (intptr_t)RetAddrPtr - 4);
#endif
if (isStub) {
@@ -397,18 +413,18 @@ LLVM_LIBRARY_VISIBILITY void LLVMX86CompilationCallback2(intptr_t *StackPtr,
// PC-relative branch instead of loading the actual address. (This is
// considerably shorter than the 64-bit immediate load already there.)
// We assume here intptr_t is 64 bits.
- intptr_t diff = NewVal-RetAddr+7;
+ intptr_t diff = NewVal - (intptr_t)RetAddrPtr + 7;
if (diff >= -2147483648LL && diff <= 2147483647LL) {
- *(unsigned char*)(RetAddr-0xc) = 0xE9;
- *(intptr_t *)(RetAddr-0xb) = diff & 0xffffffff;
+ write_x86<unsigned char>(RetAddrPtr - 0xC, 0xE9);
+ write_x86<intptr_t>(RetAddrPtr - 0xB, diff & 0xffffffff);
} else {
- *(intptr_t *)(RetAddr - 0xa) = NewVal;
- ((unsigned char*)RetAddr)[0] = (2 | (4 << 3) | (3 << 6));
+ write_x86<intptr_t>(RetAddrPtr - 0xA, NewVal);
+ write_x86<unsigned char>(RetAddrPtr, (2 | (4 << 3) | (3 << 6)));
}
- sys::ValgrindDiscardTranslations((void*)(RetAddr-0xc), 0xd);
+ sys::ValgrindDiscardTranslations(RetAddrPtr - 0xC, 0xd);
#else
- ((unsigned char*)RetAddr)[-1] = 0xE9;
- sys::ValgrindDiscardTranslations((void*)(RetAddr-1), 5);
+ write_x86<unsigned char>(RetAddrPtr - 1, 0xE9);
+ sys::ValgrindDiscardTranslations(RetAddrPtr - 1, 5);
#endif
}