author    Tim Northover <tnorthover@apple.com>  2014-03-29 10:18:08 +0000
committer Tim Northover <tnorthover@apple.com>  2014-03-29 10:18:08 +0000
commit    00ed9964c65962e2afc8e3c83a2f7114b0ce25a0 (patch)
tree      21404a5e99549c2f98d72eac05f9df0b484d2d28 /llvm/lib/Support/Unix/Memory.inc
parent    3e38d290c872ae8d875b8dbe2d55262cee3a3cf9 (diff)
ARM64: initial backend import
This adds a second implementation of the AArch64 architecture to LLVM,
accessible in parallel via the "arm64" triple. The plan over the coming
weeks & months is to merge the two into a single backend, during which
time thorough code review should naturally occur.

Everything will be easier with the target in-tree though, hence this
commit.

llvm-svn: 205090
Diffstat (limited to 'llvm/lib/Support/Unix/Memory.inc')
-rw-r--r--   llvm/lib/Support/Unix/Memory.inc   15
1 file changed, 8 insertions(+), 7 deletions(-)
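The change in this file is mechanical: every Darwin-specific guard that previously tested only __arm__ now also accepts __arm64__, so the Mach-based protection handling and instruction-cache invalidation below are reused for the new target. Schematically (an illustration of the guard pattern, not a line taken from the patch):

// Guard pattern the hunks below apply throughout Memory.inc (illustrative).
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  // Darwin-on-ARM path: pages are mapped R+X and protections are toggled
  // through Mach vm_protect() calls.
#else
  // Generic Unix path.
#endif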
diff --git a/llvm/lib/Support/Unix/Memory.inc b/llvm/lib/Support/Unix/Memory.inc
index 58fda420eb6..08cd34d5329 100644
--- a/llvm/lib/Support/Unix/Memory.inc
+++ b/llvm/lib/Support/Unix/Memory.inc
@@ -205,7 +205,7 @@ Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
void* start = NearBlock ? (unsigned char*)NearBlock->base() +
NearBlock->size() : 0;
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
flags, fd, 0);
#else
@@ -220,7 +220,7 @@ Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
return MemoryBlock();
}
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
(vm_size_t)(PageSize*NumPages), 0,
VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
@@ -253,7 +253,7 @@ bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
}
bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
if (M.Address == 0 || M.Size == 0) return false;
Memory::InvalidateInstructionCache(M.Address, M.Size);
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
@@ -265,7 +265,7 @@ bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
}
bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
if (M.Address == 0 || M.Size == 0) return false;
Memory::InvalidateInstructionCache(M.Address, M.Size);
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
@@ -280,7 +280,7 @@ bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
}
bool Memory::setRangeWritable(const void *Addr, size_t Size) {
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
(vm_size_t)Size, 0,
VM_PROT_READ | VM_PROT_WRITE);
@@ -291,7 +291,7 @@ bool Memory::setRangeWritable(const void *Addr, size_t Size) {
}
bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
-#if defined(__APPLE__) && defined(__arm__)
+#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
(vm_size_t)Size, 0,
VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
@@ -311,7 +311,8 @@ void Memory::InvalidateInstructionCache(const void *Addr,
#if defined(__APPLE__)
# if (defined(__POWERPC__) || defined (__ppc__) || \
- defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
+ defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
+ defined(__arm64__))
sys_icache_invalidate(const_cast<void *>(Addr), Len);
# endif
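Taken together, the guarded code paths implement the Darwin-on-ARM JIT memory discipline: map pages read+execute, use vm_protect() with VM_PROT_COPY to gain execute rights, flip pages to read+write while emitting code, flip them back, and invalidate the instruction cache. Below is a minimal, self-contained sketch of that sequence, assuming an Apple armv7 or arm64 host of that era; the page size, error handling, and standalone structure are illustrative rather than LLVM's, and current Apple silicon additionally requires MAP_JIT plus per-thread write protection, which is outside the scope of this sketch.

#include <sys/mman.h>
#include <mach/mach.h>
#include <libkern/OSCacheControl.h>
#include <cstdio>
#include <cstring>

int main() {
  const size_t Size = 0x4000;   // one 16 KiB page on arm64 Darwin (illustrative)

  // Mirror AllocateRWX on Apple ARM: map the block read+execute up front...
  void *Pa = ::mmap(nullptr, Size, PROT_READ | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANON, -1, 0);
  if (Pa == MAP_FAILED) { std::perror("mmap"); return 1; }

  // ...then request execute rights through Mach with VM_PROT_COPY, as the
  // patched AllocateRWX does right after the mmap succeeds.
  kern_return_t KR = vm_protect(mach_task_self(), (vm_address_t)Pa,
                                (vm_size_t)Size, /*set_maximum=*/0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KR != KERN_SUCCESS) { std::fprintf(stderr, "vm_protect: %d\n", KR); return 1; }

  // setWritable equivalent: flip the block to read+write before emitting code.
  KR = vm_protect(mach_task_self(), (vm_address_t)Pa, (vm_size_t)Size, 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KR != KERN_SUCCESS) { std::fprintf(stderr, "vm_protect: %d\n", KR); return 1; }
  std::memset(Pa, 0, Size);     // stand-in for writing JITed code into the block

  // setExecutable equivalent, then flush the instruction cache the way
  // InvalidateInstructionCache does on Apple targets.
  KR = vm_protect(mach_task_self(), (vm_address_t)Pa, (vm_size_t)Size, 0,
                  VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KR != KERN_SUCCESS) { std::fprintf(stderr, "vm_protect: %d\n", KR); return 1; }
  sys_icache_invalidate(Pa, Size);

  ::munmap(Pa, Size);
  return 0;
}

Building something along the lines of clang++ -std=c++11 sketch.cpp on a Darwin ARM host is enough to exercise the calls; the order (execute first, then writable, then executable again plus an icache flush) mirrors how a JIT client would typically drive AllocateRWX, setWritable, setExecutable, and InvalidateInstructionCache.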