Diffstat (limited to 'arch/ia64/include')
-rw-r--r--  arch/ia64/include/asm/elf.h       | 15
-rw-r--r--  arch/ia64/include/asm/sections.h  | 13
-rw-r--r--  arch/ia64/include/asm/sn/bte.h    |  9
3 files changed, 32 insertions(+), 5 deletions(-)
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 5e0c1a6bce8d..2acb6b6543c9 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -266,4 +266,19 @@ do {						\
 	}						\
 } while (0)
 
+/*
+ * format for entries in the Global Offset Table
+ */
+struct got_entry {
+	uint64_t val;
+};
+
+/*
+ * Layout of the Function Descriptor
+ */
+struct fdesc {
+	uint64_t ip;
+	uint64_t gp;
+};
+
 #endif /* _ASM_IA64_ELF_H */
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
index a7acad2bc2f0..f66799891036 100644
--- a/arch/ia64/include/asm/sections.h
+++ b/arch/ia64/include/asm/sections.h
@@ -6,6 +6,8 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
+#include <linux/elf.h>
+#include <linux/uaccess.h>
 #include <asm-generic/sections.h>
 
 extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
@@ -22,7 +24,16 @@ extern char __start_unwind[], __end_unwind[];
 extern char __start_ivt_text[], __end_ivt_text[];
 
 #undef dereference_function_descriptor
-void *dereference_function_descriptor(void *);
+static inline void *dereference_function_descriptor(void *ptr)
+{
+	struct fdesc *desc = ptr;
+	void *p;
+
+	if (!probe_kernel_address(&desc->ip, p))
+		ptr = p;
+	return ptr;
+}
+
 #endif /* _ASM_IA64_SECTIONS_H */
diff --git a/arch/ia64/include/asm/sn/bte.h b/arch/ia64/include/asm/sn/bte.h
index a0d214f43115..5efecf06c9a4 100644
--- a/arch/ia64/include/asm/sn/bte.h
+++ b/arch/ia64/include/asm/sn/bte.h
@@ -223,10 +223,11 @@ extern void bte_error_handler(unsigned long);
  * until the transfer is complete.  In order to get the asynch
  * version of bte_copy, you must perform this check yourself.
  */
-#define BTE_UNALIGNED_COPY(src, dest, len, mode)		\
-	(((len & L1_CACHE_MASK) || (src & L1_CACHE_MASK) ||	\
-	  (dest & L1_CACHE_MASK)) ?				\
-	 bte_unaligned_copy(src, dest, len, mode) :		\
+#define BTE_UNALIGNED_COPY(src, dest, len, mode)		\
+	(((len & (L1_CACHE_BYTES - 1)) ||			\
+	  (src & (L1_CACHE_BYTES - 1)) ||			\
+	  (dest & (L1_CACHE_BYTES - 1))) ?			\
+	 bte_unaligned_copy(src, dest, len, mode) :		\
 	 bte_copy(src, dest, len, mode, NULL))
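
Background note (not part of the patch): on ia64 a function pointer does not point at the first instruction of the function but at a function descriptor, the two-word { ip, gp } pair that the new struct fdesc in elf.h describes. The inline dereference_function_descriptor() above loads the ip word through probe_kernel_address(), so a pointer that is not really a descriptor is left unchanged instead of faulting. Likewise, the bte.h hunk only respells the cache-line alignment test as (L1_CACHE_BYTES - 1), the low-order mask of a cache line, before choosing between bte_unaligned_copy() and bte_copy(). The sketch below is a minimal, hypothetical user-space illustration of the descriptor-resolution step; the helper name dereference_fdesc() and the addresses are invented for the example, and only the struct fdesc layout mirrors the header.

#include <stdint.h>
#include <stdio.h>

/* Two-word IA-64 function descriptor, matching struct fdesc in elf.h. */
struct fdesc {
	uint64_t ip;	/* address of the function's code */
	uint64_t gp;	/* global pointer the callee expects */
};

/*
 * Simplified stand-in for the kernel helper: it just loads ->ip.
 * The real inline additionally goes through probe_kernel_address()
 * so that a bad pointer does not cause a fault.
 */
static void *dereference_fdesc(void *ptr)
{
	struct fdesc *desc = ptr;

	return (void *)desc->ip;
}

int main(void)
{
	/* Invented example values; real descriptors are set up by the
	 * linker or, for modules, by the module loader. */
	struct fdesc d = {
		.ip = 0xa000000100001230ULL,
		.gp = 0xa000000100ff0000ULL,
	};

	printf("descriptor %p -> code at 0x%llx\n", (void *)&d,
	       (unsigned long long)(uintptr_t)dereference_fdesc(&d));
	return 0;
}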