Diffstat (limited to 'include/asm-ia64')
32 files changed, 306 insertions(+), 84 deletions(-)
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
new file mode 100644
index 000000000000..15818a18bc52
--- /dev/null
+++ b/include/asm-ia64/Kbuild
@@ -0,0 +1,17 @@
+include include/asm-generic/Kbuild.asm
+
+header-y += break.h
+header-y += fpu.h
+header-y += fpswa.h
+header-y += gcc_intrin.h
+header-y += ia64regs.h
+header-y += intel_intrin.h
+header-y += intrinsics.h
+header-y += perfmon_default_smpl.h
+header-y += ptrace_offsets.h
+header-y += rse.h
+header-y += setup.h
+header-y += ucontext.h
+
+unifdef-y += perfmon.h
+unifdef-y += ustack.h
diff --git a/include/asm-ia64/esi.h b/include/asm-ia64/esi.h
new file mode 100644
index 000000000000..84aac0e0b583
--- /dev/null
+++ b/include/asm-ia64/esi.h
@@ -0,0 +1,30 @@
+/*
+ * ESI service calls.
+ *
+ * Copyright (c) Copyright 2005-2006 Hewlett-Packard Development Company, L.P.
+ *     Alex Williamson <alex.williamson@hp.com>
+ */
+#ifndef esi_h
+#define esi_h
+
+#include <linux/efi.h>
+
+#define ESI_QUERY            0x00000001
+#define ESI_OPEN_HANDLE      0x02000000
+#define ESI_CLOSE_HANDLE     0x02000001
+
+enum esi_proc_type {
+    ESI_PROC_SERIALIZED,    /* calls need to be serialized */
+    ESI_PROC_MP_SAFE,       /* MP-safe, but not reentrant */
+    ESI_PROC_REENTRANT      /* MP-safe and reentrant */
+};
+
+extern int ia64_esi_init (void);
+extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
+extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
+                         enum esi_proc_type,
+                         u64, u64, u64, u64, u64, u64, u64, u64);
+extern int ia64_esi_call_phys(efi_guid_t, struct ia64_sal_retval *, u64, u64,
+                              u64, u64, u64, u64, u64, u64);
+
+#endif /* esi_h */
diff --git a/include/asm-ia64/futex.h b/include/asm-ia64/futex.h
index 6a332a9f099c..07d77f3a8cbe 100644
--- a/include/asm-ia64/futex.h
+++ b/include/asm-ia64/futex.h
@@ -1,6 +1,124 @@
 #ifndef _ASM_FUTEX_H
 #define _ASM_FUTEX_H
 
-#include <asm-generic/futex.h>
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
 
-#endif
+#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
+do { \
+    register unsigned long r8 __asm ("r8") = 0; \
+    __asm__ __volatile__( \
+        " mf;; \n" \
+        "[1:] " insn ";; \n" \
+        " .xdata4 \"__ex_table\", 1b-., 2f-. \n" \
+        "[2:]" \
+        : "+r" (r8), "=r" (oldval) \
+        : "r" (uaddr), "r" (oparg) \
+        : "memory"); \
+    ret = r8; \
+} while (0)
+
+#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
+do { \
+    register unsigned long r8 __asm ("r8") = 0; \
+    int val, newval; \
+    do { \
+        __asm__ __volatile__( \
+            " mf;; \n" \
+            "[1:] ld4 %3=[%4];; \n" \
+            " mov %2=%3 \n" \
+            insn ";; \n" \
+            " mov ar.ccv=%2;; \n" \
+            "[2:] cmpxchg4.acq %1=[%4],%3,ar.ccv;; \n" \
+            " .xdata4 \"__ex_table\", 1b-., 3f-.\n" \
+            " .xdata4 \"__ex_table\", 2b-., 3f-.\n" \
+            "[3:]" \
+            : "+r" (r8), "=r" (val), "=&r" (oldval), \
+              "=&r" (newval) \
+            : "r" (uaddr), "r" (oparg) \
+            : "memory"); \
+        if (unlikely (r8)) \
+            break; \
+    } while (unlikely (val != oldval)); \
+    ret = r8; \
+} while (0)
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+    int op = (encoded_op >> 28) & 7;
+    int cmp = (encoded_op >> 24) & 15;
+    int oparg = (encoded_op << 8) >> 20;
+    int cmparg = (encoded_op << 20) >> 20;
+    int oldval = 0, ret;
+    if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+        oparg = 1 << oparg;
+
+    if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        return -EFAULT;
+
+    inc_preempt_count();
+
+    switch (op) {
+    case FUTEX_OP_SET:
+        __futex_atomic_op1("xchg4 %1=[%2],%3", ret, oldval, uaddr,
+                           oparg);
+        break;
+    case FUTEX_OP_ADD:
+        __futex_atomic_op2("add %3=%3,%5", ret, oldval, uaddr, oparg);
+        break;
+    case FUTEX_OP_OR:
+        __futex_atomic_op2("or %3=%3,%5", ret, oldval, uaddr, oparg);
+        break;
+    case FUTEX_OP_ANDN:
+        __futex_atomic_op2("and %3=%3,%5", ret, oldval, uaddr,
+                           ~oparg);
+        break;
+    case FUTEX_OP_XOR:
+        __futex_atomic_op2("xor %3=%3,%5", ret, oldval, uaddr, oparg);
+        break;
+    default:
+        ret = -ENOSYS;
+    }
+
+    dec_preempt_count();
+
+    if (!ret) {
+        switch (cmp) {
+        case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+        case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+        case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+        case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+        case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+        case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+        default: ret = -ENOSYS;
+        }
+    }
+    return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+    if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        return -EFAULT;
+
+    {
+        register unsigned long r8 __asm ("r8");
+        __asm__ __volatile__(
+            " mf;; \n"
+            " mov ar.ccv=%3;; \n"
+            "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n"
+            " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
+            "[2:]"
+            : "=r" (r8)
+            : "r" (uaddr), "r" (newval),
+              "rO" ((long) (unsigned) oldval)
+            : "memory");
+        return r8;
+    }
+}
+
+#endif /* _ASM_FUTEX_H */
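The bit layout decoded by futex_atomic_op_inuser() above is fixed by the generic futex ABI: the operation sits in bits 28-31 (including the FUTEX_OP_OPARG_SHIFT modifier bit), the comparison in bits 24-27, and oparg/cmparg are signed 12-bit fields, which the paired left/right shifts sign-extend. A minimal user-space sketch of the same decode, assuming the constant values from <linux/futex.h>:

    #include <stdio.h>

    #define FUTEX_OP_ADD          1    /* mirrors <linux/futex.h> */
    #define FUTEX_OP_OPARG_SHIFT  8    /* oparg is a shift count, not a value */

    static void decode(int encoded_op)
    {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
        int oparg = (encoded_op << 8) >> 20;    /* sign-extend bits 12..23 */
        int cmparg = (encoded_op << 20) >> 20;  /* sign-extend bits 0..11 */

        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
            oparg = 1 << oparg;
        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
    }

    int main(void)
    {
        /* FUTEX_OP_ADD with oparg=1, FUTEX_OP_CMP_EQ against cmparg=0 */
        decode((FUTEX_OP_ADD << 28) | (1 << 12));
        return 0;
    }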
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index ea8b8c407ab4..27f9df6b9145 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -97,8 +97,7 @@ extern int reserve_irq_vector (int vector);
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
 extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
 
-static inline void
-hw_resend_irq (struct hw_interrupt_type *h, unsigned int vector)
+static inline void ia64_resend_irq(unsigned int vector)
 {
     platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
 }
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index 781ee2c7e8c3..43bfff6c6b87 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -90,7 +90,7 @@ phys_to_virt (unsigned long address)
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
 extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
-extern int valid_mmap_phys_addr_range (unsigned long addr, size_t count);
+extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
 
 /*
  * The following two macros are deprecated and scheduled for removal.
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index dbe86c0bbce5..79479e2c6966 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -14,11 +14,6 @@
 #define NR_IRQS 256
 #define NR_IRQ_VECTORS NR_IRQS
 
-/*
- * IRQ line status macro IRQ_PER_CPU is used
- */
-#define ARCH_HAS_IRQ_PER_CPU
-
 static __inline__ int
 irq_canonicalize (int irq)
 {
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index c195a9ad1255..aed7142f9e4a 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -40,6 +40,8 @@ struct die_args {
 extern int register_die_notifier(struct notifier_block *);
 extern int unregister_die_notifier(struct notifier_block *);
+extern int register_page_fault_notifier(struct notifier_block *);
+extern int unregister_page_fault_notifier(struct notifier_block *);
 extern struct atomic_notifier_head ia64die_chain;
 
 enum die_val {
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 8c0fc227f0fb..1b45b71c79b9 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -29,7 +29,8 @@
 #include <linux/percpu.h>
 #include <asm/break.h>
 
-#define MAX_INSN_SIZE   16
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE   1
 #define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6)
 
 typedef union cmp_inst {
@@ -82,6 +83,7 @@ struct kprobe_ctlblk {
 #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
 
 #define ARCH_SUPPORTS_KRETPROBES
+#define ARCH_INACTIVE_KPROBE_COUNT 1
 
 #define SLOT0_OPCODE_SHIFT (37)
 #define SLOT1_p1_OPCODE_SHIFT (37 - (64-46))
@@ -93,7 +95,7 @@ struct kprobe_ctlblk {
 #define IP_RELATIVE_PREDICT_OPCODE (7)
 #define LONG_BRANCH_OPCODE (0xC)
 #define LONG_CALL_OPCODE (0xD)
-#define arch_remove_kprobe(p) do {} while (0)
+#define flush_insn_slot(p) do { } while (0)
 
 typedef struct kprobe_opcode {
     bundle_t bundle;
@@ -107,7 +109,7 @@ struct fnptr {
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
     /* copy of the instruction to be emulated */
-    kprobe_opcode_t insn;
+    kprobe_opcode_t *insn;
 #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1
 #define INST_FLAG_FIX_BRANCH_REG 2
 #define INST_FLAG_BREAK_INST 4
@@ -124,5 +126,6 @@ static inline void jprobe_return(void)
 }
 extern void invalidate_stacked_regs(void);
 extern void flush_register_stack(void);
+extern void arch_remove_kprobe(struct kprobe *p);
 
 #endif /* _ASM_KPROBES_H */
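The kprobes hunks switch ia64 to the generic instruction-slot allocator: MAX_INSN_SIZE drops from 16 to 1 because kprobe_opcode_t is already a full bundle, ainsn.insn becomes a pointer into an allocated slot, and arch_remove_kprobe() goes from a no-op macro to a real function that must release that slot. A simplified sketch of the resulting pattern — the real arch_prepare_kprobe() also validates and rewrites the bundle, and get_insn_slot()/free_insn_slot() are the generic helpers selected by __ARCH_WANT_KPROBES_INSN_SLOT:

    int arch_prepare_kprobe(struct kprobe *p)
    {
        p->ainsn.insn = get_insn_slot();    /* one bundle-sized slot */
        if (!p->ainsn.insn)
            return -ENOMEM;
        /* keep a copy of the original bundle for single-step and restore */
        memcpy(&p->opcode, p->addr, sizeof(kprobe_opcode_t));
        memcpy(p->ainsn.insn, p->addr, sizeof(kprobe_opcode_t));
        return 0;
    }

    void arch_remove_kprobe(struct kprobe *p)
    {
        free_insn_slot(p->ainsn.insn);      /* no longer a do {} while (0) stub */
    }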
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
index 27c9203d8ce3..76203f9a8718 100644
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -197,9 +197,9 @@
     movl temp2 = start_addr; \
     ;; \
     mov cr.iip = temp2; \
+    movl gp = __gp \
     ;; \
     DATA_PA_TO_VA(sp, temp1); \
-    DATA_PA_TO_VA(gp, temp2); \
     srlz.i; \
     ;; \
     nop 1; \
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index 894bc4d89dc0..c3b1f862e6e7 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -55,7 +55,13 @@ extern void efi_memmap_init(unsigned long *, unsigned long *);
 extern unsigned long vmalloc_end;
 extern struct page *vmem_map;
 extern int find_largest_hole (u64 start, u64 end, void *arg);
+extern int register_active_ranges (u64 start, u64 end, void *arg);
 extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
+extern int vmemmap_find_next_valid_pfn(int, int);
+#else
+static inline int vmemmap_find_next_valid_pfn(int node, int i)
+{
+    return i + 1;
+}
 #endif
-
 #endif /* meminit_h */
diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h
index 6ba179f12718..c73b87832a1e 100644
--- a/include/asm-ia64/mman.h
+++ b/include/asm-ia64/mman.h
@@ -22,4 +22,12 @@
 #define MCL_CURRENT 1 /* lock all current mappings */
 #define MCL_FUTURE 2 /* lock all future mappings */
 
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#define arch_mmap_check ia64_mmap_check
+int ia64_mmap_check(unsigned long addr, unsigned long len,
+        unsigned long flags);
+#endif
+#endif
+
 #endif /* _ASM_IA64_MMAN_H */
diff --git a/include/asm-ia64/module.h b/include/asm-ia64/module.h
index 85c82bd819f2..d2da61e4c49b 100644
--- a/include/asm-ia64/module.h
+++ b/include/asm-ia64/module.h
@@ -28,7 +28,8 @@ struct mod_arch_specific {
 #define Elf_Ehdr Elf64_Ehdr
 
 #define MODULE_PROC_FAMILY "ia64"
-#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY \
+    "gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)
 
 #define ARCH_SHF_SMALL SHF_IA_64_SHORT
diff --git a/include/asm-ia64/nodedata.h b/include/asm-ia64/nodedata.h
index a140310bf84d..2fb337b0e9b7 100644
--- a/include/asm-ia64/nodedata.h
+++ b/include/asm-ia64/nodedata.h
@@ -46,6 +46,18 @@ struct ia64_node_data {
  */
 #define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid])
 
+/*
+ * LOCAL_DATA_ADDR - This is to calculate the address of other node's
+ *                   "local_node_data" at hot-plug phase. The local_node_data
+ *                   is pointed by per_cpu_page. Kernel usually use it for
+ *                   just executing cpu. However, when new node is hot-added,
+ *                   the addresses of local data for other nodes are necessary
+ *                   to update all of them.
+ */
+#define LOCAL_DATA_ADDR(pgdat) \
+    ((struct ia64_node_data *)((u64)(pgdat) + \
+                               L1_CACHE_ALIGN(sizeof(struct pglist_data))))
+
 #endif /* CONFIG_NUMA */
 
 #endif /* _ASM_IA64_NODEDATA_H */
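LOCAL_DATA_ADDR() relies on the per-node layout in which each node's ia64_node_data sits immediately behind its cache-aligned pglist_data. A hypothetical sketch of the hot-add update it exists for (the real walker lives in arch/ia64/mm/discontig.c; the function and parameter names here are illustrative):

    static void update_pgdat_pointers(int new_nid, pg_data_t *new_pgdat)
    {
        int node;

        for_each_online_node(node) {
            /* reach the other node's private ia64_node_data ... */
            struct ia64_node_data *ndata = LOCAL_DATA_ADDR(NODE_DATA(node));

            /* ... and teach it about the newly added node */
            ndata->pg_data_ptrs[new_nid] = new_pgdat;
        }
    }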
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
index e5a8260593a5..7d5e2ccc37a0 100644
--- a/include/asm-ia64/numa.h
+++ b/include/asm-ia64/numa.h
@@ -64,7 +64,13 @@ extern int paddr_to_nid(unsigned long paddr);
 
 #define local_nodeid (cpu_to_node_map[smp_processor_id()])
 
+extern void map_cpu_to_node(int cpu, int nid);
+extern void unmap_cpu_from_node(int cpu, int nid);
+
+
 #else /* !CONFIG_NUMA */
+#define map_cpu_to_node(cpu, nid) do{}while(0)
+#define unmap_cpu_from_node(cpu, nid) do{}while(0)
 
 #define paddr_to_nid(addr) 0
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index f5a949ec6e1e..947cb72b520e 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -7,6 +7,7 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
+# ifdef __KERNEL__
 #include <asm/intrinsics.h>
 #include <asm/types.h>
 
@@ -64,7 +65,6 @@
 # define __pa(x) ((x) - PAGE_OFFSET)
 # define __va(x) ((x) + PAGE_OFFSET)
 #else /* !__ASSEMBLY */
-# ifdef __KERNEL__
 #  define STRICT_MM_TYPECHECKS
 
 extern void clear_page (void *page);
@@ -174,7 +174,6 @@ get_order (unsigned long size)
     return order;
 }
 
-# endif /* __KERNEL__ */
 #endif /* !__ASSEMBLY__ */
 
 #ifdef STRICT_MM_TYPECHECKS
@@ -228,4 +227,5 @@ get_order (unsigned long size)
     (((current->personality & READ_IMPLIES_EXEC) != 0) \
      ? VM_EXEC : 0))
 
+# endif /* __KERNEL__ */
 #endif /* _ASM_IA64_PAGE_H */
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 37e52a2836b0..2c8fd92d0ece 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -78,6 +78,7 @@
 #define PAL_VM_TR_READ 261 /* read contents of translation register */
 #define PAL_GET_PSTATE 262 /* get the current P-state */
 #define PAL_SET_PSTATE 263 /* set the P-state */
+#define PAL_BRAND_INFO 274 /* Processor branding information */
 
 #ifndef __ASSEMBLY__
 
@@ -963,7 +964,8 @@ static inline s64
 ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
 {
     struct ia64_pal_retval iprv;
-    PAL_CALL(iprv, PAL_CACHE_READ, line_id.pclid_data, physical_addr, 0);
+    PAL_CALL_PHYS_STK(iprv, PAL_CACHE_READ, line_id.pclid_data,
+                      physical_addr, 0);
     return iprv.status;
 }
 
@@ -985,7 +987,8 @@ static inline s64
 ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
 {
     struct ia64_pal_retval iprv;
-    PAL_CALL(iprv, PAL_CACHE_WRITE, line_id.pclid_data, physical_addr, data);
+    PAL_CALL_PHYS_STK(iprv, PAL_CACHE_WRITE, line_id.pclid_data,
+                      physical_addr, data);
     return iprv.status;
 }
 
@@ -1133,6 +1136,15 @@ ia64_pal_set_pstate (u64 pstate_index)
     return iprv.status;
 }
 
+/* Processor branding information*/
+static inline s64
+ia64_pal_get_brand_info (char *brand_info)
+{
+    struct ia64_pal_retval iprv;
+    PAL_CALL_STK(iprv, PAL_BRAND_INFO, 0, (u64)brand_info, 0);
+    return iprv.status;
+}
+
 /* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
  * suspended, but cache and TLB coherency is maintained.
  */
@@ -1433,7 +1445,12 @@ typedef union pal_version_u {
 } pal_version_u_t;
 
 
-/* Return PAL version information */
+/*
+ * Return PAL version information.  While the documentation states that
+ * PAL_VERSION can be called in either physical or virtual mode, some
+ * implementations only allow physical calls.  We don't call it very often,
+ * so the overhead isn't worth eliminating.
+ */
 static inline s64
 ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
 {
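PAL_BRAND_INFO with index 0 returns the processor brand string, which is what the new cpuinfo_ia64.model_name field in the processor.h hunk below is meant to hold. A hedged sketch of a cpu-identification path using it — the 128-byte buffer, the fallback string, and the kstrdup() call are assumptions, not the kernel's exact logic:

    static void __cpuinit get_model_name(struct cpuinfo_ia64 *c)
    {
        char brand[128];

        /* status 0 means the firmware implements the call */
        if (ia64_pal_get_brand_info(brand) != 0)
            strlcpy(brand, "Unknown", sizeof(brand));
        c->model_name = kstrdup(brand, GFP_KERNEL);
    }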
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index ae357d504fba..fbe5cf3ab8dc 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -36,12 +36,14 @@
 #ifdef CONFIG_SMP
 
 extern unsigned long __per_cpu_offset[NR_CPUS];
+#define per_cpu_offset(x) (__per_cpu_offset(x))
 
 /* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
 DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
 
 #define per_cpu(var, cpu)  (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
+#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
 
 extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
 extern void setup_per_cpu_areas (void);
@@ -51,6 +53,7 @@ extern void *per_cpu_init(void);
 
 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
+#define __raw_get_cpu_var(var) per_cpu__##var
 #define per_cpu_init() (__phys_per_cpu_start)
 
 #endif /* SMP */
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 228981cadf8f..553182747722 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -275,21 +275,23 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
 #define pmd_present(pmd) (pmd_val(pmd) != 0UL)
 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
 #define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
 
 #define pud_none(pud) (!pud_val(pud))
 #define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud)))
 #define pud_present(pud) (pud_val(pud) != 0UL)
 #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
-#define pud_page(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
+#define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET))
 
 #ifdef CONFIG_PGTABLE_4
 #define pgd_none(pgd) (!pgd_val(pgd))
 #define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
 #define pgd_present(pgd) (pgd_val(pgd) != 0UL)
 #define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
-#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
+#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
+#define pgd_page(pgd) virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
 #endif
 
 /*
@@ -360,19 +362,19 @@ pgd_offset (struct mm_struct *mm, unsigned long address)
 #ifdef CONFIG_PGTABLE_4
 /* Find an entry in the second-level page table.. */
 #define pud_offset(dir,addr) \
-    ((pud_t *) pgd_page(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+    ((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
 #endif
 
 /* Find an entry in the third-level page table.. */
 #define pmd_offset(dir,addr) \
-    ((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+    ((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
 /*
  * Find an entry in the third-level page table.  This looks more complicated than it
  * should be because some platforms place page tables in high memory.
 */
 #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
+#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
 #define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr)
 #define pte_unmap(pte) do { } while (0)
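The pgtable.h hunk is a mechanical rename: the accessors that return the virtual address of the next-level table become *_page_vaddr(), freeing pud_page() and pgd_page() to return the struct page of the table itself, as pmd_page() already did. A sketch of where each renamed helper sits in a software walk of a kernel address (CONFIG_PGTABLE_4 layout, error handling trimmed):

    static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
            return NULL;
        pud = pud_offset(pgd, addr);            /* expands to pgd_page_vaddr() */
        if (pud_none(*pud))
            return NULL;
        pmd = pmd_offset(pud, addr);            /* expands to pud_page_vaddr() */
        if (pmd_none(*pmd))
            return NULL;
        return pte_offset_kernel(pmd, addr);    /* expands to pmd_page_vaddr() */
    }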
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 265f4824db0e..5830d36fd8e6 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -20,12 +20,6 @@
 #include <asm/ustack.h>
 
 #define IA64_NUM_DBG_REGS 8
-/*
- * Limits for PMC and PMD are set to less than maximum architected values
- * but should be sufficient for a while
- */
-#define IA64_NUM_PMC_REGS 64
-#define IA64_NUM_PMD_REGS 64
 
 #define DEFAULT_MAP_BASE __IA64_UL_CONST(0x2000000000000000)
 #define DEFAULT_TASK_SIZE __IA64_UL_CONST(0xa000000000000000)
@@ -163,6 +157,7 @@ struct cpuinfo_ia64 {
     __u8 family;
     __u8 archrev;
     char vendor[16];
+    char *model_name;
 
 #ifdef CONFIG_NUMA
     struct ia64_node_data *node_data;
@@ -262,13 +257,9 @@ struct thread_struct {
 # define INIT_THREAD_IA32
 #endif /* CONFIG_IA32_SUPPORT */
 #ifdef CONFIG_PERFMON
-    __u64 pmcs[IA64_NUM_PMC_REGS];
-    __u64 pmds[IA64_NUM_PMD_REGS];
     void *pfm_context; /* pointer to detailed PMU context */
     unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
-# define INIT_THREAD_PM .pmcs = {0UL, }, \
-                        .pmds = {0UL, }, \
-                        .pfm_context = NULL, \
+# define INIT_THREAD_PM .pfm_context = NULL, \
                         .pfm_needs_checking = 0UL,
 #else
 # define INIT_THREAD_PM
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index 415abb23b210..1414316efd40 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -56,6 +56,8 @@
 #include <asm/fpu.h>
 
+
+#ifdef __KERNEL__
 #ifndef ASM_OFFSETS_C
 #include <asm/asm-offsets.h>
 #endif
@@ -79,10 +81,9 @@
 
 #define KERNEL_STACK_SIZE IA64_STK_OFFSET
 
-#ifndef __ASSEMBLY__
+#endif /* __KERNEL__ */
 
-#include <asm/current.h>
-#include <asm/page.h>
+#ifndef __ASSEMBLY__
 
 /*
  * This struct defines the way the registers are saved on system
@@ -229,6 +230,9 @@ struct switch_stack {
 
 #ifdef __KERNEL__
 
+#include <asm/current.h>
+#include <asm/page.h>
+
 #define __ARCH_SYS_PTRACE 1
 
 /*
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
index 1327c91ea39c..2d1640cc240a 100644
--- a/include/asm-ia64/rwsem.h
+++ b/include/asm-ia64/rwsem.h
@@ -33,9 +33,6 @@ struct rw_semaphore {
     signed long count;
     spinlock_t wait_lock;
     struct list_head wait_list;
-#if RWSEM_DEBUG
-    int debug;
-#endif
 };
 
 #define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000)
@@ -45,19 +42,9 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-/*
- * initialization
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT /* */
-#endif
-
 #define __RWSEM_INITIALIZER(name) \
     { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-      LIST_HEAD_INIT((name).wait_list) \
-      __RWSEM_DEBUG_INIT }
+      LIST_HEAD_INIT((name).wait_list) }
 
 #define DECLARE_RWSEM(name) \
     struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -73,9 +60,6 @@ init_rwsem (struct rw_semaphore *sem)
     sem->count = RWSEM_UNLOCKED_VALUE;
     spin_lock_init(&sem->wait_lock);
     INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-    sem->debug = 0;
-#endif
 }
 
 /*
diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h
index 5e328ed5d01d..4f5ca5643cb1 100644
--- a/include/asm-ia64/signal.h
+++ b/include/asm-ia64/signal.h
@@ -56,7 +56,6 @@
  * SA_FLAGS values:
  *
  * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_INTERRUPT is a no-op, but left due to historical reasons.
  * SA_RESTART flag to get restarting signals (which were the default long ago)
  * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
  * SA_RESETHAND clears the handler when the signal is delivered.
@@ -76,7 +75,6 @@
 #define SA_NOMASK SA_NODEFER
 #define SA_ONESHOT SA_RESETHAND
 
-#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
 #define SA_RESTORER 0x04000000
 
@@ -114,8 +112,6 @@
 #define _NSIG_BPW 64
 #define _NSIG_WORDS (_NSIG / _NSIG_BPW)
 
-#define SA_PERCPU_IRQ 0x02000000
-
 #endif /* __KERNEL__ */
 
 #include <asm-generic/signal.h>
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 719ff309ce09..60fd4ae014f6 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -122,12 +122,11 @@ extern void __init smp_build_cpu_map(void);
 extern void __init init_smp_config (void);
 extern void smp_do_timer (struct pt_regs *regs);
 
-extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
-                                     int retry, int wait);
 extern void smp_send_reschedule (int cpu);
 extern void lock_ipi_calllock(void);
 extern void unlock_ipi_calllock(void);
 extern void identify_siblings (struct cpuinfo_ia64 *);
+extern int is_multithreading_enabled(void);
 
 #else
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index cd490b20d592..ba826b3f75bb 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -85,6 +85,7 @@
 #define  SN_SAL_GET_PROM_FEATURE_SET 0x02000065
 #define  SN_SAL_SET_OS_FEATURE_SET 0x02000066
 #define  SN_SAL_INJECT_ERROR 0x02000067
+#define  SN_SAL_SET_CPU_NUMBER 0x02000068
 
 /*
  * Service-specific constants
@@ -705,12 +706,9 @@ static inline int
 sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
 {
     struct ia64_sal_retval ret_stuff;
-    unsigned long irq_flags;
 
-    local_irq_save(irq_flags);
     ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len,
                             (u64)nasid_array, perms, 0, 0, 0);
-    local_irq_restore(irq_flags);
     return ret_stuff.status;
 }
 #define SN_MEMPROT_ACCESS_CLASS_0 0x14a080
@@ -1142,12 +1140,18 @@ static inline int
 sn_inject_error(u64 paddr, u64 *data, u64 *ecc)
 {
     struct ia64_sal_retval ret_stuff;
-    unsigned long irq_flags;
 
-    local_irq_save(irq_flags);
     ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data,
                             (u64)ecc, 0, 0, 0, 0);
-    local_irq_restore(irq_flags);
     return ret_stuff.status;
 }
+
+static inline int
+ia64_sn_set_cpu_number(int cpu)
+{
+    struct ia64_sal_retval rv;
+
+    SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
+    return rv.status;
+}
 #endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/include/asm-ia64/sn/tioca_provider.h b/include/asm-ia64/sn/tioca_provider.h
index ab7fe2463468..65cdd73c2a57 100644
--- a/include/asm-ia64/sn/tioca_provider.h
+++ b/include/asm-ia64/sn/tioca_provider.h
@@ -27,7 +27,7 @@
 #define PV908234 (1 << 1)
     /* CA:AGPDMA write request data mismatch with ABC1CL merge */
 #define PV895469 (1 << 1)
-    /* TIO:CA TLB invalidate of written GART entries possibly not occuring in CA*/
+    /* TIO:CA TLB invalidate of written GART entries possibly not occurring in CA*/
 #define PV910244 (1 << 1)
 
 struct tioca_dmamap{
diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h
index 9bd2f9bf329b..6f807e0193b7 100644
--- a/include/asm-ia64/sn/xp.h
+++ b/include/asm-ia64/sn/xp.h
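The BUG_ON() added to xp_bte_copy() in the next hunk works because ia64_tpa() yields a physical address that is valid for the whole buffer only when the virtual address is identity-mapped, i.e. in region 7; the region is simply the top three bits of the address. A standalone illustration of the check, with simplified constants equivalent to the ia64 header definitions:

    #include <assert.h>

    #define REGION_NUMBER(v)    (((unsigned long) (v)) >> 61)
    #define RGN_KERNEL          7UL

    int main(void)
    {
        unsigned long kaddr = 0xe000000012345678UL; /* region 7: identity-mapped */
        unsigned long vaddr = 0xa000000000010000UL; /* region 5: mapped via page tables */

        assert(REGION_NUMBER(kaddr) == RGN_KERNEL); /* fine for xp_bte_copy() */
        assert(REGION_NUMBER(vaddr) != RGN_KERNEL); /* would trip the BUG_ON() */
        return 0;
    }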
@@ -60,23 +60,37 @@
  * the bte_copy() once in the hope that the failure was due to a temporary
  * aberration (i.e., the link going down temporarily).
  *
- * See bte_copy for definition of the input parameters.
+ * src - physical address of the source of the transfer.
+ * vdst - virtual address of the destination of the transfer.
+ * len - number of bytes to transfer from source to destination.
+ * mode - see bte_copy() for definition.
+ * notification - see bte_copy() for definition.
  *
  * Note: xp_bte_copy() should never be called while holding a spinlock.
  */
 static inline bte_result_t
-xp_bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
+xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
 {
     bte_result_t ret;
+    u64 pdst = ia64_tpa(vdst);
 
-    ret = bte_copy(src, dest, len, mode, notification);
+    /*
+     * Ensure that the physically mapped memory is contiguous.
+     *
+     * We do this by ensuring that the memory is from region 7 only.
+     * If the need should arise to use memory from one of the other
+     * regions, then modify the BUG_ON() statement to ensure that the
+     * memory from that region is always physically contiguous.
+     */
+    BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);
+    ret = bte_copy(src, pdst, len, mode, notification);
     if (ret != BTE_SUCCESS) {
         if (!in_interrupt()) {
             cond_resched();
         }
-        ret = bte_copy(src, dest, len, mode, notification);
+        ret = bte_copy(src, pdst, len, mode, notification);
     }
 
     return ret;
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 8406f1ef4caf..35e1386f37ab 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -683,7 +683,9 @@ extern struct xpc_vars *xpc_vars;
 extern struct xpc_rsvd_page *xpc_rsvd_page;
 extern struct xpc_vars_part *xpc_vars_part;
 extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
-extern char xpc_remote_copy_buffer[];
+extern char *xpc_remote_copy_buffer;
+extern void *xpc_remote_copy_buffer_base;
+extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
 extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
 extern void xpc_allow_IPI_ops(void);
 extern void xpc_restrict_IPI_ops(void);
@@ -1124,8 +1126,8 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
 #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
 #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
 
-#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
-#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010)
+#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
+#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & __IA64_UL_CONST(0x1010101010101010))
 
 static inline void
diff --git a/include/asm-ia64/socket.h b/include/asm-ia64/socket.h
index a255006fb7b5..d638ef3d50c3 100644
--- a/include/asm-ia64/socket.h
+++ b/include/asm-ia64/socket.h
@@ -57,5 +57,6 @@
 #define SO_ACCEPTCONN 30
 
 #define SO_PEERSEC 31
+#define SO_PASSSEC 34
 
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 65db43ce4de6..384fbf7f2a0f 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -24,7 +24,7 @@
  * 0xa000000000000000+2*PERCPU_PAGE_SIZE
  * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
  */
-#define KERNEL_START (GATE_ADDR+0x100000000)
+#define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000))
 #define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
@@ -98,12 +98,11 @@ extern struct ia64_boot_param {
 #endif
 
 /*
- * XXX check on these---I suspect what Linus really wants here is
+ * XXX check on this ---I suspect what Linus really wants here is
  * acquire vs release semantics but we can't discuss this stuff with
  * Linus just yet.  Grrr...
  */
 #define set_mb(var, value) do { (var) = (value); mb(); } while (0)
-#define set_wmb(var, value) do { (var) = (value); mb(); } while (0)
 
 #define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index e5392c4d30c6..8adcde0934ca 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -27,6 +27,7 @@ struct thread_info {
     __u32 flags; /* thread_info flags (see TIF_*) */
     __u32 cpu; /* current CPU */
     __u32 last_cpu; /* Last CPU thread ran on */
+    __u32 status; /* Thread synchronous flags */
     mm_segment_t addr_limit; /* user-level address space limit */
     int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
     struct restart_block restart_block;
@@ -67,7 +68,7 @@ struct thread_info {
 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
 
 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
+#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
 #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
 
 #endif /* !__ASSEMBLY */
@@ -103,4 +104,8 @@ struct thread_info {
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
 #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
 
+#define TS_POLLING 1 /* true if in idle loop and not sleeping */
+
+#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+
 #endif /* _ASM_IA64_THREAD_INFO_H */
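TS_POLLING lives in the new thread_info.status word rather than in flags so the idle loop can update it without atomic operations, and tsk_is_polling() exists for IPI avoidance: an idle CPU that polls memory (e.g. via mwait) notices TIF_NEED_RESCHED on its own. A simplified sketch of the generic resched_task() pattern this hooks into:

    static void resched_task_sketch(struct task_struct *p)
    {
        set_tsk_thread_flag(p, TIF_NEED_RESCHED);

        /* a polling idle task watches its flags word; only a CPU that
         * may really be asleep needs the cross-CPU interrupt */
        if (!tsk_is_polling(p))
            smp_send_reschedule(task_cpu(p));
    }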
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 616b5ed2aa72..937c21257523 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -112,6 +112,7 @@ void build_cpu_to_node_map(void);
 #define topology_core_id(cpu) (cpu_data(cpu)->core_id)
 #define topology_core_siblings(cpu) (cpu_core_map[cpu])
 #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define smt_capable() (smp_num_siblings > 1)
 #endif
 
 #include <asm-generic/topology.h>
diff --git a/include/asm-ia64/ustack.h b/include/asm-ia64/ustack.h
index da55c91246e3..a349467913ea 100644
--- a/include/asm-ia64/ustack.h
+++ b/include/asm-ia64/ustack.h
@@ -5,12 +5,15 @@
  * Constants for the user stack size
  */
 
+#ifdef __KERNEL__
 #include <asm/page.h>
 
 /* The absolute hard limit for stack size is 1/2 of the mappable space in the region */
 #define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2)
-/* Make a default stack size of 2GB */
-#define DEFAULT_USER_STACK_SIZE (1UL << 31)
 #define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
+#endif
+
+/* Make a default stack size of 2GiB */
+#define DEFAULT_USER_STACK_SIZE (1UL << 31)
 
 #endif /* _ASM_IA64_USTACK_H */