Diffstat (limited to 'arch/sparc/include')
-rw-r--r--  arch/sparc/include/asm/atomic_32.h   |  4
-rw-r--r--  arch/sparc/include/asm/atomic_64.h   |  6
-rw-r--r--  arch/sparc/include/asm/bitops_64.h   |  5
-rw-r--r--  arch/sparc/include/asm/elf_64.h      |  2
-rw-r--r--  arch/sparc/include/asm/pgalloc_64.h  | 76
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h  | 15
-rw-r--r--  arch/sparc/include/asm/prom.h        |  2
-rw-r--r--  arch/sparc/include/asm/ptrace.h      |  2
-rw-r--r--  arch/sparc/include/asm/smp_32.h      |  2
-rw-r--r--  arch/sparc/include/asm/smp_64.h      |  2
10 files changed, 78 insertions, 38 deletions
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 7ae128b19d3f..5c3c8b69884d 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -22,7 +22,7 @@
 extern int __atomic_add_return(int, atomic_t *);
 extern int atomic_cmpxchg(atomic_t *, int, int);
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-extern int atomic_add_unless(atomic_t *, int, int);
+extern int __atomic_add_unless(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
 
 #define atomic_read(v)          (*(volatile int *)&(v)->counter)
@@ -52,7 +52,6 @@ extern void atomic_set(atomic_t *, int);
 #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /* This is the old 24-bit implementation.  It's still used internally
  * by some sparc-specific code, notably the semaphore implementation.
@@ -161,5 +160,4 @@ static inline int __atomic24_sub(int i, atomic24_t *v)
 
 #endif /* !(__KERNEL__) */
 
-#include <asm-generic/atomic-long.h>
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index bdb2ff880bdd..9f421df46aec 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -70,7 +70,7 @@ extern long atomic64_sub_ret(long, atomic64_t *);
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -82,10 +82,9 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic64_cmpxchg(v, o, n) \
 	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
@@ -114,5 +113,4 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
-#include <asm-generic/atomic-long.h>
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index 38e9aa1b2cea..325e295d60de 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -91,10 +91,7 @@ static inline unsigned int __arch_hweight8(unsigned int w)
 
 #include <asm-generic/bitops/le.h>
 
-#define ext2_set_bit_atomic(lock,nr,addr) \
-	test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock,nr,addr) \
-	test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr))
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index e67880381b84..cfa9cd2e5519 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -186,7 +186,7 @@ static inline unsigned int sparc64_elf_hwcap(void)
 	return cap;
 }
 
-#define ELF_HWCAP	sparc64_elf_hwcap();
+#define ELF_HWCAP	sparc64_elf_hwcap()
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
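The atomic_32.h/atomic_64.h hunks above change more than the name: the old atomic_add_unless() returned a boolean, while __atomic_add_unless() returns the value it observed before the add, which lets the generic <linux/atomic.h> of this kernel series rebuild atomic_add_unless() and atomic_inc_not_zero() on top of it. A minimal user-space C11 sketch of that calling convention (the my_* names are illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* Returns the value seen before the add, like the new __atomic_add_unless(). */
static int my_atomic_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
		;	/* on failure, c is reloaded with the current value */
	return c;	/* the old interface returned (c != u) instead */
}

/* The boolean wrappers are easy to reconstruct from the returned old value. */
#define my_atomic_inc_not_zero(v)	(my_atomic_add_unless((v), 1, 0) != 0)

int main(void)
{
	atomic_int v = 0;

	printf("%d\n", my_atomic_inc_not_zero(&v));	/* 0: v was zero, left at 0 */
	atomic_store(&v, 5);
	printf("%d\n", my_atomic_inc_not_zero(&v));	/* 1: v was 5, now 6        */
	return 0;
}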
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 4e5e0878144f..40b2d7a7023d 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -5,7 +5,6 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/quicklist.h>
 
 #include <asm/spitfire.h>
 #include <asm/cpudata.h>
@@ -14,71 +13,114 @@
 
 /* Page table allocation/freeing. */
 
+extern struct kmem_cache *pgtable_cache;
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return quicklist_alloc(0, GFP_KERNEL, NULL);
+	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	quicklist_free(0, NULL, pgd);
+	kmem_cache_free(pgtable_cache, pgd);
 }
 
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return quicklist_alloc(0, GFP_KERNEL, NULL);
+	return kmem_cache_alloc(pgtable_cache,
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	quicklist_free(0, NULL, pmd);
+	kmem_cache_free(pgtable_cache, pmd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return quicklist_alloc(0, GFP_KERNEL, NULL);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 				      unsigned long address)
 {
 	struct page *page;
-	void *pg;
+	pte_t *pte;
 
-	pg = quicklist_alloc(0, GFP_KERNEL, NULL);
-	if (!pg)
+	pte = pte_alloc_one_kernel(mm, address);
+	if (!pte)
 		return NULL;
-	page = virt_to_page(pg);
+	page = virt_to_page(pte);
 	pgtable_page_ctor(page);
 	return page;
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	quicklist_free(0, NULL, pte);
+	free_page((unsigned long)pte);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 {
 	pgtable_page_dtor(ptepage);
-	quicklist_free_page(0, NULL, ptepage);
+	__free_page(ptepage);
 }
 
-
 #define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
 #define pmd_populate(MM,PMD,PTE_PAGE)		\
 	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
 #define pmd_pgtable(pmd)			pmd_page(pmd)
 
-static inline void check_pgt_cache(void)
+#define check_pgt_cache()	do { } while (0)
+
+static inline void pgtable_free(void *table, bool is_page)
+{
+	if (is_page)
+		free_page((unsigned long)table);
+	else
+		kmem_cache_free(pgtable_cache, table);
+}
+
+#ifdef CONFIG_SMP
+
+struct mmu_gather;
+extern void tlb_remove_table(struct mmu_gather *, void *);
+
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
+{
+	unsigned long pgf = (unsigned long)table;
+	if (is_page)
+		pgf |= 0x1UL;
+	tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+	void *table = (void *)((unsigned long)_table & ~0x1UL);
+	bool is_page = false;
+
+	if ((unsigned long)_table & 0x1UL)
+		is_page = true;
+	pgtable_free(table, is_page);
+}
+#else /* CONFIG_SMP */
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
 {
-	quicklist_trim(0, NULL, 25, 16);
+	pgtable_free(table, is_page);
+}
+#endif /* !CONFIG_SMP */
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
+				  unsigned long address)
+{
+	pgtable_page_dtor(ptepage);
+	pgtable_free_tlb(tlb, page_address(ptepage), true);
 }
 
-#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
-#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, addr)		      \
+	pgtable_free_tlb(tlb, pmd, false)
 
 #endif /* _SPARC64_PGALLOC_H */
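In the pgalloc_64.h hunk above, pgtable_free_tlb() smuggles the is_page flag through tlb_remove_table() in bit 0 of the table pointer, which is safe because both the page-backed pte tables and the pgtable_cache objects are aligned well beyond two bytes. A standalone sketch of that encode/decode round trip (illustration only, not the kernel's code; the pgf_* helpers are made-up names):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Tag bit 0 of an (at least 2-byte aligned) pointer with "is_page". */
static void *pgf_encode(void *table, bool is_page)
{
	uintptr_t pgf = (uintptr_t)table;

	if (is_page)
		pgf |= 0x1;	/* to be freed with free_page()       */
	return (void *)pgf;	/* otherwise freed via the kmem_cache */
}

/* Strip the tag again, recovering both the pointer and the flag. */
static void *pgf_decode(void *tagged, bool *is_page)
{
	*is_page = (uintptr_t)tagged & 0x1;
	return (void *)((uintptr_t)tagged & ~(uintptr_t)0x1);
}

int main(void)
{
	long table;	/* stand-in for a table; any alignment > 1 works here */
	bool is_page;
	void *tagged = pgf_encode(&table, true);

	assert(pgf_decode(tagged, &is_page) == (void *)&table);
	assert(is_page);
	return 0;
}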
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1e03c5a6b4f7..adf89329af59 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -95,6 +95,10 @@
 /* PTE bits which are the same in SUN4U and SUN4V format. */
 #define _PAGE_VALID	_AC(0x8000000000000000,UL) /* Valid TTE */
 #define _PAGE_R		_AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
+#define _PAGE_SPECIAL	_AC(0x0200000000000000,UL) /* Special page */
+
+/* Advertise support for _PAGE_SPECIAL */
+#define __HAVE_ARCH_PTE_SPECIAL
 
 /* SUN4U pte bits... */
 #define _PAGE_SZ4MB_4U	_AC(0x6000000000000000,UL) /* 4MB Page */
@@ -104,6 +108,7 @@
 #define _PAGE_NFO_4U	_AC(0x1000000000000000,UL) /* No Fault Only */
 #define _PAGE_IE_4U	_AC(0x0800000000000000,UL) /* Invert Endianness */
 #define _PAGE_SOFT2_4U	_AC(0x07FC000000000000,UL) /* Software bits, set 2 */
+#define _PAGE_SPECIAL_4U	_AC(0x0200000000000000,UL) /* Special page */
 #define _PAGE_RES1_4U	_AC(0x0002000000000000,UL) /* Reserved */
 #define _PAGE_SZ32MB_4U	_AC(0x0001000000000000,UL) /* (Panther) 32MB page */
 #define _PAGE_SZ256MB_4U	_AC(0x2001000000000000,UL) /* (Panther) 256MB page */
@@ -133,6 +138,7 @@
 #define _PAGE_ACCESSED_4V	_AC(0x1000000000000000,UL) /* Accessed (ref'd) */
 #define _PAGE_READ_4V	_AC(0x0800000000000000,UL) /* Readable SW Bit */
 #define _PAGE_WRITE_4V	_AC(0x0400000000000000,UL) /* Writable SW Bit */
+#define _PAGE_SPECIAL_4V	_AC(0x0200000000000000,UL) /* Special page */
 #define _PAGE_PADDR_4V	_AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */
 #define _PAGE_IE_4V	_AC(0x0000000000001000,UL) /* Invert Endianness */
 #define _PAGE_E_4V	_AC(0x0000000000000800,UL) /* side-Effect */
@@ -302,10 +308,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 	: "=r" (mask), "=r" (tmp)
 	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
 	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
-	       _PAGE_SZBITS_4U),
+	       _PAGE_SZBITS_4U | _PAGE_SPECIAL),
 	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
 	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
-	       _PAGE_SZBITS_4V));
+	       _PAGE_SZBITS_4V | _PAGE_SPECIAL));
 
 	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
 }
@@ -502,6 +508,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
+	pte_val(pte) |= _PAGE_SPECIAL;
 	return pte;
 }
 
@@ -607,9 +614,9 @@ static inline unsigned long pte_present(pte_t pte)
 	return val;
 }
 
-static inline int pte_special(pte_t pte)
+static inline unsigned long pte_special(pte_t pte)
 {
-	return 0;
+	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
 #define pmd_set(pmdp, ptep)	\
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 56bbaadef646..edd3d3cde460 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -21,7 +21,7 @@
 #include <linux/of_pdt.h>
 #include <linux/proc_fs.h>
 #include <linux/mutex.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 #define OF_ROOT_NODE_ADDR_CELLS_DEFAULT	2
 #define OF_ROOT_NODE_SIZE_CELLS_DEFAULT	1
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index b928b31424b1..a0e1bcf843a1 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -213,7 +213,6 @@ extern unsigned long profile_pc(struct pt_regs *);
 #else
 #define profile_pc(regs) instruction_pointer(regs)
 #endif
-extern void show_regs(struct pt_regs *);
 #endif /* (__KERNEL__) */
 
 #else /* __ASSEMBLY__ */
@@ -257,7 +256,6 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
 #define instruction_pointer(regs) ((regs)->pc)
 #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
 unsigned long profile_pc(struct pt_regs *);
-extern void show_regs(struct pt_regs *);
 #endif /* (__KERNEL__) */
 
 #else /* (!__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index 093f10843ff2..01c51c704341 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -22,7 +22,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/asi.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 /*
  * Private routines/data
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 20bca8950710..29862a9e9065 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -27,7 +27,7 @@
  */
 
 #include <linux/bitops.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/percpu.h>
 
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);