author     Chen Liqin <liqin.chen@sunplusct.com>       2009-06-19 13:53:49 +0800
committer  Arnd Bergmann <arnd@arndb.de>               2009-06-19 11:40:13 +0200
commit     0402c91af944c61bf788370f03326959a35cb8be (patch)
tree       1f522cec2d62050e7fcaed676005b7d43922276e
parent     6bcf67374137f433e85aa42a18fde9f0e8562901 (diff)
score: update files according to review comments
modified: arch/score/include/asm/cacheflush.h
modified: arch/score/include/asm/delay.h
modified: arch/score/include/asm/errno.h
modified: arch/score/include/asm/pgtable-bits.h
modified: arch/score/include/asm/pgtable.h
modified: arch/score/include/asm/ptrace.h
modified: arch/score/include/asm/unistd.h
modified: arch/score/kernel/entry.S
modified: arch/score/kernel/process.c
modified: arch/score/kernel/ptrace.c
modified: arch/score/kernel/signal.c
modified: arch/score/kernel/sys_score.c
modified: arch/score/kernel/traps.c
modified: arch/score/mm/cache.c
Signed-off-by: Chen Liqin <liqin.chen@sunplusct.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
 arch/score/include/asm/cacheflush.h   |  18
 arch/score/include/asm/delay.h        |  11
 arch/score/include/asm/errno.h        |   1
 arch/score/include/asm/pgtable-bits.h |   2
 arch/score/include/asm/pgtable.h      |  59
 arch/score/include/asm/ptrace.h       |  18
 arch/score/include/asm/unistd.h       |   3
 arch/score/kernel/entry.S             |   6
 arch/score/kernel/process.c           |   2
 arch/score/kernel/ptrace.c            | 144
 arch/score/kernel/signal.c            |   6
 arch/score/kernel/sys_score.c         |   7
 arch/score/kernel/traps.c             |   2
 arch/score/mm/cache.c                 | 125
 14 files changed, 154 insertions(+), 250 deletions(-)
diff --git a/arch/score/include/asm/cacheflush.h b/arch/score/include/asm/cacheflush.h
index 1c74628caf71..07cc8fc457cd 100644
--- a/arch/score/include/asm/cacheflush.h
+++ b/arch/score/include/asm/cacheflush.h
@@ -4,18 +4,16 @@
 /* Keep includes the same across arches. */
 #include <linux/mm.h>

-extern void (*flush_cache_all)(void);
-extern void (*flush_cache_mm)(struct mm_struct *mm);
-extern void (*flush_cache_range)(struct vm_area_struct *vma,
+extern void flush_cache_all(void);
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end);
-extern void (*flush_cache_page)(struct vm_area_struct *vma,
+extern void flush_cache_page(struct vm_area_struct *vma,
 	unsigned long page, unsigned long pfn);
-extern void (*flush_cache_sigtramp)(unsigned long addr);
-extern void (*flush_icache_all)(void);
-extern void (*flush_icache_range)(unsigned long start, unsigned long end);
-extern void (*flush_data_cache_page)(unsigned long addr);
-
-extern void s7_flush_cache_all(void);
+extern void flush_cache_sigtramp(unsigned long addr);
+extern void flush_icache_all(void);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_dcache_range(unsigned long start, unsigned long end);

 #define flush_cache_dup_mm(mm)		do {} while (0)
 #define flush_dcache_page(page)		do {} while (0)
diff --git a/arch/score/include/asm/delay.h b/arch/score/include/asm/delay.h
index ad716f6d922d..6726ec199dc0 100644
--- a/arch/score/include/asm/delay.h
+++ b/arch/score/include/asm/delay.h
@@ -3,17 +3,22 @@

 static inline void __delay(unsigned long loops)
 {
+	/* 3 cycles per loop. */
 	__asm__ __volatile__ (
-		"1:\tsubi\t%0,1\n\t"
+		"1:\tsubi\t%0, 3\n\t"
 		"cmpz.c\t%0\n\t"
-		"bne\t1b\n\t"
+		"ble\t1b\n\t"
 		: "=r" (loops)
 		: "0" (loops));
 }

 static inline void __udelay(unsigned long usecs)
 {
-	__delay(usecs);
+	unsigned long loops_per_usec;
+
+	loops_per_usec = (loops_per_jiffy * HZ) / 1000000;
+
+	__delay(usecs * loops_per_usec);
 }

 #define udelay(usecs) __udelay(usecs)
diff --git a/arch/score/include/asm/errno.h b/arch/score/include/asm/errno.h
index 7cd3e1f07c0b..29ff39d5ab47 100644
--- a/arch/score/include/asm/errno.h
+++ b/arch/score/include/asm/errno.h
@@ -2,6 +2,5 @@
 #define _ASM_SCORE_ERRNO_H

 #include <asm-generic/errno.h>
-#define EMAXERRNO 1024

 #endif /* _ASM_SCORE_ERRNO_H */
diff --git a/arch/score/include/asm/pgtable-bits.h b/arch/score/include/asm/pgtable-bits.h
index ca16d357a644..7d65a96a82e5 100644
--- a/arch/score/include/asm/pgtable-bits.h
+++ b/arch/score/include/asm/pgtable-bits.h
@@ -17,6 +17,8 @@
 #define _CACHE_MASK		(1<<3)
 #define _PAGE_BUFFERABLE	(1<<4)	/*Fallow Spec. */

+#define __READABLE	(_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
+#define __WRITEABLE	(_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
 #define _PAGE_CHG_MASK \
 	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_CACHE)
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index 0f7177a42205..5e913e57c671 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -106,24 +106,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 	((swp_entry_t) { pte_val(pte)})
 #define __swp_entry_to_pte(x)	((pte_t) {(x).val})

-#define __P000	__pgprot(0)
-#define __P001	__pgprot(0)
-#define __P010	__pgprot(0)
-#define __P011	__pgprot(0)
-#define __P100	__pgprot(0)
-#define __P101	__pgprot(0)
-#define __P110	__pgprot(0)
-#define __P111	__pgprot(0)
-
-#define __S000	__pgprot(0)
-#define __S001	__pgprot(0)
-#define __S010	__pgprot(0)
-#define __S011	__pgprot(0)
-#define __S100	__pgprot(0)
-#define __S101	__pgprot(0)
-#define __S110	__pgprot(0)
-#define __S111	__pgprot(0)
-
 #define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
 static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
@@ -136,10 +118,15 @@ static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
 	remap_pfn_range(vma, vaddr, pfn, size, prot)

-#define pgd_present(pgd)	(1)	/* pages are always present on non MMU */
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+#define pgd_present(pgd)	(1)
 #define pgd_none(pgd)		(0)
 #define pgd_bad(pgd)		(0)
-#define pgd_clear(pgdp)
+#define pgd_clear(pgdp)		do { } while (0)

 #define kern_addr_valid(addr)	(1)
 #define	pmd_offset(a, b)	((void *) 0)
@@ -150,11 +137,33 @@ static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

 #define pud_offset(pgd, address)	((pud_t *) pgd)

-#define PAGE_NONE	__pgprot(0)	/* these mean nothing to non MMU */
-#define PAGE_SHARED	__pgprot(0)	/* these mean nothing to non MMU */
-#define PAGE_COPY	__pgprot(0)	/* these mean nothing to non MMU */
-#define PAGE_READONLY	__pgprot(0)	/* these mean nothing to non MMU */
-#define PAGE_KERNEL	__pgprot(0)	/* these mean nothing to non MMU */
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_CACHE)
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+				 _PAGE_CACHE)
+#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE)
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE)
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+				 _PAGE_GLOBAL | _PAGE_CACHE)
+#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
+				      __WRITEABLE | _PAGE_GLOBAL & ~_PAGE_CACHE)
+
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_READONLY
+#define __P101	PAGE_READONLY
+#define __P110	PAGE_COPY
+#define __P111	PAGE_COPY
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_READONLY
+#define __S101	PAGE_READONLY
+#define __S110	PAGE_SHARED
+#define __S111	PAGE_SHARED

 #define pgprot_noncached(x) (x)
diff --git a/arch/score/include/asm/ptrace.h b/arch/score/include/asm/ptrace.h
index 1a4900ac49f3..66b14c8891cf 100644
--- a/arch/score/include/asm/ptrace.h
+++ b/arch/score/include/asm/ptrace.h
@@ -1,6 +1,9 @@
 #ifndef _ASM_SCORE_PTRACE_H
 #define _ASM_SCORE_PTRACE_H

+#define PTRACE_GETREGS		12
+#define PTRACE_SETREGS		13
+
 #define PC		32
 #define CONDITION	33
 #define ECR		34
@@ -76,12 +79,17 @@ struct pt_regs {
  */
 #define user_mode(regs) ((regs->cp0_psr & 8) == 8)

-#define instruction_pointer(regs) (0)
-#define profile_pc(regs) instruction_pointer(regs)
+#define instruction_pointer(regs)	((unsigned long)(regs)->cp0_epc)
+#define profile_pc(regs)		instruction_pointer(regs)

-extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit);
+extern void do_syscall_trace(struct pt_regs *regs, int entryexit);
 extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *);
-extern void clear_single_step(struct task_struct *);
-#endif
+extern int read_tsk_short(struct task_struct *, unsigned long,
+			 unsigned short *);
+
+#define arch_has_single_step()	(1)
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
+#endif /* __KERNEL__ */

 #endif /* _ASM_SCORE_PTRACE_H */
diff --git a/arch/score/include/asm/unistd.h b/arch/score/include/asm/unistd.h
index 9aa3a159bbf6..f0f84deeb564 100644
--- a/arch/score/include/asm/unistd.h
+++ b/arch/score/include/asm/unistd.h
@@ -1,7 +1,8 @@
-#ifndef _ASM_SCORE_UNISTD_H
+#if !defined(_ASM_SCORE_UNISTD_H) || defined(__SYSCALL)
 #define _ASM_SCORE_UNISTD_H

 #define __ARCH_HAVE_MMU
+#define __ARCH_WANT_IPC_PARSE_VERSION

 #include <asm-generic/unistd.h>
diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
index 6c6b7ea58afa..0af89b2f16b0 100644
--- a/arch/score/kernel/entry.S
+++ b/arch/score/kernel/entry.S
@@ -23,7 +23,7 @@
  * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */

-#include <linux/errno.h>
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/linkage.h>

@@ -434,7 +434,7 @@ stack_done:
 	sw	r8, [r0, PT_R7]
 	b	2f
 1:
-	cmpi.c	r4, -EMAXERRNO-1	# -EMAXERRNO - 1=-1134
+	cmpi.c	r4, -MAX_ERRNO - 1
 	ble	2f
 	ldi	r8, 0x1;
 	sw	r8, [r0, PT_R7]
@@ -466,7 +466,7 @@ syscall_trace_entry:
 	lw	r7, [r0, PT_R7]
 	brl	r8

-	li	r8, -EMAXERRNO - 1	# error?
+	li	r8, -MAX_ERRNO - 1
 	sw	r8, [r0, PT_R7]		# set error flag

 	neg	r4, r4			# error
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index aaa3085251fa..d93966f7ac83 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -56,7 +56,7 @@ void __noreturn cpu_idle(void)
 	}
 }

-asmlinkage void ret_from_fork(void);
+void ret_from_fork(void);

 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
 {
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c
index 8fe7209355aa..19911e3187be 100644
--- a/arch/score/kernel/ptrace.c
+++ b/arch/score/kernel/ptrace.c
@@ -80,7 +80,35 @@ write_tsk_long(struct task_struct *child,
 	return copied != sizeof(val) ? -EIO : 0;
 }

-void set_single_step(struct task_struct *child)
+/*
+ * Get all user integer registers.
+ */
+static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(tsk);
+
+	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
+}
+
+/*
+ * Set all user integer registers.
+ */
+static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
+{
+	struct pt_regs newregs;
+	int ret;
+
+	ret = -EFAULT;
+	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
+		struct pt_regs *regs = task_pt_regs(tsk);
+		*regs = newregs;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+void user_enable_single_step(struct task_struct *child)
 {
 	/* far_epc is the target of branch */
 	unsigned int epc, far_epc = 0;
@@ -201,7 +229,7 @@ void set_single_step(struct task_struct *child)
 	}
 }

-void clear_single_step(struct task_struct *child)
+void user_disable_single_step(struct task_struct *child)
 {
 	if (child->thread.insn1_type == 0)
 		write_tsk_short(child, child->thread.addr1,
@@ -230,54 +258,17 @@ void clear_single_step(struct task_struct *child)
 	child->thread.ss_nextcnt = 0;
 }

-
-void ptrace_disable(struct task_struct *child) {}
+void ptrace_disable(struct task_struct *child)
+{
+	user_disable_single_step(child);
+}

 long
 arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	int ret;

-	if (request == PTRACE_TRACEME) {
-		/* are we already being traced? */
-		if (current->ptrace & PT_PTRACED)
-			return -EPERM;
-
-		/* set the ptrace bit in the process flags. */
-		current->ptrace |= PT_PTRACED;
-		return 0;
-	}
-
-	ret = -ESRCH;
-	if (!child)
-		return ret;
-
-	ret = -EPERM;
-
-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
-		return ret;
-	}
-
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret < 0)
-		return ret;
-
 	switch (request) {
-	case PTRACE_PEEKTEXT: /* read word at location addr. */
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-
-		ret = put_user(tmp, (unsigned long *) data);
-		return ret;
-	}
-
 	/* Read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
 		struct pt_regs *regs;
@@ -329,15 +320,6 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		return ret;
 	}

-	case PTRACE_POKETEXT: /* write the word at location addr. */
-	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1)
-				== sizeof(data))
-			break;
-		ret = -EIO;
-		return ret;
-
 	case PTRACE_POKEUSR: {
 		struct pt_regs *regs;
 		ret = 0;
@@ -372,64 +354,16 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 	}

-	case PTRACE_SYSCALL:	/* continue and stop at next
-				   (return from) syscall. */
-	case PTRACE_CONT: {	/* restart after signal. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-
-		child->exit_code = data;
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
-
-	/*
-	 * make the child exit. Best I can do is send it a sigkill.
-	 * perhaps it should be put in the status that it wants to
-	 * exit.
-	 */
-	case PTRACE_KILL:
-		ret = 0;
-		if (child->state == EXIT_ZOMBIE)	/* already dead. */
-			break;
-		child->exit_code = SIGKILL;
-		clear_single_step(child);
-		wake_up_process(child);
+	case PTRACE_GETREGS:
+		ret = ptrace_getregs(child, (void __user *)data);
 		break;

-	case PTRACE_SINGLESTEP: {	/* set the trap flag. */
-		ret = -EIO;
-		if ((unsigned long) data > _NSIG)
-			break;
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		set_single_step(child);
-		child->exit_code = data;
-		/* give it a chance to run. */
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
-
-	case PTRACE_DETACH: /* detach a process that was attached. */
-		ret = ptrace_detach(child, data);
-		break;
-
-	case PTRACE_SETOPTIONS:
-		if (data & PTRACE_O_TRACESYSGOOD)
-			child->ptrace |= PT_TRACESYSGOOD;
-		else
-			child->ptrace &= ~PT_TRACESYSGOOD;
-		ret = 0;
+	case PTRACE_SETREGS:
+		ret = ptrace_setregs(child, (void __user *)data);
 		break;

 	default:
-		ret = -EIO;
+		ret = ptrace_request(child, request, addr, data);
 		break;
 	}
diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c
index b4ed1b3f8072..5c004084d17d 100644
--- a/arch/score/kernel/signal.c
+++ b/arch/score/kernel/signal.c
@@ -132,7 +132,7 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 	return (void *)((sp - frame_size) & ~7);
 }

-asmlinkage int score_sigaltstack(struct pt_regs *regs)
+int score_sigaltstack(struct pt_regs *regs)
 {
 	const stack_t *uss = (const stack_t *) regs->regs[4];
 	stack_t *uoss = (stack_t *) regs->regs[5];
@@ -141,7 +141,7 @@ asmlinkage int score_sigaltstack(struct pt_regs *regs)
 	return do_sigaltstack(uss, uoss, usp);
 }

-asmlinkage void score_rt_sigreturn(struct pt_regs *regs)
+void score_rt_sigreturn(struct pt_regs *regs)
 {
 	struct rt_sigframe __user *frame;
 	sigset_t set;
@@ -276,7 +276,7 @@ int handle_signal(unsigned long sig, siginfo_t *info,
 	return ret;
 }

-asmlinkage void do_signal(struct pt_regs *regs)
+void do_signal(struct pt_regs *regs)
 {
 	struct k_sigaction ka;
 	sigset_t *oldset;
diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c
index 6a60d1ee5330..68655f4cbce9 100644
--- a/arch/score/kernel/sys_score.c
+++ b/arch/score/kernel/sys_score.c
@@ -64,8 +64,7 @@ sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
 * Clone a task - this clones the calling program thread.
 * This is called indirectly via a small wrapper
 */
-asmlinkage int
-score_clone(struct pt_regs *regs)
+int score_clone(struct pt_regs *regs)
 {
 	unsigned long clone_flags;
 	unsigned long newsp;
@@ -93,7 +92,7 @@ score_clone(struct pt_regs *regs)
 * sys_execve() executes a new program.
 * This is called indirectly via a small wrapper
 */
-asmlinkage int score_execve(struct pt_regs *regs)
+int score_execve(struct pt_regs *regs)
 {
 	int error;
 	char *filename;
@@ -114,7 +113,7 @@ asmlinkage int score_execve(struct pt_regs *regs)
 * If we ever come here the user sp is bad. Zap the process right away.
 * Due to the bad stack signaling wouldn't work.
 */
-asmlinkage void bad_stack(void)
+void bad_stack(void)
 {
 	do_exit(SIGSEGV);
 }
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index 957ae9eb3567..0e46fb19a848 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -252,7 +252,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
 	if (current->thread.single_step == 1) {
 		if ((epc == current->thread.addr1) ||
 		    (epc == current->thread.addr2)) {
-			clear_single_step(current);
+			user_disable_single_step(current);
 			force_sig(SIGTRAP, current);
 			return;
 		} else
diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c
index 1ebc67f18c6d..dbac9d9dfddd 100644
--- a/arch/score/mm/cache.c
+++ b/arch/score/mm/cache.c
@@ -32,34 +32,26 @@

 #include <asm/mmu_context.h>

-/* Cache operations. */
-void (*flush_cache_all)(void);
-void (*__flush_cache_all)(void);
-void (*flush_cache_mm)(struct mm_struct *mm);
-void (*flush_cache_range)(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *vma,
-	unsigned long page, unsigned long pfn);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
-void (*__flush_cache_vmap)(void);
-void (*__flush_cache_vunmap)(void);
-void (*flush_cache_sigtramp)(unsigned long addr);
-void (*flush_data_cache_page)(unsigned long addr);
-EXPORT_SYMBOL(flush_data_cache_page);
-void (*flush_icache_all)(void);
-
-/*Score 7 cache operations*/
-static inline void s7___flush_cache_all(void);
-static void s7_flush_cache_mm(struct mm_struct *mm);
-static void s7_flush_cache_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end);
-static void s7_flush_cache_page(struct vm_area_struct *vma,
-	unsigned long page, unsigned long pfn);
-static void s7_flush_icache_range(unsigned long start, unsigned long end);
-static void s7_flush_cache_sigtramp(unsigned long addr);
-static void s7_flush_data_cache_page(unsigned long addr);
-static void s7_flush_dcache_range(unsigned long start, unsigned long end);
+/*
+Just flush entire Dcache!!
+You must ensure the page doesn't include instructions, because
+the function will not flush the Icache.
+The addr must be cache aligned.
+*/
+static void flush_data_cache_page(unsigned long addr)
+{
+	unsigned int i;
+	for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) {
+		__asm__ __volatile__(
+		"cache 0x0e, [%0, 0]\n"
+		"cache 0x1a, [%0, 0]\n"
+		"nop\n"
+		: : "r" (addr));
+		addr += L1_CACHE_BYTES;
+	}
+}

+/* called by update_mmu_cache. */
 void __update_cache(struct vm_area_struct *vma, unsigned long address,
 		pte_t pte)
 {
@@ -74,7 +66,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
 		addr = (unsigned long) page_address(page);
 		if (exec)
-			s7_flush_data_cache_page(addr);
+			flush_data_cache_page(addr);
 		clear_bit(PG_arch_1, &page->flags);
 	}
 }
@@ -101,44 +93,22 @@ static inline void setup_protection_map(void)

 void __devinit cpu_cache_init(void)
 {
-	flush_cache_all = s7_flush_cache_all;
-	__flush_cache_all = s7___flush_cache_all;
-	flush_cache_mm = s7_flush_cache_mm;
-	flush_cache_range = s7_flush_cache_range;
-	flush_cache_page = s7_flush_cache_page;
-	flush_icache_range = s7_flush_icache_range;
-	flush_cache_sigtramp = s7_flush_cache_sigtramp;
-	flush_data_cache_page = s7_flush_data_cache_page;
-
 	setup_protection_map();
 }

-void s7_flush_icache_all(void)
+void flush_icache_all(void)
 {
 	__asm__ __volatile__(
-	"la r8, s7_flush_icache_all\n"
+	"la r8, flush_icache_all\n"
 	"cache 0x10, [r8, 0]\n"
 	"nop\nnop\nnop\nnop\nnop\nnop\n"
 	: : : "r8");
 }

-void s7_flush_dcache_all(void)
-{
-	__asm__ __volatile__(
-	"la r8, s7_flush_dcache_all\n"
-	"cache 0x1f, [r8, 0]\n"
-	"nop\nnop\nnop\nnop\nnop\nnop\n"
-	"cache 0x1a, [r8, 0]\n"
-	"nop\nnop\nnop\nnop\nnop\nnop\n"
-	: : : "r8");
-}
-
-void s7_flush_cache_all(void)
+void flush_dcache_all(void)
 {
 	__asm__ __volatile__(
-	"la r8, s7_flush_cache_all\n"
-	"cache 0x10, [r8, 0]\n"
-	"nop\nnop\nnop\nnop\nnop\nnop\n"
+	"la r8, flush_dcache_all\n"
 	"cache 0x1f, [r8, 0]\n"
 	"nop\nnop\nnop\nnop\nnop\nnop\n"
 	"cache 0x1a, [r8, 0]\n"
@@ -146,10 +116,10 @@ void s7_flush_cache_all(void)
 	: : : "r8");
 }

-void s7___flush_cache_all(void)
+void flush_cache_all(void)
 {
 	__asm__ __volatile__(
-	"la r8, s7_flush_cache_all\n"
+	"la r8, flush_cache_all\n"
 	"cache 0x10, [r8, 0]\n"
 	"nop\nnop\nnop\nnop\nnop\nnop\n"
 	"cache 0x1f, [r8, 0]\n"
@@ -159,11 +129,11 @@ void s7___flush_cache_all(void)
 	: : : "r8");
 }

-static void s7_flush_cache_mm(struct mm_struct *mm)
+void flush_cache_mm(struct mm_struct *mm)
 {
 	if (!(mm->context))
 		return;
-	s7_flush_cache_all();
+	flush_cache_all();
 }

 /*if we flush a range precisely , the processing may be very long.
@@ -176,8 +146,7 @@ The interface is provided in hopes that the port can find a suitably
 efficient method for removing multiple page sized regions
 from the cache.
 */
-static void
-s7_flush_cache_range(struct vm_area_struct *vma,
+void flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -209,27 +178,26 @@ s7_flush_cache_range(struct vm_area_struct *vma,
 		tmpend = (start | (PAGE_SIZE-1)) > end ?
 				 end : (start | (PAGE_SIZE-1));

-		s7_flush_dcache_range(start, tmpend);
+		flush_dcache_range(start, tmpend);
 		if (exec)
-			s7_flush_icache_range(start, tmpend);
+			flush_icache_range(start, tmpend);
 		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
 	}
 }

-static void
-s7_flush_cache_page(struct vm_area_struct *vma,
+void flush_cache_page(struct vm_area_struct *vma,
 	unsigned long addr, unsigned long pfn)
 {
 	int exec = vma->vm_flags & VM_EXEC;
 	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

-	s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
+	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

 	if (exec)
-		s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
+		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
 }

-static void s7_flush_cache_sigtramp(unsigned long addr)
+void flush_cache_sigtramp(unsigned long addr)
 {
 	__asm__ __volatile__(
 	"cache 0x02, [%0, 0]\n"
@@ -248,30 +216,11 @@ static void s7_flush_cache_sigtramp(unsigned long addr)
 }

 /*
-Just flush entire Dcache!!
-You must ensure the page doesn't include instructions, because
-the function will not flush the Icache.
-The addr must be cache aligned.
-*/
-static void s7_flush_data_cache_page(unsigned long addr)
-{
-	unsigned int i;
-	for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) {
-		__asm__ __volatile__(
-		"cache 0x0e, [%0, 0]\n"
-		"cache 0x1a, [%0, 0]\n"
-		"nop\n"
-		: : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-/*
 1. WB and invalid a cache line of Dcache
 2. Drain Write Buffer
 the range must be smaller than PAGE_SIZE
 */
-static void s7_flush_dcache_range(unsigned long start, unsigned long end)
+void flush_dcache_range(unsigned long start, unsigned long end)
 {
 	int size, i;

@@ -290,7 +239,7 @@ static void s7_flush_dcache_range(unsigned long start, unsigned long end)
 	}
 }

-static void s7_flush_icache_range(unsigned long start, unsigned long end)
+void flush_icache_range(unsigned long start, unsigned long end)
 {
 	int size, i;
 	start = start & ~(L1_CACHE_BYTES - 1);
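
Aside on the __udelay() change above (illustrative only, not part of the patch): the old code treated one delay-loop iteration as one microsecond, while the new code scales the request by loops_per_jiffy, the boot-time calibration value, converted to loops per microsecond. The stand-alone C sketch below mirrors that arithmetic in user space so it can be run and inspected; the loops_per_jiffy and HZ values are made-up placeholders, not numbers taken from the patch or from any particular SCORE board.

#include <stdio.h>

int main(void)
{
	/* Hypothetical calibration values, NOT taken from the patch. */
	unsigned long loops_per_jiffy = 498073UL;	/* pretend boot-time calibration */
	unsigned long hz = 100UL;			/* pretend CONFIG_HZ */
	unsigned long usecs = 250UL;			/* requested delay */

	/* Same formula the patch adds to __udelay(): loops per microsecond. */
	unsigned long loops_per_usec = (loops_per_jiffy * hz) / 1000000UL;

	/* Total delay-loop iterations that would be handed to __delay(). */
	unsigned long loops = usecs * loops_per_usec;

	printf("%lu loops to busy-wait roughly %lu microseconds\n", loops, usecs);
	return 0;
}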