Diffstat (limited to 'arch/mips/lib/memcpy.S')
-rw-r--r--	arch/mips/lib/memcpy.S	| 250
1 file changed, 147 insertions(+), 103 deletions(-)
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index a526c62cb76a..c06cccf60bec 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -9,6 +9,7 @@
  * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
  * Copyright (C) 2002 Broadcom, Inc.
  *   memcpy/copy_user author: Mark Vandevoorde
+ * Copyright (C) 2007  Maciej W. Rozycki
  *
  * Mnemonic names for arguments to memcpy/__copy_user
  */
@@ -175,7 +176,11 @@
 	.text
 	.set	noreorder
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
 	.set	noat
+#else
+	.set	at=v1
+#endif
 
 /*
  * A combined memcpy/__copy_user
@@ -186,7 +191,7 @@
 	.align	5
 LEAF(memcpy)				/* a0=dst a1=src a2=len */
 	move	v0, dst			/* return value */
-__memcpy:
+.L__memcpy:
 FEXPORT(__copy_user)
 	/*
 	 * Note: dst & src may be unaligned, len may be 0
@@ -194,6 +199,7 @@ FEXPORT(__copy_user)
 	 */
 #define rem t8
 
+	R10KCBARRIER(0(ra))
 	/*
 	 * The "issue break"s below are very approximate.
 	 * Issue delays for dcache fills will perturb the schedule, as will
@@ -207,44 +213,45 @@ FEXPORT(__copy_user)
 	and	t1, dst, ADDRMASK
 	PREF(	0, 1*32(src) )
 	PREF(	1, 1*32(dst) )
-	bnez	t2, copy_bytes_checklen
+	bnez	t2, .Lcopy_bytes_checklen
 	 and	t0, src, ADDRMASK
 	PREF(	0, 2*32(src) )
 	PREF(	1, 2*32(dst) )
-	bnez	t1, dst_unaligned
+	bnez	t1, .Ldst_unaligned
 	 nop
-	bnez	t0, src_unaligned_dst_aligned
+	bnez	t0, .Lsrc_unaligned_dst_aligned
 	/*
 	 * use delay slot for fall-through
 	 * src and dst are aligned; need to compute rem
 	 */
-both_aligned:
+.Lboth_aligned:
 	SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
-	beqz	t0, cleanup_both_aligned	# len < 8*NBYTES
+	beqz	t0, .Lcleanup_both_aligned	# len < 8*NBYTES
 	 and	rem, len, (8*NBYTES-1)		# rem = len % (8*NBYTES)
 	PREF(	0, 3*32(src) )
 	PREF(	1, 3*32(dst) )
 	.align	4
 1:
-EXC(	LOAD	t0, UNIT(0)(src), l_exc)
-EXC(	LOAD	t1, UNIT(1)(src), l_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src), l_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src), l_exc_copy)
+	R10KCBARRIER(0(ra))
+EXC(	LOAD	t0, UNIT(0)(src), .Ll_exc)
+EXC(	LOAD	t1, UNIT(1)(src), .Ll_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src), .Ll_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src), .Ll_exc_copy)
 	SUB	len, len, 8*NBYTES
-EXC(	LOAD	t4, UNIT(4)(src), l_exc_copy)
-EXC(	LOAD	t7, UNIT(5)(src), l_exc_copy)
-EXC(	STORE	t0, UNIT(0)(dst), s_exc_p8u)
-EXC(	STORE	t1, UNIT(1)(dst), s_exc_p7u)
-EXC(	LOAD	t0, UNIT(6)(src), l_exc_copy)
-EXC(	LOAD	t1, UNIT(7)(src), l_exc_copy)
+EXC(	LOAD	t4, UNIT(4)(src), .Ll_exc_copy)
+EXC(	LOAD	t7, UNIT(5)(src), .Ll_exc_copy)
+EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc_p8u)
+EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc_p7u)
+EXC(	LOAD	t0, UNIT(6)(src), .Ll_exc_copy)
+EXC(	LOAD	t1, UNIT(7)(src), .Ll_exc_copy)
 	ADD	src, src, 8*NBYTES
 	ADD	dst, dst, 8*NBYTES
-EXC(	STORE	t2, UNIT(-6)(dst), s_exc_p6u)
-EXC(	STORE	t3, UNIT(-5)(dst), s_exc_p5u)
-EXC(	STORE	t4, UNIT(-4)(dst), s_exc_p4u)
-EXC(	STORE	t7, UNIT(-3)(dst), s_exc_p3u)
-EXC(	STORE	t0, UNIT(-2)(dst), s_exc_p2u)
-EXC(	STORE	t1, UNIT(-1)(dst), s_exc_p1u)
+EXC(	STORE	t2, UNIT(-6)(dst), .Ls_exc_p6u)
+EXC(	STORE	t3, UNIT(-5)(dst), .Ls_exc_p5u)
+EXC(	STORE	t4, UNIT(-4)(dst), .Ls_exc_p4u)
+EXC(	STORE	t7, UNIT(-3)(dst), .Ls_exc_p3u)
+EXC(	STORE	t0, UNIT(-2)(dst), .Ls_exc_p2u)
+EXC(	STORE	t1, UNIT(-1)(dst), .Ls_exc_p1u)
 	PREF(	0, 8*32(src) )
 	PREF(	1, 8*32(dst) )
 	bne	len, rem, 1b
@@ -253,39 +260,45 @@ EXC( STORE t1, UNIT(-1)(dst), s_exc_p1u)
 	/*
 	 * len == rem == the number of bytes left to copy < 8*NBYTES
 	 */
-cleanup_both_aligned:
-	beqz	len, done
+.Lcleanup_both_aligned:
+	beqz	len, .Ldone
 	 sltu	t0, len, 4*NBYTES
-	bnez	t0, less_than_4units
+	bnez	t0, .Lless_than_4units
 	 and	rem, len, (NBYTES-1)		# rem = len % NBYTES
 	/*
 	 * len >= 4*NBYTES
 	 */
-EXC(	LOAD	t0, UNIT(0)(src), l_exc)
-EXC(	LOAD	t1, UNIT(1)(src), l_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src), l_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src), l_exc_copy)
+EXC(	LOAD	t0, UNIT(0)(src), .Ll_exc)
+EXC(	LOAD	t1, UNIT(1)(src), .Ll_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src), .Ll_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src), .Ll_exc_copy)
 	SUB	len, len, 4*NBYTES
 	ADD	src, src, 4*NBYTES
-EXC(	STORE	t0, UNIT(0)(dst), s_exc_p4u)
-EXC(	STORE	t1, UNIT(1)(dst), s_exc_p3u)
-EXC(	STORE	t2, UNIT(2)(dst), s_exc_p2u)
-EXC(	STORE	t3, UNIT(3)(dst), s_exc_p1u)
-	beqz	len, done
-	 ADD	dst, dst, 4*NBYTES
-less_than_4units:
+	R10KCBARRIER(0(ra))
+EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc_p4u)
+EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc_p3u)
+EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc_p2u)
+EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc_p1u)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, 4*NBYTES
+	beqz	len, .Ldone
+	.set	noreorder
+.Lless_than_4units:
 	/*
 	 * rem = len % NBYTES
 	 */
-	beq	rem, len, copy_bytes
+	beq	rem, len, .Lcopy_bytes
 	 nop
 1:
-EXC(	LOAD	t0, 0(src), l_exc)
+	R10KCBARRIER(0(ra))
+EXC(	LOAD	t0, 0(src), .Ll_exc)
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-EXC(	STORE	t0, 0(dst), s_exc_p1u)
+EXC(	STORE	t0, 0(dst), .Ls_exc_p1u)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, NBYTES
 	bne	rem, len, 1b
-	 ADD	dst, dst, NBYTES
+	.set	noreorder
 
 	/*
 	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
@@ -299,17 +312,17 @@ EXC( STORE t0, 0(dst), s_exc_p1u)
 	 * more instruction-level parallelism.
 	 */
 #define bits t2
-	beqz	len, done
+	beqz	len, .Ldone
 	 ADD	t1, dst, len			# t1 is just past last byte of dst
 	li	bits, 8*NBYTES
 	SLL	rem, len, 3			# rem = number of bits to keep
-EXC(	LOAD	t0, 0(src), l_exc)
+EXC(	LOAD	t0, 0(src), .Ll_exc)
 	SUB	bits, bits, rem			# bits = number of bits to discard
 	SHIFT_DISCARD t0, t0, bits
-EXC(	STREST	t0, -1(t1), s_exc)
+EXC(	STREST	t0, -1(t1), .Ls_exc)
 	jr	ra
 	 move	len, zero
-dst_unaligned:
+.Ldst_unaligned:
 	/*
 	 * dst is unaligned
 	 * t0 = src & ADDRMASK
@@ -320,22 +333,23 @@ dst_unaligned:
 	 * Set match = (src and dst have same alignment)
 	 */
 #define match rem
-EXC(	LDFIRST	t3, FIRST(0)(src), l_exc)
+EXC(	LDFIRST	t3, FIRST(0)(src), .Ll_exc)
 	ADD	t2, zero, NBYTES
-EXC(	LDREST	t3, REST(0)(src), l_exc_copy)
+EXC(	LDREST	t3, REST(0)(src), .Ll_exc_copy)
 	SUB	t2, t2, t1			# t2 = number of bytes copied
 	xor	match, t0, t1
-EXC(	STFIRST t3, FIRST(0)(dst), s_exc)
-	beq	len, t2, done
+	R10KCBARRIER(0(ra))
+EXC(	STFIRST t3, FIRST(0)(dst), .Ls_exc)
+	beq	len, t2, .Ldone
 	 SUB	len, len, t2
 	ADD	dst, dst, t2
-	beqz	match, both_aligned
+	beqz	match, .Lboth_aligned
 	 ADD	src, src, t2
-src_unaligned_dst_aligned:
+.Lsrc_unaligned_dst_aligned:
 	SRL	t0, len, LOG_NBYTES+2		# +2 for 4 units/iter
 	PREF(	0, 3*32(src) )
-	beqz	t0, cleanup_src_unaligned
+	beqz	t0, .Lcleanup_src_unaligned
 	 and	rem, len, (4*NBYTES-1)		# rem = len % 4*NBYTES
 	PREF(	1, 3*32(dst) )
 1:
@@ -345,52 +359,59 @@ src_unaligned_dst_aligned:
 	 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 	 * are to the same unit (unless src is aligned, but it's not).
 	 */
-EXC(	LDFIRST	t0, FIRST(0)(src), l_exc)
-EXC(	LDFIRST	t1, FIRST(1)(src), l_exc_copy)
+	R10KCBARRIER(0(ra))
+EXC(	LDFIRST	t0, FIRST(0)(src), .Ll_exc)
+EXC(	LDFIRST	t1, FIRST(1)(src), .Ll_exc_copy)
 	SUB	len, len, 4*NBYTES
-EXC(	LDREST	t0, REST(0)(src), l_exc_copy)
-EXC(	LDREST	t1, REST(1)(src), l_exc_copy)
-EXC(	LDFIRST	t2, FIRST(2)(src), l_exc_copy)
-EXC(	LDFIRST	t3, FIRST(3)(src), l_exc_copy)
-EXC(	LDREST	t2, REST(2)(src), l_exc_copy)
-EXC(	LDREST	t3, REST(3)(src), l_exc_copy)
+EXC(	LDREST	t0, REST(0)(src), .Ll_exc_copy)
+EXC(	LDREST	t1, REST(1)(src), .Ll_exc_copy)
+EXC(	LDFIRST	t2, FIRST(2)(src), .Ll_exc_copy)
+EXC(	LDFIRST	t3, FIRST(3)(src), .Ll_exc_copy)
+EXC(	LDREST	t2, REST(2)(src), .Ll_exc_copy)
+EXC(	LDREST	t3, REST(3)(src), .Ll_exc_copy)
 	PREF(	0, 9*32(src) )			# 0 is PREF_LOAD  (not streamed)
 	ADD	src, src, 4*NBYTES
 #ifdef CONFIG_CPU_SB1
 	nop					# improves slotting
 #endif
-EXC(	STORE	t0, UNIT(0)(dst), s_exc_p4u)
-EXC(	STORE	t1, UNIT(1)(dst), s_exc_p3u)
-EXC(	STORE	t2, UNIT(2)(dst), s_exc_p2u)
-EXC(	STORE	t3, UNIT(3)(dst), s_exc_p1u)
+EXC(	STORE	t0, UNIT(0)(dst), .Ls_exc_p4u)
+EXC(	STORE	t1, UNIT(1)(dst), .Ls_exc_p3u)
+EXC(	STORE	t2, UNIT(2)(dst), .Ls_exc_p2u)
+EXC(	STORE	t3, UNIT(3)(dst), .Ls_exc_p1u)
 	PREF(	1, 9*32(dst) )			# 1 is PREF_STORE (not streamed)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, 4*NBYTES
 	bne	len, rem, 1b
-	 ADD	dst, dst, 4*NBYTES
+	.set	noreorder
 
-cleanup_src_unaligned:
-	beqz	len, done
+.Lcleanup_src_unaligned:
+	beqz	len, .Ldone
 	 and	rem, len, NBYTES-1		# rem = len % NBYTES
-	beq	rem, len, copy_bytes
+	beq	rem, len, .Lcopy_bytes
 	 nop
 1:
-EXC(	LDFIRST	t0, FIRST(0)(src), l_exc)
-EXC(	LDREST	t0, REST(0)(src), l_exc_copy)
+	R10KCBARRIER(0(ra))
+EXC(	LDFIRST	t0, FIRST(0)(src), .Ll_exc)
+EXC(	LDREST	t0, REST(0)(src), .Ll_exc_copy)
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-EXC(	STORE	t0, 0(dst), s_exc_p1u)
+EXC(	STORE	t0, 0(dst), .Ls_exc_p1u)
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, NBYTES
 	bne	len, rem, 1b
-	 ADD	dst, dst, NBYTES
+	.set	noreorder
 
-copy_bytes_checklen:
-	beqz	len, done
+.Lcopy_bytes_checklen:
+	beqz	len, .Ldone
 	 nop
-copy_bytes:
+.Lcopy_bytes:
 	/* 0 < len < NBYTES  */
+	R10KCBARRIER(0(ra))
 #define COPY_BYTE(N)			\
-EXC(	lb	t0, N(src), l_exc);	\
+EXC(	lb	t0, N(src), .Ll_exc);	\
 	SUB	len, len, 1;		\
-	beqz	len, done;		\
-EXC(	sb	t0, N(dst), s_exc_p1)
+	beqz	len, .Ldone;		\
+EXC(	sb	t0, N(dst), .Ls_exc_p1)
 
 	COPY_BYTE(0)
 	COPY_BYTE(1)
@@ -400,16 +421,16 @@ EXC( sb t0, N(dst), s_exc_p1)
 	COPY_BYTE(4)
 	COPY_BYTE(5)
 #endif
-EXC(	lb	t0, NBYTES-2(src), l_exc)
+EXC(	lb	t0, NBYTES-2(src), .Ll_exc)
 	SUB	len, len, 1
 	jr	ra
-EXC(	sb	t0, NBYTES-2(dst), s_exc_p1)
-done:
+EXC(	sb	t0, NBYTES-2(dst), .Ls_exc_p1)
+.Ldone:
 	jr	ra
 	 nop
 	END(memcpy)
 
-l_exc_copy:
+.Ll_exc_copy:
 	/*
 	 * Copy bytes from src until faulting load address (or until a
 	 * lb faults)
@@ -424,12 +445,14 @@ l_exc_copy:
 	 nop
 	LOAD	t0, THREAD_BUADDR(t0)
 1:
-EXC(	lb	t1, 0(src), l_exc)
+EXC(	lb	t1, 0(src), .Ll_exc)
 	ADD	src, src, 1
 	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
+	.set	reorder				/* DADDI_WAR */
+	ADD	dst, dst, 1
 	bne	src, t0, 1b
-	 ADD	dst, dst, 1
-l_exc:
+	.set	noreorder
+.Ll_exc:
 	LOAD	t0, TI_TASK($28)
 	 nop
 	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
@@ -446,20 +469,33 @@ l_exc:
 	 * Clear len bytes starting at dst.  Can't call __bzero because it
 	 * might modify len.  An inefficient loop for these rare times...
 	 */
-	beqz	len, done
-	 SUB	src, len, 1
+	.set	reorder				/* DADDI_WAR */
+	SUB	src, len, 1
+	beqz	len, .Ldone
+	.set	noreorder
 1:	sb	zero, 0(dst)
 	ADD	dst, dst, 1
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
 	bnez	src, 1b
 	 SUB	src, src, 1
+#else
+	.set	push
+	.set	noat
+	li	v1, 1
+	bnez	src, 1b
+	 SUB	src, src, v1
+	.set	pop
+#endif
 	jr	ra
 	 nop
 
-#define SEXC(n)				\
-s_exc_p ## n ## u:			\
-	jr	ra;			\
-	ADD	len, len, n*NBYTES
+#define SEXC(n)				\
+	.set	reorder;	/* DADDI_WAR */	\
+.Ls_exc_p ## n ## u:			\
+	ADD	len, len, n*NBYTES;	\
+	jr	ra;			\
+	.set	noreorder
 
 SEXC(8)
 SEXC(7)
@@ -470,10 +506,12 @@ SEXC(3)
 SEXC(2)
 SEXC(1)
 
-s_exc_p1:
+.Ls_exc_p1:
+	.set	reorder				/* DADDI_WAR */
+	ADD	len, len, 1
 	jr	ra
-	 ADD	len, len, 1
-s_exc:
+	.set	noreorder
+.Ls_exc:
 	jr	ra
 	 nop
 
@@ -484,38 +522,44 @@ LEAF(memmove)
 	sltu	t0, a1, t0			# dst + len <= src -> memcpy
 	sltu	t1, a0, t1			# dst >= src + len -> memcpy
 	and	t0, t1
-	beqz	t0, __memcpy
+	beqz	t0, .L__memcpy
 	 move	v0, a0				/* return value */
-	beqz	a2, r_out
+	beqz	a2, .Lr_out
 	END(memmove)
 
 	/* fall through to __rmemcpy */
 LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
 	sltu	t0, a1, a0
-	beqz	t0, r_end_bytes_up		# src >= dst
+	beqz	t0, .Lr_end_bytes_up		# src >= dst
 	 nop
 	ADD	a0, a2				# dst = dst + len
 	ADD	a1, a2				# src = src + len
-r_end_bytes:
+.Lr_end_bytes:
+	R10KCBARRIER(0(ra))
 	lb	t0, -1(a1)
 	SUB	a2, a2, 0x1
 	sb	t0, -1(a0)
 	SUB	a1, a1, 0x1
-	bnez	a2, r_end_bytes
-	 SUB	a0, a0, 0x1
+	.set	reorder				/* DADDI_WAR */
+	SUB	a0, a0, 0x1
+	bnez	a2, .Lr_end_bytes
+	.set	noreorder
 
-r_out:
+.Lr_out:
 	jr	ra
 	 move	a2, zero
 
-r_end_bytes_up:
+.Lr_end_bytes_up:
+	R10KCBARRIER(0(ra))
 	lb	t0, (a1)
 	SUB	a2, a2, 0x1
 	sb	t0, (a0)
 	ADD	a1, a1, 0x1
-	bnez	a2, r_end_bytes_up
-	 ADD	a0, a0, 0x1
+	.set	reorder				/* DADDI_WAR */
+	ADD	a0, a0, 0x1
+	bnez	a2, .Lr_end_bytes_up
+	.set	noreorder
 	jr	ra
 	 move	a2, zero