author    Alan Cox <alan@lxorguk.ukuu.org.uk>    2006-01-18 17:44:07 -0800
committer Linus Torvalds <torvalds@g5.osdl.org> 2006-01-18 19:20:30 -0800
commit    715b49ef2de6fcead0776d9349071670282faf65 (patch)
tree      d09b77c804aba3b191dc0ceb294387cf730ede4b /include
parent    3213e913b0d6baeb28aa1affbdd4bfa7efedc35f (diff)
[PATCH] EDAC: atomic scrub operations
EDAC requires a way to scrub memory if an ECC error is found and the chipset does not do the work automatically. That means rewriting memory locations atomically with respect to all CPUs _and_ bus masters, which in turn means we can't use atomic_add(foo, 0), since the LOCK prefix is optimised away on non-SMP builds.

This adds a function to include/asm-foo/atomic.h for the currently supported platforms which implements a scrub of a mapped block.

It also adjusts the include order in a few other files where atomic.h is included before types.h, as this now causes an error because atomic_scrub uses u32.

Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
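The key idea: a locked x86 "add zero" forces the CPU to perform an atomic read-modify-write bus cycle. The data is unchanged, but the whole word is written back to DRAM, so the memory controller regenerates the ECC check bits for that location. Below is a minimal, hypothetical user-space demonstration of the same instruction (x86 only; scrub_word and scrub_range are illustrative names, not part of the patch, and the asm here uses the stricter "+m" read-write constraint):

    #include <stdio.h>
    #include <stdint.h>

    /* Rewrite one word atomically: "lock; addl $0" reads, adds nothing
     * and writes back in a single locked cycle, so the store cannot be
     * torn by other CPUs or bus-mastering DMA. */
    static inline void scrub_word(volatile uint32_t *p)
    {
            __asm__ __volatile__("lock; addl $0, %0" : "+m" (*p));
    }

    static void scrub_range(volatile uint32_t *virt_addr, uint32_t size)
    {
            uint32_t i;

            for (i = 0; i < size / 4; i++, virt_addr++)
                    scrub_word(virt_addr);
    }

    int main(void)
    {
            uint32_t buf[16] = { 0xdeadbeef };

            scrub_range(buf, sizeof(buf));
            printf("buf[0] = 0x%08x\n", buf[0]);    /* still 0xdeadbeef */
            return 0;
    }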
Diffstat (limited to 'include')
-rw-r--r--    include/asm-i386/atomic.h      12
-rw-r--r--    include/asm-x86_64/atomic.h    12
2 files changed, 24 insertions, 0 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index de649d3aa2d4..e2c00c95a5e1 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -255,5 +255,17 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(unsigned long *virt_addr, u32 size)
+{
+ u32 i;
+ for (i = 0; i < size / 4; i++, virt_addr++)
+ /* Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ */
+ __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
#include <asm-generic/atomic.h>
#endif
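One subtlety in the hunk above: *virt_addr is passed as an input-only "m" operand even though the instruction writes the location back (with the same value). That was accepted practice at the time; with a modern GCC the same statement would more conventionally be written with a read-write constraint, as in the sketch earlier:

    __asm__ __volatile__("lock; addl $0, %0" : "+m" (*virt_addr));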
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd553e772..4048508c4f40 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -426,5 +426,17 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(u32 *virt_addr, u32 size)
+{
+ u32 i;
+ for (i = 0; i < size / 4; i++, virt_addr++)
+ /* Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ */
+ __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
#include <asm-generic/atomic.h>
#endif
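For context, a hedged sketch of how an EDAC core might call atomic_scrub() on the page that produced a correctable error, assuming a 2.6-era kernel where kmap_atomic() takes a km_type slot; edac_scrub_page and its parameters are illustrative, not part of this patch:

    #include <linux/mm.h>
    #include <linux/highmem.h>
    #include <asm/atomic.h>

    static void edac_scrub_page(struct page *pg, unsigned long offset, u32 size)
    {
            void *virt_addr;

            /* Map the page, rewrite each word with a locked cycle so the
             * memory controller regenerates its ECC bits, then unmap. */
            virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
            atomic_scrub(virt_addr + offset, size);
            kunmap_atomic(virt_addr, KM_BOUNCE_READ);
    }

A kmap_atomic() section may not sleep, which is one reason the scrub must be a plain loop of locked instructions rather than anything that could block.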