From 715b49ef2de6fcead0776d9349071670282faf65 Mon Sep 17 00:00:00 2001
From: Alan Cox
Date: Wed, 18 Jan 2006 17:44:07 -0800
Subject: [PATCH] EDAC: atomic scrub operations

EDAC requires a way to scrub memory if an ECC error is found and the
chipset does not do the work automatically.  That means rewriting memory
locations atomically with respect to all CPUs _and_ bus masters.  That
means we can't use atomic_add(foo, 0), as it gets optimised for non-SMP
builds and loses the bus lock.

This adds a function to include/asm-foo/atomic.h for the currently
supported platforms which implements a scrub of a mapped block.

It also adjusts the include order in a few other files where atomic.h is
included before types.h, since this now causes an error because
atomic_scrub uses u32.

Signed-off-by: Alan Cox
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-i386/atomic.h   | 12 ++++++++++++
 include/asm-x86_64/atomic.h | 12 ++++++++++++
 2 files changed, 24 insertions(+)
(limited to 'include')

diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index de649d3aa2d..e2c00c95a5e 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -255,5 +255,17 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(unsigned long *virt_addr, u32 size)
+{
+	u32 i;
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
 #include <asm-generic/atomic.h>
 #endif
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd553e77..4048508c4f4 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -426,5 +426,17 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(u32 *virt_addr, u32 size)
+{
+	u32 i;
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
 #include <asm-generic/atomic.h>
 #endif
-- 
cgit v1.2.3
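
Not part of the commit, but as an illustration of the technique the patch
relies on: a minimal, self-contained user-space sketch of the same locked
read-modify-write scrub loop (x86/x86-64 only).  The kernel version above
operates on a mapped physical block handed to it by an EDAC driver; the
file name, scrub_block(), and the buffer here are hypothetical, purely for
demonstration.

/* scrub_demo.c: user-space illustration of the locked RMW scrub idea.
 * Build with: gcc -O2 -o scrub_demo scrub_demo.c   (x86/x86-64 only)
 */
#include <stdio.h>
#include <stdint.h>

/* Adding 0 with a lock prefix forces an atomic read-modify-write of each
 * 32-bit word: the data is unchanged, but every word is read and written
 * back, which is what lets the memory controller regenerate the ECC bits.
 */
static inline void scrub_block(uint32_t *virt_addr, uint32_t size)
{
	uint32_t i;

	for (i = 0; i < size / 4; i++, virt_addr++)
		__asm__ __volatile__("lock; addl $0, %0" : "+m" (*virt_addr));
}

int main(void)
{
	uint32_t buf[16];
	uint32_t i;

	for (i = 0; i < 16; i++)
		buf[i] = i * i;

	scrub_block(buf, sizeof(buf));

	/* Data is untouched; only the rewrites themselves mattered. */
	for (i = 0; i < 16; i++)
		printf("%u%c", buf[i], i == 15 ? '\n' : ' ');

	return 0;
}

The locked add of zero is what makes each rewrite a single bus transaction;
a plain read followed by a separate write would leave a window in which
another CPU or a DMA master could modify the location in between.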