author    | Fenghua Yu <fenghua.yu@intel.com>                    | 2007-07-19 01:48:13 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-19 10:04:45 -0700
commit    | f34e3b61f2be9628bd41244f3ecc42009c5eced5 (patch)
tree      | 192a64c12f84b3d69b9bf12ba56c2c7d86bc269b /arch
parent    | 5fb7dc37dc16fbc8b80d81318a582201ef7e280d (diff)
use the new percpu interface for shared data
Currently most of the per-cpu data that is accessed by different CPUs
has a ____cacheline_aligned_in_smp attribute. Move all this data to the
new per-cpu shared data section: .data.percpu.shared_aligned.

This will separate the percpu data which is referenced frequently by other
CPUs from the local-only percpu data.
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
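
For context, the DEFINE_PER_CPU_SHARED_ALIGNED() interface used below was introduced by the parent commit (5fb7dc37dc16, "define new percpu interface for shared data"). A sketch of what that variant looks like, assuming the 2.6.23-era include/linux/percpu.h; consult the parent commit for the exact definition:

/* Sketch, not part of this patch: the shared-aligned variant places the
 * variable in the dedicated .data.percpu.shared_aligned section and keeps
 * the cacheline alignment that callers previously had to request by hand.
 */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	__attribute__((__section__(".data.percpu.shared_aligned")))	\
	__typeof__(type) per_cpu__##name				\
	____cacheline_aligned_in_smp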
Diffstat (limited to 'arch')

-rw-r--r-- | arch/i386/kernel/init_task.c   | 2
-rw-r--r-- | arch/i386/kernel/irq.c         | 2
-rw-r--r-- | arch/ia64/kernel/smp.c         | 2
-rw-r--r-- | arch/x86_64/kernel/init_task.c | 2

4 files changed, 4 insertions, 4 deletions
diff --git a/arch/i386/kernel/init_task.c b/arch/i386/kernel/init_task.c
index cff95d10a4d..d26fc063a76 100644
--- a/arch/i386/kernel/init_task.c
+++ b/arch/i386/kernel/init_task.c
@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index d2daf672f4a..ba44d40b066 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -21,7 +21,7 @@
 #include <asm/apic.h>
 #include <asm/uaccess.h>

-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);

 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b3a47f986e1..9f72838db26 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -82,7 +82,7 @@ static volatile struct call_data_struct *call_data;
 #define IPI_KDUMP_CPU_STOP	3

 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
-static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);

 extern void cpu_halt (void);
diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index 3dc5854ba21..4ff33d4f855 100644
--- a/arch/x86_64/kernel/init_task.c
+++ b/arch/x86_64/kernel/init_task.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

 /* Copies of the original ist values from the tss are only accessed during
  * debugging, no special alignment required.
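
The ia64 hunk shows why this data counts as shared: the flag word is written by other CPUs on the IPI send path. A minimal sketch of that cross-CPU access pattern, modeled on ia64's send_ipi_single(); the helper body and the trailing platform step are paraphrased from the surrounding kernel code, not part of this diff:

#include <linux/percpu.h>
#include <linux/bitops.h>

static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);

static void
send_ipi_single (int dest_cpu, int op)
{
	/* The sender sets a bit in the *destination* CPU's copy, so this
	 * percpu variable is written cross-CPU; without the shared-aligned
	 * section it could share a cacheline with dest_cpu's local-only
	 * percpu data and cause cacheline ping-pong.
	 */
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	/* ...followed by a platform-specific IPI to dest_cpu. */
}

Grouping such cross-CPU variables into .data.percpu.shared_aligned keeps the alignment they already had, while letting the purely CPU-local percpu data pack densely in the ordinary percpu section.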