From 2098eec22882e8a50a21eb214df4742b34927dae Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Thu, 7 Jul 2005 17:56:09 -0700 Subject: [PATCH] ppc64: vdso32: fix link errors after recent toolchain changes Patch from , http://sources.redhat.com/bugzilla/show_bug.cgi?id=1042 /usr/bin/ld: arch/ppc64/kernel/vdso32/vdso32.so: The first section in the PT_DYNAMIC segment is not the .dynamic section Signed-off-by: Olaf Hering Acked-by: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/vdso32/vdso32.lds.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/vdso32/vdso32.lds.S b/arch/ppc64/kernel/vdso32/vdso32.lds.S index 11290c902ba..6f87a916a39 100644 --- a/arch/ppc64/kernel/vdso32/vdso32.lds.S +++ b/arch/ppc64/kernel/vdso32/vdso32.lds.S @@ -40,9 +40,9 @@ SECTIONS .gcc_except_table : { *(.gcc_except_table) } .fixup : { *(.fixup) } - .got ALIGN(4) : { *(.got.plt) *(.got) } - .dynamic : { *(.dynamic) } :text :dynamic + .got : { *(.got) } + .plt : { *(.plt) } _end = .; __end = .; -- cgit v1.2.3 From 315a699851722a6bc31e35f91562f31f55d4c4a2 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 7 Jul 2005 17:56:11 -0700 Subject: [PATCH] ppc64: use c99 initialisers in cputable code Use c99 initialisers in the cputable code. Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/cputable.c | 365 +++++++++++++++++++++++++------------------ 1 file changed, 212 insertions(+), 153 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c index 1d162c7c59d..c301366176e 100644 --- a/arch/ppc64/kernel/cputable.c +++ b/arch/ppc64/kernel/cputable.c @@ -49,160 +49,219 @@ extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec); #endif struct cpu_spec cpu_specs[] = { - { /* Power3 */ - 0xffff0000, 0x00400000, "POWER3 (630)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_IABR | CPU_FTR_PMC8, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power3, - COMMON_PPC64_FW - }, - { /* Power3+ */ - 0xffff0000, 0x00410000, "POWER3 (630+)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_IABR | CPU_FTR_PMC8, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power3, - COMMON_PPC64_FW - }, - { /* Northstar */ - 0xffff0000, 0x00330000, "RS64-II (northstar)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power3, - COMMON_PPC64_FW - }, - { /* Pulsar */ - 0xffff0000, 0x00340000, "RS64-III (pulsar)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power3, - COMMON_PPC64_FW - }, - { /* I-star */ - 0xffff0000, 0x00360000, "RS64-III (icestar)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power3, - COMMON_PPC64_FW - }, - { /* S-star */ - 0xffff0000, 0x00370000, "RS64-IV (sstar)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power3, - COMMON_PPC64_FW - }, - { /* Power4 */ - 0xffff0000, 0x00350000, "POWER4 (gp)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_PPCAS_ARCH_V2 | 
CPU_FTR_PMC8 | CPU_FTR_MMCRA, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power4, - COMMON_PPC64_FW - }, - { /* Power4+ */ - 0xffff0000, 0x00380000, "POWER4+ (gq)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power4, - COMMON_PPC64_FW - }, - { /* PPC970 */ - 0xffff0000, 0x00390000, "PPC970", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA, - COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP, - 128, 128, - __setup_cpu_ppc970, - COMMON_PPC64_FW - }, - { /* PPC970FX */ - 0xffff0000, 0x003c0000, "PPC970FX", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA, - COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP, - 128, 128, - __setup_cpu_ppc970, - COMMON_PPC64_FW - }, - { /* Power5 */ - 0xffff0000, 0x003a0000, "POWER5 (gr)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT | - CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | - CPU_FTR_MMCRA_SIHV, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power4, - COMMON_PPC64_FW - }, - { /* Power5 */ - 0xffff0000, 0x003b0000, "POWER5 (gs)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT | - CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | - CPU_FTR_MMCRA_SIHV, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power4, - COMMON_PPC64_FW - }, - { /* BE DD1.x */ - 0xffff0000, 0x00700000, "Broadband Engine", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | - CPU_FTR_SMT, - COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP, - 128, 128, - __setup_cpu_be, - COMMON_PPC64_FW - }, - { /* default match */ - 0x00000000, 0x00000000, "POWER4 (compatible)", - CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | - CPU_FTR_PPCAS_ARCH_V2, - COMMON_USER_PPC64, - 128, 128, - __setup_cpu_power4, - COMMON_PPC64_FW - } + { /* Power3 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00400000, + .cpu_name = "POWER3 (630)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | + CPU_FTR_PMC8, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power3, + .firmware_features = COMMON_PPC64_FW, + }, + { /* Power3+ */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00410000, + .cpu_name = "POWER3 (630+)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | + CPU_FTR_PMC8, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power3, + .firmware_features = COMMON_PPC64_FW, + }, + { /* Northstar */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00330000, + .cpu_name = "RS64-II (northstar)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | + CPU_FTR_PMC8 | CPU_FTR_MMCRA, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power3, + .firmware_features = COMMON_PPC64_FW, + }, + { /* Pulsar */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00340000, + .cpu_name = "RS64-III (pulsar)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + 
CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | + CPU_FTR_PMC8 | CPU_FTR_MMCRA, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power3, + .firmware_features = COMMON_PPC64_FW, + }, + { /* I-star */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00360000, + .cpu_name = "RS64-III (icestar)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | + CPU_FTR_PMC8 | CPU_FTR_MMCRA, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power3, + .firmware_features = COMMON_PPC64_FW, + }, + { /* S-star */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00370000, + .cpu_name = "RS64-IV (sstar)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | + CPU_FTR_PMC8 | CPU_FTR_MMCRA, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power3, + .firmware_features = COMMON_PPC64_FW, + }, + { /* Power4 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00350000, + .cpu_name = "POWER4 (gp)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power4, + .firmware_features = COMMON_PPC64_FW, + }, + { /* Power4+ */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00380000, + .cpu_name = "POWER4+ (gq)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power4, + .firmware_features = COMMON_PPC64_FW, + }, + { /* PPC970 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00390000, + .cpu_name = "PPC970", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | + CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA, + .cpu_user_features = COMMON_USER_PPC64 | + PPC_FEATURE_HAS_ALTIVEC_COMP, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_ppc970, + .firmware_features = COMMON_PPC64_FW, + }, + { /* PPC970FX */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x003c0000, + .cpu_name = "PPC970FX", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | + CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA, + .cpu_user_features = COMMON_USER_PPC64 | + PPC_FEATURE_HAS_ALTIVEC_COMP, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_ppc970, + .firmware_features = COMMON_PPC64_FW, + }, + { /* Power5 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x003a0000, + .cpu_name = "POWER5 (gr)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT | + CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | + CPU_FTR_MMCRA_SIHV, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power4, + .firmware_features = COMMON_PPC64_FW, + }, + { /* Power5 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x003b0000, + .cpu_name = "POWER5 (gs)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT | + 
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | + CPU_FTR_MMCRA_SIHV, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power4, + .firmware_features = COMMON_PPC64_FW, + }, + { /* BE DD1.x */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00700000, + .cpu_name = "Broadband Engine", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | + CPU_FTR_SMT, + .cpu_user_features = COMMON_USER_PPC64 | + PPC_FEATURE_HAS_ALTIVEC_COMP, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_be, + .firmware_features = COMMON_PPC64_FW, + }, + { /* default match */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = "POWER4 (compatible)", + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | + CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | + CPU_FTR_PPCAS_ARCH_V2, + .cpu_user_features = COMMON_USER_PPC64, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power4, + .firmware_features = COMMON_PPC64_FW, + } }; firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = { - {FW_FEATURE_PFT, "hcall-pft"}, - {FW_FEATURE_TCE, "hcall-tce"}, - {FW_FEATURE_SPRG0, "hcall-sprg0"}, - {FW_FEATURE_DABR, "hcall-dabr"}, - {FW_FEATURE_COPY, "hcall-copy"}, - {FW_FEATURE_ASR, "hcall-asr"}, - {FW_FEATURE_DEBUG, "hcall-debug"}, - {FW_FEATURE_PERF, "hcall-perf"}, - {FW_FEATURE_DUMP, "hcall-dump"}, - {FW_FEATURE_INTERRUPT, "hcall-interrupt"}, - {FW_FEATURE_MIGRATE, "hcall-migrate"}, - {FW_FEATURE_PERFMON, "hcall-perfmon"}, - {FW_FEATURE_CRQ, "hcall-crq"}, - {FW_FEATURE_VIO, "hcall-vio"}, - {FW_FEATURE_RDMA, "hcall-rdma"}, - {FW_FEATURE_LLAN, "hcall-lLAN"}, - {FW_FEATURE_BULK, "hcall-bulk"}, - {FW_FEATURE_XDABR, "hcall-xdabr"}, - {FW_FEATURE_MULTITCE, "hcall-multi-tce"}, - {FW_FEATURE_SPLPAR, "hcall-splpar"}, + {FW_FEATURE_PFT, "hcall-pft"}, + {FW_FEATURE_TCE, "hcall-tce"}, + {FW_FEATURE_SPRG0, "hcall-sprg0"}, + {FW_FEATURE_DABR, "hcall-dabr"}, + {FW_FEATURE_COPY, "hcall-copy"}, + {FW_FEATURE_ASR, "hcall-asr"}, + {FW_FEATURE_DEBUG, "hcall-debug"}, + {FW_FEATURE_PERF, "hcall-perf"}, + {FW_FEATURE_DUMP, "hcall-dump"}, + {FW_FEATURE_INTERRUPT, "hcall-interrupt"}, + {FW_FEATURE_MIGRATE, "hcall-migrate"}, + {FW_FEATURE_PERFMON, "hcall-perfmon"}, + {FW_FEATURE_CRQ, "hcall-crq"}, + {FW_FEATURE_VIO, "hcall-vio"}, + {FW_FEATURE_RDMA, "hcall-rdma"}, + {FW_FEATURE_LLAN, "hcall-lLAN"}, + {FW_FEATURE_BULK, "hcall-bulk"}, + {FW_FEATURE_XDABR, "hcall-xdabr"}, + {FW_FEATURE_MULTITCE, "hcall-multi-tce"}, + {FW_FEATURE_SPLPAR, "hcall-splpar"}, }; -- cgit v1.2.3 From a2f7a9ce2a5c3d21cc0eb37a03da603b44ba4b09 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 7 Jul 2005 17:56:11 -0700 Subject: [PATCH] ppc64: Fix runlatch code to work on pseries machines Not all ppc64 CPUs have the CTRL SPR, so we need a cputable feature for it. 
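For context, CTRL is the SPR that holds the run-latch bit which later patches in this series toggle on exception entry and in the idle loops; the new cputable bit lets generic code skip the SPR access entirely on CPUs that lack the register. A minimal sketch of how such a feature bit is typically consumed (names follow the ppc64 headers of this era; treat the helper as illustrative rather than the exact kernel macro):

	static inline void runlatch_on(void)		/* illustrative helper */
	{
		unsigned long ctrl;

		if (!cpu_has_feature(CPU_FTR_CTRL))
			return;				/* no CTRL SPR on this CPU */

		ctrl = mfspr(SPRN_CTRLF);		/* read thread control */
		ctrl |= CTRL_RUNLATCH;			/* set the run-latch bit */
		mtspr(SPRN_CTRLT, ctrl);		/* write it back */
	}
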
Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/cputable.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c index c301366176e..8d4c46f6f0b 100644 --- a/arch/ppc64/kernel/cputable.c +++ b/arch/ppc64/kernel/cputable.c @@ -81,7 +81,7 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "RS64-II (northstar)", .cpu_features = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | - CPU_FTR_PMC8 | CPU_FTR_MMCRA, + CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL, .cpu_user_features = COMMON_USER_PPC64, .icache_bsize = 128, .dcache_bsize = 128, @@ -94,7 +94,7 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "RS64-III (pulsar)", .cpu_features = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | - CPU_FTR_PMC8 | CPU_FTR_MMCRA, + CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL, .cpu_user_features = COMMON_USER_PPC64, .icache_bsize = 128, .dcache_bsize = 128, @@ -107,7 +107,7 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "RS64-III (icestar)", .cpu_features = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | - CPU_FTR_PMC8 | CPU_FTR_MMCRA, + CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL, .cpu_user_features = COMMON_USER_PPC64, .icache_bsize = 128, .dcache_bsize = 128, @@ -120,7 +120,7 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "RS64-IV (sstar)", .cpu_features = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | - CPU_FTR_PMC8 | CPU_FTR_MMCRA, + CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL, .cpu_user_features = COMMON_USER_PPC64, .icache_bsize = 128, .dcache_bsize = 128, -- cgit v1.2.3 From 8dc4fd87f229414fc38648508aad7def2275fe81 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 7 Jul 2005 17:56:12 -0700 Subject: [PATCH] ppc64: Turn runlatch on in exception entry Enable the runlatch at the start of each exception. Unfortunately we are out of space in the 0x300 handler, so I added it a bit later. The SPR write is fairly expensive, perhaps we should cache the runlatch state in the paca and avoid the write when possible. We don't need to turn the runlatch off, we do that in the idle loop. Better to take the hit in the idle loop than for each exception exit. Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/head.S | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S index 675c2708588..93ebcac0d5a 100644 --- a/arch/ppc64/kernel/head.S +++ b/arch/ppc64/kernel/head.S @@ -308,6 +308,7 @@ exception_marker: label##_pSeries: \ HMT_MEDIUM; \ mtspr SPRG1,r13; /* save r13 */ \ + RUNLATCH_ON(r13); \ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) #define STD_EXCEPTION_ISERIES(n, label, area) \ @@ -315,6 +316,7 @@ label##_pSeries: \ label##_iSeries: \ HMT_MEDIUM; \ mtspr SPRG1,r13; /* save r13 */ \ + RUNLATCH_ON(r13); \ EXCEPTION_PROLOG_ISERIES_1(area); \ EXCEPTION_PROLOG_ISERIES_2; \ b label##_common @@ -324,6 +326,7 @@ label##_iSeries: \ label##_iSeries: \ HMT_MEDIUM; \ mtspr SPRG1,r13; /* save r13 */ \ + RUNLATCH_ON(r13); \ EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ lbz r10,PACAPROCENABLED(r13); \ cmpwi 0,r10,0; \ @@ -393,6 +396,7 @@ __start_interrupts: _machine_check_pSeries: HMT_MEDIUM mtspr SPRG1,r13 /* save r13 */ + RUNLATCH_ON(r13) EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) . 
= 0x300 @@ -419,6 +423,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB) data_access_slb_pSeries: HMT_MEDIUM mtspr SPRG1,r13 + RUNLATCH_ON(r13) mfspr r13,SPRG3 /* get paca address into r13 */ std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ std r10,PACA_EXSLB+EX_R10(r13) @@ -439,6 +444,7 @@ data_access_slb_pSeries: instruction_access_slb_pSeries: HMT_MEDIUM mtspr SPRG1,r13 + RUNLATCH_ON(r13) mfspr r13,SPRG3 /* get paca address into r13 */ std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ std r10,PACA_EXSLB+EX_R10(r13) @@ -464,6 +470,7 @@ instruction_access_slb_pSeries: .globl system_call_pSeries system_call_pSeries: HMT_MEDIUM + RUNLATCH_ON(r9) mr r9,r13 mfmsr r10 mfspr r13,SPRG3 @@ -707,11 +714,13 @@ fwnmi_data_area: system_reset_fwnmi: HMT_MEDIUM mtspr SPRG1,r13 /* save r13 */ + RUNLATCH_ON(r13) EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) .globl machine_check_fwnmi machine_check_fwnmi: HMT_MEDIUM mtspr SPRG1,r13 /* save r13 */ + RUNLATCH_ON(r13) EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) /* @@ -848,6 +857,7 @@ unrecov_fer: .align 7 .globl data_access_common data_access_common: + RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */ mfspr r10,DAR std r10,PACA_EXGEN+EX_DAR(r13) mfspr r10,DSISR -- cgit v1.2.3 From 4416f3968a23e25a257d679227a89710447760ab Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 7 Jul 2005 17:56:14 -0700 Subject: [PATCH] ppc64: sys_ppc32.c cleanups Remove some unnecessary includes, an out of date comment and a prototype for sys_timer_create (which is now in syscalls.h) Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/sys_ppc32.c | 27 --------------------------- 1 file changed, 27 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c index 118436e8085..9bd16cef0ed 100644 --- a/arch/ppc64/kernel/sys_ppc32.c +++ b/arch/ppc64/kernel/sys_ppc32.c @@ -30,47 +30,26 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include -#include -#include -#include #include -#include #include -#include #include #include #include #include -#include #include #include #include -#include #include -#include -#include - #include #include #include #include #include #include -#include #include #include #include @@ -350,8 +329,6 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp) return ret; } - -/* These are here just in case some old sparc32 binary calls it. */ asmlinkage long sys32_pause(void) { current->state = TASK_INTERRUPTIBLE; @@ -360,8 +337,6 @@ asmlinkage long sys32_pause(void) return -ERESTARTNOHAND; } - - static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i) { long usec; @@ -1273,8 +1248,6 @@ long ppc32_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, (u64)len_high << 32 | len_low, advice); } -extern asmlinkage long sys_timer_create(clockid_t, sigevent_t __user *, timer_t __user *); - long ppc32_timer_create(clockid_t clock, struct compat_sigevent __user *ev32, timer_t __user *timer_id) -- cgit v1.2.3 From 79c2cc7b6d2cc31cff6a3d8e966a890f0a0d5f7a Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 7 Jul 2005 17:56:15 -0700 Subject: [PATCH] ppc64: add ioprio syscalls - Clean up sys32_getpriority comment. - Add ioprio syscalls, and sign extend 32bit versions. 
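The sign-extension idiom the new wrappers rely on, shown standalone (hypothetical function name, plain C semantics): a 32-bit task that passes -1 arrives as 0xffffffff in a 64-bit register, and casting through int recovers the negative value before the native syscall sees it.

	long sign_extend_example(u32 who)	/* hypothetical wrapper */
	{
		int swho = (int)who;	/* 0xffffffff reinterpreted as -1 */
		return (long)swho;	/* promoted back to 64 bits as -1 */
	}
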
Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/misc.S | 6 +++++- arch/ppc64/kernel/sys_ppc32.c | 27 +++++++++++++++++---------- 2 files changed, 22 insertions(+), 11 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S index f3dea0c5a88..59f4f997381 100644 --- a/arch/ppc64/kernel/misc.S +++ b/arch/ppc64/kernel/misc.S @@ -1124,9 +1124,11 @@ _GLOBAL(sys_call_table32) .llong .compat_sys_mq_getsetattr .llong .compat_sys_kexec_load .llong .sys32_add_key - .llong .sys32_request_key + .llong .sys32_request_key /* 270 */ .llong .compat_sys_keyctl .llong .compat_sys_waitid + .llong .sys32_ioprio_set + .llong .sys32_ioprio_get .balign 8 _GLOBAL(sys_call_table) @@ -1403,3 +1405,5 @@ _GLOBAL(sys_call_table) .llong .sys_request_key /* 270 */ .llong .sys_keyctl .llong .sys_waitid + .llong .sys_ioprio_set + .llong .sys_ioprio_get diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c index 9bd16cef0ed..206619080e6 100644 --- a/arch/ppc64/kernel/sys_ppc32.c +++ b/arch/ppc64/kernel/sys_ppc32.c @@ -822,16 +822,6 @@ asmlinkage long sys32_getpgid(u32 pid) } -/* Note: it is necessary to treat which and who as unsigned ints, - * with the corresponding cast to a signed int to insure that the - * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) - * and the register representation of a signed int (msr in 64-bit mode) is performed. - */ -asmlinkage long sys32_getpriority(u32 which, u32 who) -{ - return sys_getpriority((int)which, (int)who); -} - /* Note: it is necessary to treat pid as an unsigned int, * with the corresponding cast to a signed int to insure that the @@ -1023,6 +1013,11 @@ asmlinkage long sys32_setpgid(u32 pid, u32 pgid) return sys_setpgid((int)pid, (int)pgid); } +long sys32_getpriority(u32 which, u32 who) +{ + /* sign extend which and who */ + return sys_getpriority((int)which, (int)who); +} long sys32_setpriority(u32 which, u32 who, u32 niceval) { @@ -1030,6 +1025,18 @@ long sys32_setpriority(u32 which, u32 who, u32 niceval) return sys_setpriority((int)which, (int)who, (int)niceval); } +long sys32_ioprio_get(u32 which, u32 who) +{ + /* sign extend which and who */ + return sys_ioprio_get((int)which, (int)who); +} + +long sys32_ioprio_set(u32 which, u32 who, u32 ioprio) +{ + /* sign extend which, who and ioprio */ + return sys_ioprio_set((int)which, (int)who, (int)ioprio); +} + /* Note: it is necessary to treat newmask as an unsigned int, * with the corresponding cast to a signed int to insure that the * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) -- cgit v1.2.3 From d5ee257c3342185ba8ab642d125d192eb99ea8f2 Mon Sep 17 00:00:00 2001 From: Milton Miller Date: Thu, 7 Jul 2005 17:56:24 -0700 Subject: [PATCH] hvc_console: Separate hvc_console and vio code Separate the console setup routines of the hvc_console and the vio layer. Remove the call to find_init_vty from hvc_console.c. Fail the setup routine if the console doesn't exist, but register the console again when the specified channel is instantiated. This scheme maintains the print buffer semantics while eliminating callout and call back for the console code. 
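Schematically, the arch side of this change keeps the vty discovery routine private and lets the init machinery run it, instead of exporting it for hvc_console.c to call back into (a condensed sketch of the diff below, not the full routine):

	static int hvc_find_vtys(void)		/* now static: no external callers */
	{
		/* walk the firmware "vty" nodes and register each hvterm1
		 * channel with the generic driver via hvc_instantiate() */
		return 0;
	}
	console_initcall(hvc_find_vtys);	/* invoked during console init */
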
Signed-off-by: Milton Miller Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/hvconsole.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/hvconsole.c b/arch/ppc64/kernel/hvconsole.c index c72fb8ffe97..94fb06198ea 100644 --- a/arch/ppc64/kernel/hvconsole.c +++ b/arch/ppc64/kernel/hvconsole.c @@ -93,7 +93,7 @@ EXPORT_SYMBOL(hvc_put_chars); * We hope/assume that the first vty found corresponds to the first console * device. */ -int hvc_find_vtys(void) +static int hvc_find_vtys(void) { struct device_node *vty; int num_found = 0; @@ -119,3 +119,4 @@ int hvc_find_vtys(void) return num_found; } +console_initcall(hvc_find_vtys); -- cgit v1.2.3 From acad9559f1054487292eb10d7bb81f256e9d8f2d Mon Sep 17 00:00:00 2001 From: Milton Miller Date: Thu, 7 Jul 2005 17:56:24 -0700 Subject: [PATCH] hvc_console: Separate hvc_console and vio code 2 Remove all the vio device driver code from hvc_console.c This will allow us to separate hvsi, hvc, and allow hvc_console to be used without the ppc64 vio layer. Signed-off-by: Milton Miller Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/hvconsole.c | 33 --------------------------------- 1 file changed, 33 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/hvconsole.c b/arch/ppc64/kernel/hvconsole.c index 94fb06198ea..9d8876d92eb 100644 --- a/arch/ppc64/kernel/hvconsole.c +++ b/arch/ppc64/kernel/hvconsole.c @@ -27,7 +27,6 @@ #include #include #include -#include /** * hvc_get_chars - retrieve characters from firmware for denoted vterm adatper @@ -88,35 +87,3 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count) } EXPORT_SYMBOL(hvc_put_chars); - -/* - * We hope/assume that the first vty found corresponds to the first console - * device. - */ -static int hvc_find_vtys(void) -{ - struct device_node *vty; - int num_found = 0; - - for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL; - vty = of_find_node_by_name(vty, "vty")) { - uint32_t *vtermno; - - /* We have statically defined space for only a certain number of - * console adapters. */ - if (num_found >= MAX_NR_HVC_CONSOLES) - break; - - vtermno = (uint32_t *)get_property(vty, "reg", NULL); - if (!vtermno) - continue; - - if (device_is_compatible(vty, "hvterm1")) { - hvc_instantiate(*vtermno, num_found); - ++num_found; - } - } - - return num_found; -} -console_initcall(hvc_find_vtys); -- cgit v1.2.3 From 70b234a40107596a713e9981c643f2717e31463f Mon Sep 17 00:00:00 2001 From: Milton Miller Date: Thu, 7 Jul 2005 17:56:26 -0700 Subject: [PATCH] hvc_console: Separate the NUL character filtering from get_hvc_chars Separate the NUL character filtering from get_hvc_chars. Signed-off-by: Milton Miller Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/hvconsole.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/hvconsole.c b/arch/ppc64/kernel/hvconsole.c index 9d8876d92eb..138e128a388 100644 --- a/arch/ppc64/kernel/hvconsole.c +++ b/arch/ppc64/kernel/hvconsole.c @@ -41,29 +41,14 @@ int hvc_get_chars(uint32_t vtermno, char *buf, int count) unsigned long got; if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got, - (unsigned long *)buf, (unsigned long *)buf+1) == H_Success) { - /* - * Work around a HV bug where it gives us a null - * after every \r. 
-- paulus - */ - if (got > 0) { - int i; - for (i = 1; i < got; ++i) { - if (buf[i] == 0 && buf[i-1] == '\r') { - --got; - if (i < got) - memmove(&buf[i], &buf[i+1], - got - i); - } - } - } + (unsigned long *)buf, (unsigned long *)buf+1) == H_Success) return got; - } return 0; } EXPORT_SYMBOL(hvc_get_chars); + /** * hvc_put_chars: send characters to firmware for denoted vterm adapter * @vtermno: The vtermno or unit_address of the adapter from which the data -- cgit v1.2.3 From fd899c0cc725387992ccfc83fb6f70505c36cbeb Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 7 Jul 2005 17:56:28 -0700 Subject: [PATCH] ppc64: Make idle_loop a ppc_md function This patch adds an idle member to the ppc_md structure and calls it from cpu_idle(). If a platform leaves ppc_md.idle as null it will get the default idle loop default_idle(). Signed-off-by: Michael Ellerman Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/idle.c | 8 +++++--- arch/ppc64/kernel/setup.c | 6 +++--- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c index 08952c7e621..e270055e73e 100644 --- a/arch/ppc64/kernel/idle.c +++ b/arch/ppc64/kernel/idle.c @@ -33,6 +33,7 @@ #include #include #include +#include extern void power4_idle(void); @@ -122,7 +123,7 @@ static int iSeries_idle(void) #else -static int default_idle(void) +int default_idle(void) { long oldval; unsigned int cpu = smp_processor_id(); @@ -288,7 +289,7 @@ static int shared_idle(void) #endif /* CONFIG_PPC_PSERIES */ -static int native_idle(void) +int native_idle(void) { while(1) { /* check CPU type here */ @@ -308,7 +309,8 @@ static int native_idle(void) void cpu_idle(void) { - idle_loop(); + BUG_ON(NULL == ppc_md.idle_loop); + ppc_md.idle_loop(); } int powersave_nap; diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c index d5e4866e9ac..a278998ecb4 100644 --- a/arch/ppc64/kernel/setup.c +++ b/arch/ppc64/kernel/setup.c @@ -96,7 +96,6 @@ extern void udbg_init_maple_realmode(void); extern unsigned long klimit; extern void mm_init_ppc64(void); -extern int idle_setup(void); extern void stab_initialize(unsigned long stab); extern void htab_initialize(void); extern void early_init_devtree(void *flat_dt); @@ -1081,8 +1080,9 @@ void __init setup_arch(char **cmdline_p) ppc_md.setup_arch(); - /* Select the correct idle loop for the platform. */ - idle_setup(); + /* Use the default idle loop if the platform hasn't provided one. */ + if (NULL == ppc_md.idle_loop) + ppc_md.idle_loop = default_idle; paging_init(); ppc64_boot_msg(0x15, "Setup Done"); -- cgit v1.2.3 From d200903e11f6867b91dffa81b2038e55be599f49 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 7 Jul 2005 17:56:29 -0700 Subject: [PATCH] ppc64: Move iSeries_idle() into iSeries_setup.c Move iSeries_idle() into iSeries_setup.c, no one else needs to know about it. 
Signed-off-by: Michael Ellerman Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/iSeries_setup.c | 81 ++++++++++++++++++++++++++++++++++++ arch/ppc64/kernel/idle.c | 86 --------------------------------------- 2 files changed, 81 insertions(+), 86 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c index b3f770f6d40..1139e27e171 100644 --- a/arch/ppc64/kernel/iSeries_setup.c +++ b/arch/ppc64/kernel/iSeries_setup.c @@ -834,6 +834,87 @@ static int __init iSeries_src_init(void) late_initcall(iSeries_src_init); +static unsigned long maxYieldTime = 0; +static unsigned long minYieldTime = 0xffffffffffffffffUL; + +static inline void process_iSeries_events(void) +{ + asm volatile ("li 0,0x5555; sc" : : : "r0", "r3"); +} + +static void yield_shared_processor(void) +{ + unsigned long tb; + unsigned long yieldTime; + + HvCall_setEnabledInterrupts(HvCall_MaskIPI | + HvCall_MaskLpEvent | + HvCall_MaskLpProd | + HvCall_MaskTimeout); + + tb = get_tb(); + /* Compute future tb value when yield should expire */ + HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy); + + yieldTime = get_tb() - tb; + if (yieldTime > maxYieldTime) + maxYieldTime = yieldTime; + + if (yieldTime < minYieldTime) + minYieldTime = yieldTime; + + /* + * The decrementer stops during the yield. Force a fake decrementer + * here and let the timer_interrupt code sort out the actual time. + */ + get_paca()->lppaca.int_dword.fields.decr_int = 1; + process_iSeries_events(); +} + +static int iSeries_idle(void) +{ + struct paca_struct *lpaca; + long oldval; + + /* ensure iSeries run light will be out when idle */ + ppc64_runlatch_off(); + + lpaca = get_paca(); + + while (1) { + if (lpaca->lppaca.shared_proc) { + if (hvlpevent_is_pending()) + process_iSeries_events(); + if (!need_resched()) + yield_shared_processor(); + } else { + oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); + + if (!oldval) { + set_thread_flag(TIF_POLLING_NRFLAG); + + while (!need_resched()) { + HMT_medium(); + if (hvlpevent_is_pending()) + process_iSeries_events(); + HMT_low(); + } + + HMT_medium(); + clear_thread_flag(TIF_POLLING_NRFLAG); + } else { + set_need_resched(); + } + } + + ppc64_runlatch_on(); + schedule(); + ppc64_runlatch_off(); + } + + return 0; +} + #ifndef CONFIG_PCI void __init iSeries_init_IRQ(void) { } #endif diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c index e270055e73e..22615246779 100644 --- a/arch/ppc64/kernel/idle.c +++ b/arch/ppc64/kernel/idle.c @@ -39,90 +39,6 @@ extern void power4_idle(void); static int (*idle_loop)(void); -#ifdef CONFIG_PPC_ISERIES -static unsigned long maxYieldTime = 0; -static unsigned long minYieldTime = 0xffffffffffffffffUL; - -static inline void process_iSeries_events(void) -{ - asm volatile ("li 0,0x5555; sc" : : : "r0", "r3"); -} - -static void yield_shared_processor(void) -{ - unsigned long tb; - unsigned long yieldTime; - - HvCall_setEnabledInterrupts(HvCall_MaskIPI | - HvCall_MaskLpEvent | - HvCall_MaskLpProd | - HvCall_MaskTimeout); - - tb = get_tb(); - /* Compute future tb value when yield should expire */ - HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy); - - yieldTime = get_tb() - tb; - if (yieldTime > maxYieldTime) - maxYieldTime = yieldTime; - - if (yieldTime < minYieldTime) - minYieldTime = yieldTime; - - /* - * The decrementer stops during the yield. 
Force a fake decrementer - * here and let the timer_interrupt code sort out the actual time. - */ - get_paca()->lppaca.int_dword.fields.decr_int = 1; - process_iSeries_events(); -} - -static int iSeries_idle(void) -{ - struct paca_struct *lpaca; - long oldval; - - /* ensure iSeries run light will be out when idle */ - ppc64_runlatch_off(); - - lpaca = get_paca(); - - while (1) { - if (lpaca->lppaca.shared_proc) { - if (hvlpevent_is_pending()) - process_iSeries_events(); - if (!need_resched()) - yield_shared_processor(); - } else { - oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); - - if (!oldval) { - set_thread_flag(TIF_POLLING_NRFLAG); - - while (!need_resched()) { - HMT_medium(); - if (hvlpevent_is_pending()) - process_iSeries_events(); - HMT_low(); - } - - HMT_medium(); - clear_thread_flag(TIF_POLLING_NRFLAG); - } else { - set_need_resched(); - } - } - - ppc64_runlatch_on(); - schedule(); - ppc64_runlatch_off(); - } - - return 0; -} - -#else - int default_idle(void) { long oldval; @@ -305,8 +221,6 @@ int native_idle(void) return 0; } -#endif /* CONFIG_PPC_ISERIES */ - void cpu_idle(void) { BUG_ON(NULL == ppc_md.idle_loop); -- cgit v1.2.3 From c66d5dd6b5b62e1435b95c0fb42f6bcddeb395ea Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 7 Jul 2005 17:56:29 -0700 Subject: [PATCH] ppc64: Move pSeries idle functions into pSeries_setup.c dedicated_idle() and shared_idle() are only used by pSeries, so move them into pSeries_setup.c Signed-off-by: Michael Ellerman Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/idle.c | 131 -------------------------------------- arch/ppc64/kernel/pSeries_setup.c | 127 ++++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 131 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c index 22615246779..69b7c22bad5 100644 --- a/arch/ppc64/kernel/idle.c +++ b/arch/ppc64/kernel/idle.c @@ -74,137 +74,6 @@ int default_idle(void) return 0; } -#ifdef CONFIG_PPC_PSERIES - -DECLARE_PER_CPU(unsigned long, smt_snooze_delay); - -int dedicated_idle(void) -{ - long oldval; - struct paca_struct *lpaca = get_paca(), *ppaca; - unsigned long start_snooze; - unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay); - unsigned int cpu = smp_processor_id(); - - ppaca = &paca[cpu ^ 1]; - - while (1) { - /* - * Indicate to the HV that we are idle. Now would be - * a good time to find other work to dispatch. - */ - lpaca->lppaca.idle = 1; - - oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); - if (!oldval) { - set_thread_flag(TIF_POLLING_NRFLAG); - start_snooze = __get_tb() + - *smt_snooze_delay * tb_ticks_per_usec; - while (!need_resched() && !cpu_is_offline(cpu)) { - /* - * Go into low thread priority and possibly - * low power mode. - */ - HMT_low(); - HMT_very_low(); - - if (*smt_snooze_delay == 0 || - __get_tb() < start_snooze) - continue; - - HMT_medium(); - - if (!(ppaca->lppaca.idle)) { - local_irq_disable(); - - /* - * We are about to sleep the thread - * and so wont be polling any - * more. - */ - clear_thread_flag(TIF_POLLING_NRFLAG); - - /* - * SMT dynamic mode. Cede will result - * in this thread going dormant, if the - * partner thread is still doing work. - * Thread wakes up if partner goes idle, - * an interrupt is presented, or a prod - * occurs. Returning from the cede - * enables external interrupts. 
- */ - if (!need_resched()) - cede_processor(); - else - local_irq_enable(); - } else { - /* - * Give the HV an opportunity at the - * processor, since we are not doing - * any work. - */ - poll_pending(); - } - } - - clear_thread_flag(TIF_POLLING_NRFLAG); - } else { - set_need_resched(); - } - - HMT_medium(); - lpaca->lppaca.idle = 0; - schedule(); - if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) - cpu_die(); - } - return 0; -} - -static int shared_idle(void) -{ - struct paca_struct *lpaca = get_paca(); - unsigned int cpu = smp_processor_id(); - - while (1) { - /* - * Indicate to the HV that we are idle. Now would be - * a good time to find other work to dispatch. - */ - lpaca->lppaca.idle = 1; - - while (!need_resched() && !cpu_is_offline(cpu)) { - local_irq_disable(); - - /* - * Yield the processor to the hypervisor. We return if - * an external interrupt occurs (which are driven prior - * to returning here) or if a prod occurs from another - * processor. When returning here, external interrupts - * are enabled. - * - * Check need_resched() again with interrupts disabled - * to avoid a race. - */ - if (!need_resched()) - cede_processor(); - else - local_irq_enable(); - } - - HMT_medium(); - lpaca->lppaca.idle = 0; - schedule(); - if (cpu_is_offline(smp_processor_id()) && - system_state == SYSTEM_RUNNING) - cpu_die(); - } - - return 0; -} - -#endif /* CONFIG_PPC_PSERIES */ - int native_idle(void) { while(1) { diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c index 44d9af72d22..849ed9ba785 100644 --- a/arch/ppc64/kernel/pSeries_setup.c +++ b/arch/ppc64/kernel/pSeries_setup.c @@ -418,6 +418,133 @@ static int __init pSeries_probe(int platform) return 1; } +DECLARE_PER_CPU(unsigned long, smt_snooze_delay); + +int dedicated_idle(void) +{ + long oldval; + struct paca_struct *lpaca = get_paca(), *ppaca; + unsigned long start_snooze; + unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay); + unsigned int cpu = smp_processor_id(); + + ppaca = &paca[cpu ^ 1]; + + while (1) { + /* + * Indicate to the HV that we are idle. Now would be + * a good time to find other work to dispatch. + */ + lpaca->lppaca.idle = 1; + + oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); + if (!oldval) { + set_thread_flag(TIF_POLLING_NRFLAG); + start_snooze = __get_tb() + + *smt_snooze_delay * tb_ticks_per_usec; + while (!need_resched() && !cpu_is_offline(cpu)) { + /* + * Go into low thread priority and possibly + * low power mode. + */ + HMT_low(); + HMT_very_low(); + + if (*smt_snooze_delay == 0 || + __get_tb() < start_snooze) + continue; + + HMT_medium(); + + if (!(ppaca->lppaca.idle)) { + local_irq_disable(); + + /* + * We are about to sleep the thread + * and so wont be polling any + * more. + */ + clear_thread_flag(TIF_POLLING_NRFLAG); + + /* + * SMT dynamic mode. Cede will result + * in this thread going dormant, if the + * partner thread is still doing work. + * Thread wakes up if partner goes idle, + * an interrupt is presented, or a prod + * occurs. Returning from the cede + * enables external interrupts. + */ + if (!need_resched()) + cede_processor(); + else + local_irq_enable(); + } else { + /* + * Give the HV an opportunity at the + * processor, since we are not doing + * any work. 
+ */ + poll_pending(); + } + } + + clear_thread_flag(TIF_POLLING_NRFLAG); + } else { + set_need_resched(); + } + + HMT_medium(); + lpaca->lppaca.idle = 0; + schedule(); + if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) + cpu_die(); + } + return 0; +} + +static int shared_idle(void) +{ + struct paca_struct *lpaca = get_paca(); + unsigned int cpu = smp_processor_id(); + + while (1) { + /* + * Indicate to the HV that we are idle. Now would be + * a good time to find other work to dispatch. + */ + lpaca->lppaca.idle = 1; + + while (!need_resched() && !cpu_is_offline(cpu)) { + local_irq_disable(); + + /* + * Yield the processor to the hypervisor. We return if + * an external interrupt occurs (which are driven prior + * to returning here) or if a prod occurs from another + * processor. When returning here, external interrupts + * are enabled. + * + * Check need_resched() again with interrupts disabled + * to avoid a race. + */ + if (!need_resched()) + cede_processor(); + else + local_irq_enable(); + } + + HMT_medium(); + lpaca->lppaca.idle = 0; + schedule(); + if (cpu_is_offline(smp_processor_id()) && + system_state == SYSTEM_RUNNING) + cpu_die(); + } + + return 0; +} + struct machdep_calls __initdata pSeries_md = { .probe = pSeries_probe, .setup_arch = pSeries_setup_arch, -- cgit v1.2.3 From 62d60e9f0f890c31e5a83a7d8ecdfd1c7975fdb9 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 7 Jul 2005 17:56:30 -0700 Subject: [PATCH] ppc64: Fixup platforms for new ppc_md.idle This patch fixes up iSeries, pSeries, pmac and maple to set the correct idle function for each platform. Signed-off-by: Michael Ellerman Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/iSeries_setup.c | 1 + arch/ppc64/kernel/maple_setup.c | 3 +++ arch/ppc64/kernel/pSeries_setup.c | 18 ++++++++++++++++++ arch/ppc64/kernel/pmac_setup.c | 5 ++++- 4 files changed, 26 insertions(+), 1 deletion(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c index 1139e27e171..fae215ea54b 100644 --- a/arch/ppc64/kernel/iSeries_setup.c +++ b/arch/ppc64/kernel/iSeries_setup.c @@ -940,5 +940,6 @@ void __init iSeries_early_setup(void) ppc_md.get_rtc_time = iSeries_get_rtc_time; ppc_md.calibrate_decr = iSeries_calibrate_decr; ppc_md.progress = iSeries_progress; + ppc_md.idle_loop = iSeries_idle; } diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c index da8900b51f4..bb55b5a5691 100644 --- a/arch/ppc64/kernel/maple_setup.c +++ b/arch/ppc64/kernel/maple_setup.c @@ -177,6 +177,8 @@ void __init maple_setup_arch(void) #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif + + printk(KERN_INFO "Using native/NAP idle loop\n"); } /* @@ -297,4 +299,5 @@ struct machdep_calls __initdata maple_md = { .get_rtc_time = maple_get_rtc_time, .calibrate_decr = generic_calibrate_decr, .progress = maple_progress, + .idle_loop = native_idle, }; diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c index 849ed9ba785..3f3be8ae935 100644 --- a/arch/ppc64/kernel/pSeries_setup.c +++ b/arch/ppc64/kernel/pSeries_setup.c @@ -19,6 +19,7 @@ #undef DEBUG #include +#include #include #include #include @@ -82,6 +83,9 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */ extern void pSeries_system_reset_exception(struct pt_regs *regs); extern int pSeries_machine_check_exception(struct pt_regs *regs); +static int shared_idle(void); +static int dedicated_idle(void); + static volatile 
void __iomem * chrp_int_ack_special; struct mpic *pSeries_mpic; @@ -229,6 +233,20 @@ static void __init pSeries_setup_arch(void) if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) vpa_init(boot_cpuid); + + /* Choose an idle loop */ + if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { + if (get_paca()->lppaca.shared_proc) { + printk(KERN_INFO "Using shared processor idle loop\n"); + ppc_md.idle_loop = shared_idle; + } else { + printk(KERN_INFO "Using dedicated idle loop\n"); + ppc_md.idle_loop = dedicated_idle; + } + } else { + printk(KERN_INFO "Using default idle loop\n"); + ppc_md.idle_loop = default_idle; + } } static int __init pSeries_init_panel(void) diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c index 6cf03d387b9..3013cdb5f93 100644 --- a/arch/ppc64/kernel/pmac_setup.c +++ b/arch/ppc64/kernel/pmac_setup.c @@ -186,6 +186,8 @@ void __init pmac_setup_arch(void) #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif + + printk(KERN_INFO "Using native/NAP idle loop\n"); } #ifdef CONFIG_SCSI @@ -507,5 +509,6 @@ struct machdep_calls __initdata pmac_md = { .calibrate_decr = pmac_calibrate_decr, .feature_call = pmac_do_feature_call, .progress = pmac_progress, - .check_legacy_ioport = pmac_check_legacy_ioport + .check_legacy_ioport = pmac_check_legacy_ioport, + .idle_loop = native_idle, }; -- cgit v1.2.3 From 08d5e3eb4b2141e1031835c89a62ee3ddf896641 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 7 Jul 2005 17:56:31 -0700 Subject: [PATCH] ppc64: Remove obsolete idle_setup() Now that the idle loop is configured by each platform we don't need idle_setup() anymore. Signed-off-by: Michael Ellerman Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/idle.c | 41 ----------------------------------------- 1 file changed, 41 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c index 69b7c22bad5..b8cfb37e5f1 100644 --- a/arch/ppc64/kernel/idle.c +++ b/arch/ppc64/kernel/idle.c @@ -37,8 +37,6 @@ extern void power4_idle(void); -static int (*idle_loop)(void); - int default_idle(void) { long oldval; @@ -127,42 +125,3 @@ register_powersave_nap_sysctl(void) } __initcall(register_powersave_nap_sysctl); #endif - -int idle_setup(void) -{ - /* - * Move that junk to each platform specific file, eventually define - * a pSeries_idle for shared processor stuff - */ -#ifdef CONFIG_PPC_ISERIES - idle_loop = iSeries_idle; - return 1; -#else - idle_loop = default_idle; -#endif -#ifdef CONFIG_PPC_PSERIES - if (systemcfg->platform & PLATFORM_PSERIES) { - if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { - if (get_paca()->lppaca.shared_proc) { - printk(KERN_INFO "Using shared processor idle loop\n"); - idle_loop = shared_idle; - } else { - printk(KERN_INFO "Using dedicated idle loop\n"); - idle_loop = dedicated_idle; - } - } else { - printk(KERN_INFO "Using default idle loop\n"); - idle_loop = default_idle; - } - } -#endif /* CONFIG_PPC_PSERIES */ -#ifndef CONFIG_PPC_ISERIES - if (systemcfg->platform == PLATFORM_POWERMAC || - systemcfg->platform == PLATFORM_MAPLE) { - printk(KERN_INFO "Using native/NAP idle loop\n"); - idle_loop = native_idle; - } -#endif /* CONFIG_PPC_ISERIES */ - - return 1; -} -- cgit v1.2.3 From 3c57bb9f454e8fc7b3d815b991b0dec43c766641 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 7 Jul 2005 17:56:32 -0700 Subject: [PATCH] ppc64: iSeries idle fixups - remove min/max yield time, we dont use the values anywhere - 
separate shared and dedicated idle loops - check need_resched again with irqs off to avoid sleeping with pending work - continually set runlatch off in idle loop, this means we dont need to turn the runlatch off on exception exit and suffer that associated cost for all exceptions. (A future patch will turn the runlatch on at exception entry) Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/iSeries_setup.c | 84 ++++++++++++++++++++++----------------- 1 file changed, 47 insertions(+), 37 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c index fae215ea54b..b4c919e18fa 100644 --- a/arch/ppc64/kernel/iSeries_setup.c +++ b/arch/ppc64/kernel/iSeries_setup.c @@ -834,9 +834,6 @@ static int __init iSeries_src_init(void) late_initcall(iSeries_src_init); -static unsigned long maxYieldTime = 0; -static unsigned long minYieldTime = 0xffffffffffffffffUL; - static inline void process_iSeries_events(void) { asm volatile ("li 0,0x5555; sc" : : : "r0", "r3"); @@ -845,7 +842,6 @@ static inline void process_iSeries_events(void) static void yield_shared_processor(void) { unsigned long tb; - unsigned long yieldTime; HvCall_setEnabledInterrupts(HvCall_MaskIPI | HvCall_MaskLpEvent | @@ -856,13 +852,6 @@ static void yield_shared_processor(void) /* Compute future tb value when yield should expire */ HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy); - yieldTime = get_tb() - tb; - if (yieldTime > maxYieldTime) - maxYieldTime = yieldTime; - - if (yieldTime < minYieldTime) - minYieldTime = yieldTime; - /* * The decrementer stops during the yield. Force a fake decrementer * here and let the timer_interrupt code sort out the actual time. 
@@ -871,45 +860,62 @@ static void yield_shared_processor(void) process_iSeries_events(); } -static int iSeries_idle(void) +static int iseries_shared_idle(void) { - struct paca_struct *lpaca; - long oldval; + while (1) { + while (!need_resched() && !hvlpevent_is_pending()) { + local_irq_disable(); + ppc64_runlatch_off(); + + /* Recheck with irqs off */ + if (!need_resched() && !hvlpevent_is_pending()) + yield_shared_processor(); - /* ensure iSeries run light will be out when idle */ - ppc64_runlatch_off(); + HMT_medium(); + local_irq_enable(); + } + + ppc64_runlatch_on(); - lpaca = get_paca(); + if (hvlpevent_is_pending()) + process_iSeries_events(); + + schedule(); + } + + return 0; +} + +static int iseries_dedicated_idle(void) +{ + struct paca_struct *lpaca = get_paca(); + long oldval; while (1) { - if (lpaca->lppaca.shared_proc) { - if (hvlpevent_is_pending()) - process_iSeries_events(); - if (!need_resched()) - yield_shared_processor(); - } else { - oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); + oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); - if (!oldval) { - set_thread_flag(TIF_POLLING_NRFLAG); + if (!oldval) { + set_thread_flag(TIF_POLLING_NRFLAG); - while (!need_resched()) { + while (!need_resched()) { + ppc64_runlatch_off(); + HMT_low(); + + if (hvlpevent_is_pending()) { HMT_medium(); - if (hvlpevent_is_pending()) - process_iSeries_events(); - HMT_low(); + ppc64_runlatch_on(); + process_iSeries_events(); } - - HMT_medium(); - clear_thread_flag(TIF_POLLING_NRFLAG); - } else { - set_need_resched(); } + + HMT_medium(); + clear_thread_flag(TIF_POLLING_NRFLAG); + } else { + set_need_resched(); } ppc64_runlatch_on(); schedule(); - ppc64_runlatch_off(); } return 0; @@ -940,6 +946,10 @@ void __init iSeries_early_setup(void) ppc_md.get_rtc_time = iSeries_get_rtc_time; ppc_md.calibrate_decr = iSeries_calibrate_decr; ppc_md.progress = iSeries_progress; - ppc_md.idle_loop = iSeries_idle; + + if (get_paca()->lppaca.shared_proc) + ppc_md.idle_loop = iseries_shared_idle; + else + ppc_md.idle_loop = iseries_dedicated_idle; } -- cgit v1.2.3 From 050a09389e045f37e5bf08718cf36909766e20d1 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 7 Jul 2005 17:56:33 -0700 Subject: [PATCH] ppc64: pSeries idle fixups - separate out sleep logic in dedicated_idle, it was so far indented that it got squashed against the right side of the screen. - add runlatch support, looping on runlatch disable. 
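The "check need_resched again with irqs off" item is the key correctness detail of both idle rewrites: a wakeup arriving between the first need_resched() test and the cede would otherwise be lost. In schematic form (condensed from the diffs; cede_processor() returns with interrupts enabled):

	local_irq_disable();
	ppc64_runlatch_off();
	if (!need_resched())		/* recheck now that interrupts are off */
		cede_processor();	/* sleep; an interrupt or prod wakes us */
	else
		local_irq_enable();	/* work appeared after the first check */
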
Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/pSeries_setup.c | 113 +++++++++++++++++++++----------------- 1 file changed, 62 insertions(+), 51 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c index 3f3be8ae935..5bec956e44a 100644 --- a/arch/ppc64/kernel/pSeries_setup.c +++ b/arch/ppc64/kernel/pSeries_setup.c @@ -83,8 +83,8 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */ extern void pSeries_system_reset_exception(struct pt_regs *regs); extern int pSeries_machine_check_exception(struct pt_regs *regs); -static int shared_idle(void); -static int dedicated_idle(void); +static int pseries_shared_idle(void); +static int pseries_dedicated_idle(void); static volatile void __iomem * chrp_int_ack_special; struct mpic *pSeries_mpic; @@ -238,10 +238,10 @@ static void __init pSeries_setup_arch(void) if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { if (get_paca()->lppaca.shared_proc) { printk(KERN_INFO "Using shared processor idle loop\n"); - ppc_md.idle_loop = shared_idle; + ppc_md.idle_loop = pseries_shared_idle; } else { printk(KERN_INFO "Using dedicated idle loop\n"); - ppc_md.idle_loop = dedicated_idle; + ppc_md.idle_loop = pseries_dedicated_idle; } } else { printk(KERN_INFO "Using default idle loop\n"); @@ -438,15 +438,47 @@ static int __init pSeries_probe(int platform) DECLARE_PER_CPU(unsigned long, smt_snooze_delay); -int dedicated_idle(void) +static inline void dedicated_idle_sleep(unsigned int cpu) +{ + struct paca_struct *ppaca = &paca[cpu ^ 1]; + + /* Only sleep if the other thread is not idle */ + if (!(ppaca->lppaca.idle)) { + local_irq_disable(); + + /* + * We are about to sleep the thread and so wont be polling any + * more. + */ + clear_thread_flag(TIF_POLLING_NRFLAG); + + /* + * SMT dynamic mode. Cede will result in this thread going + * dormant, if the partner thread is still doing work. Thread + * wakes up if partner goes idle, an interrupt is presented, or + * a prod occurs. Returning from the cede enables external + * interrupts. + */ + if (!need_resched()) + cede_processor(); + else + local_irq_enable(); + } else { + /* + * Give the HV an opportunity at the processor, since we are + * not doing any work. + */ + poll_pending(); + } +} + +static int pseries_dedicated_idle(void) { long oldval; - struct paca_struct *lpaca = get_paca(), *ppaca; + struct paca_struct *lpaca = get_paca(); + unsigned int cpu = smp_processor_id(); unsigned long start_snooze; unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay); - unsigned int cpu = smp_processor_id(); - - ppaca = &paca[cpu ^ 1]; while (1) { /* @@ -458,9 +490,13 @@ int dedicated_idle(void) oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); if (!oldval) { set_thread_flag(TIF_POLLING_NRFLAG); + start_snooze = __get_tb() + *smt_snooze_delay * tb_ticks_per_usec; + while (!need_resched() && !cpu_is_offline(cpu)) { + ppc64_runlatch_off(); + /* * Go into low thread priority and possibly * low power mode. @@ -468,60 +504,31 @@ int dedicated_idle(void) HMT_low(); HMT_very_low(); - if (*smt_snooze_delay == 0 || - __get_tb() < start_snooze) - continue; - - HMT_medium(); - - if (!(ppaca->lppaca.idle)) { - local_irq_disable(); - - /* - * We are about to sleep the thread - * and so wont be polling any - * more. - */ - clear_thread_flag(TIF_POLLING_NRFLAG); - - /* - * SMT dynamic mode. 
Cede will result - * in this thread going dormant, if the - * partner thread is still doing work. - * Thread wakes up if partner goes idle, - * an interrupt is presented, or a prod - * occurs. Returning from the cede - * enables external interrupts. - */ - if (!need_resched()) - cede_processor(); - else - local_irq_enable(); - } else { - /* - * Give the HV an opportunity at the - * processor, since we are not doing - * any work. - */ - poll_pending(); + if (*smt_snooze_delay != 0 && + __get_tb() > start_snooze) { + HMT_medium(); + dedicated_idle_sleep(cpu); } + } + HMT_medium(); clear_thread_flag(TIF_POLLING_NRFLAG); } else { set_need_resched(); } - HMT_medium(); lpaca->lppaca.idle = 0; + ppc64_runlatch_on(); + schedule(); + if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) cpu_die(); } - return 0; } -static int shared_idle(void) +static int pseries_shared_idle(void) { struct paca_struct *lpaca = get_paca(); unsigned int cpu = smp_processor_id(); @@ -535,6 +542,7 @@ static int shared_idle(void) while (!need_resched() && !cpu_is_offline(cpu)) { local_irq_disable(); + ppc64_runlatch_off(); /* * Yield the processor to the hypervisor. We return if @@ -550,13 +558,16 @@ static int shared_idle(void) cede_processor(); else local_irq_enable(); + + HMT_medium(); } - HMT_medium(); lpaca->lppaca.idle = 0; + ppc64_runlatch_on(); + schedule(); - if (cpu_is_offline(smp_processor_id()) && - system_state == SYSTEM_RUNNING) + + if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) cpu_die(); } -- cgit v1.2.3 From 45e75dfb609df4391636c2218bec5ea04536601d Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 7 Jul 2005 17:56:33 -0700 Subject: [PATCH] ppc64: idle fixups - remove some unnecessary includes - add runlatch support - no need to use raw_smp_processor_id any more, current preempt debug logic checks for processes that are bound to one cpu. Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/idle.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c index b8cfb37e5f1..954395d4263 100644 --- a/arch/ppc64/kernel/idle.c +++ b/arch/ppc64/kernel/idle.c @@ -20,18 +20,12 @@ #include #include #include -#include #include -#include #include #include -#include #include #include -#include -#include -#include #include #include @@ -49,7 +43,8 @@ int default_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); while (!need_resched() && !cpu_is_offline(cpu)) { - barrier(); + ppc64_runlatch_off(); + /* * Go into low thread priority and possibly * low power mode. @@ -64,6 +59,7 @@ int default_idle(void) set_need_resched(); } + ppc64_runlatch_on(); schedule(); if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) cpu_die(); @@ -74,17 +70,22 @@ int default_idle(void) int native_idle(void) { - while(1) { - /* check CPU type here */ + while (1) { + ppc64_runlatch_off(); + if (!need_resched()) power4_idle(); - if (need_resched()) + + if (need_resched()) { + ppc64_runlatch_on(); schedule(); + } - if (cpu_is_offline(raw_smp_processor_id()) && + if (cpu_is_offline(smp_processor_id()) && system_state == SYSTEM_RUNNING) cpu_die(); } + return 0; } -- cgit v1.2.3 From 10ca1e1ed58d6428924b5a44539334c341a6f485 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 7 Jul 2005 17:56:34 -0700 Subject: [PATCH] ppc64: fix compile warning Fix a compile warning introduced by the previous patches. 
Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/iSeries_setup.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c index b4c919e18fa..32483dc16d4 100644 --- a/arch/ppc64/kernel/iSeries_setup.c +++ b/arch/ppc64/kernel/iSeries_setup.c @@ -888,7 +888,6 @@ static int iseries_shared_idle(void) static int iseries_dedicated_idle(void) { - struct paca_struct *lpaca = get_paca(); long oldval; while (1) { -- cgit v1.2.3 From b6bff397ea9c36d410212f785ee644103146102a Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 7 Jul 2005 17:56:35 -0700 Subject: [PATCH] ppc64: Be consistent about printing which idle loop we're using Not sure if we really need this, but it was handy to know which iSeries loop I was testing. Be consistent about printing which idle loop we're using, with this patch we cover all cases. Signed-off-by: Michael Ellerman Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/iSeries_setup.c | 7 +++++-- arch/ppc64/kernel/setup.c | 4 +++- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c index 32483dc16d4..077c82fc9f3 100644 --- a/arch/ppc64/kernel/iSeries_setup.c +++ b/arch/ppc64/kernel/iSeries_setup.c @@ -946,9 +946,12 @@ void __init iSeries_early_setup(void) ppc_md.calibrate_decr = iSeries_calibrate_decr; ppc_md.progress = iSeries_progress; - if (get_paca()->lppaca.shared_proc) + if (get_paca()->lppaca.shared_proc) { ppc_md.idle_loop = iseries_shared_idle; - else + printk(KERN_INFO "Using shared processor idle loop\n"); + } else { ppc_md.idle_loop = iseries_dedicated_idle; + printk(KERN_INFO "Using dedicated idle loop\n"); + } } diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c index a278998ecb4..d1b33f0b26c 100644 --- a/arch/ppc64/kernel/setup.c +++ b/arch/ppc64/kernel/setup.c @@ -1081,8 +1081,10 @@ void __init setup_arch(char **cmdline_p) ppc_md.setup_arch(); /* Use the default idle loop if the platform hasn't provided one. */ - if (NULL == ppc_md.idle_loop) + if (NULL == ppc_md.idle_loop) { ppc_md.idle_loop = default_idle; + printk(KERN_INFO "Using default idle loop\n"); + } paging_init(); ppc64_boot_msg(0x15, "Setup Done"); -- cgit v1.2.3 From 059e277e5ba6486b5ef66deb336d4ef887f163ac Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 7 Jul 2005 17:56:36 -0700 Subject: [PATCH] ppc64: silence perfmon exception warnings We dont need to use the PERFMON exception on POWER5, in fact the firmware returns an error. Due to this just remove the warning. Also now that we have proper runlatch support we can remove the bootup hack. 
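For reference, the hypervisor call that remains after this change, with its return value intentionally discarded (condensed from the diff below):

	/* ask the hypervisor to maintain the PMCs for this partition;
	 * POWER5 firmware may reject H_PERFMON, so the result is ignored */
	plpar_hcall_norets(H_PERFMON, 1UL << 63 /* set */, 0 /* reset */);
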
Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/kernel/sysfs.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'arch/ppc64') diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c index 2f704a2cafb..02b8ac4e016 100644 --- a/arch/ppc64/kernel/sysfs.c +++ b/arch/ppc64/kernel/sysfs.c @@ -112,7 +112,6 @@ void ppc64_enable_pmcs(void) unsigned long hid0; #ifdef CONFIG_PPC_PSERIES unsigned long set, reset; - int ret; #endif /* CONFIG_PPC_PSERIES */ /* Only need to enable them once */ @@ -145,11 +144,7 @@ void ppc64_enable_pmcs(void) case PLATFORM_PSERIES_LPAR: set = 1UL << 63; reset = 0; - ret = plpar_hcall_norets(H_PERFMON, set, reset); - if (ret) - printk(KERN_ERR "H_PERFMON call on cpu %u " - "returned %d\n", - smp_processor_id(), ret); + plpar_hcall_norets(H_PERFMON, set, reset); break; #endif /* CONFIG_PPC_PSERIES */ @@ -161,13 +156,6 @@ void ppc64_enable_pmcs(void) /* instruct hypervisor to maintain PMCs */ if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) get_paca()->lppaca.pmcregs_in_use = 1; - - /* - * On SMT machines we have to set the run latch in the ctrl register - * in order to make PMC6 spin. - */ - if (cpu_has_feature(CPU_FTR_SMT)) - ppc64_runlatch_on(); #endif /* CONFIG_PPC_PSERIES */ } -- cgit v1.2.3