author    Paul Mackerras <paulus@samba.org>  2008-05-16 23:13:42 +1000
committer Paul Mackerras <paulus@samba.org>  2008-05-16 23:13:42 +1000
commit    fcff474ea5cb17ff015aa40e92ed86fede41f1e2 (patch)
tree      a99c0e14daaf31cb078812fb2fbc6abadfcd738f /arch/powerpc
parent    541b2755c2ef7dd2242ac606c115daa11e43ef69 (diff)
parent    f26a3988917913b3d11b2bd741601a2c64ab9204 (diff)
Merge branch 'linux-2.6' into powerpc-next
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c               | 28
-rw-r--r--  arch/powerpc/mm/init_64.c                     | 10
-rw-r--r--  arch/powerpc/mm/slb.c                         | 16
-rw-r--r--  arch/powerpc/mm/slb_low.S                     | 16
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.c  |  6
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.h  |  6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c      |  1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c     |  2
8 files changed, 67 insertions, 18 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 05f955f8df3..bf5b6d7ed30 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -94,6 +94,9 @@ unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
@@ -387,11 +390,32 @@ static void __init htab_init_page_sizes(void)
}
#endif /* CONFIG_PPC_64K_PAGES */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* We try to use 16M pages for vmemmap if that is supported
+ * and we have at least 1G of RAM at boot
+ */
+ if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+ lmb_phys_mem_size() >= 0x40000000)
+ mmu_vmemmap_psize = MMU_PAGE_16M;
+ else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+ mmu_vmemmap_psize = MMU_PAGE_64K;
+ else
+ mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
printk(KERN_DEBUG "Page orders: linear mapping = %d, "
- "virtual = %d, io = %d\n",
+ "virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ ", vmemmap = %d"
+#endif
+ "\n",
mmu_psize_defs[mmu_linear_psize].shift,
mmu_psize_defs[mmu_virtual_psize].shift,
- mmu_psize_defs[mmu_io_psize].shift);
+ mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ ,mmu_psize_defs[mmu_vmemmap_psize].shift
+#endif
+ );
#ifdef CONFIG_HUGETLB_PAGE
/* Init large page size. Currently, we pick 16M or 1M depending
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 1e52e97d740..6ef63caca68 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -19,6 +19,8 @@
*
*/
+#undef DEBUG
+
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -208,12 +210,12 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
}
int __meminit vmemmap_populate(struct page *start_page,
- unsigned long nr_pages, int node)
+ unsigned long nr_pages, int node)
{
unsigned long mode_rw;
unsigned long start = (unsigned long)start_page;
unsigned long end = (unsigned long)(start_page + nr_pages);
- unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+ unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
@@ -235,11 +237,11 @@ int __meminit vmemmap_populate(struct page *start_page,
start, p, __pa(p));
mapped = htab_bolt_mapping(start, start + page_size,
- __pa(p), mode_rw, mmu_linear_psize,
+ __pa(p), mode_rw, mmu_vmemmap_psize,
mmu_kernel_ssize);
BUG_ON(mapped < 0);
}
return 0;
}
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cf8705e32d6..89497fb0428 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -28,7 +28,7 @@
#include <asm/udbg.h>
#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
#else
#define DBG pr_debug
#endif
@@ -263,13 +263,19 @@ void slb_initialize(void)
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ extern unsigned int *slb_miss_kernel_load_vmemmap;
+ unsigned long vmemmap_llp;
+#endif
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
io_llp = mmu_psize_defs[mmu_io_psize].sllp;
vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
@@ -281,6 +287,12 @@ void slb_initialize(void)
DBG("SLB: linear LLP = %04lx\n", linear_llp);
DBG("SLB: io LLP = %04lx\n", io_llp);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+ SLB_VSID_KERNEL | vmemmap_llp);
+ DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
}
get_paca()->stab_rr = SLB_NUM_BOLTED;
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 657f6b37e9d..bc44dc4b5c6 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -47,8 +47,7 @@ _GLOBAL(slb_allocate_realmode)
* it to VSID 0, which is reserved as a bad VSID - one which
* will never have any pages in it. */
- /* Check if hitting the linear mapping of the vmalloc/ioremap
- * kernel space
+ /* Check if hitting the linear mapping or some other kernel space
*/
bne cr7,1f
@@ -62,7 +61,18 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
b slb_finish_load_1T
-1: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
+1:
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* Check virtual memmap region. To be patches at kernel boot */
+ cmpldi cr0,r9,0xf
+ bne 1f
+_GLOBAL(slb_miss_kernel_load_vmemmap)
+ li r11,0
+ b 6f
+1:
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+ /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
* will be patched by the kernel at boot
*/
BEGIN_FTR_SECTION
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
index 3b84e8be314..b5f84e8f089 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -118,7 +118,7 @@ static void iowa_##name at \
#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET
-static struct ppc_pci_io __initdata iowa_pci_io = {
+static const struct ppc_pci_io __devinitconst iowa_pci_io = {
#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name,
#define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name,
@@ -146,7 +146,7 @@ static void __iomem *iowa_ioremap(unsigned long addr, unsigned long size,
}
/* Regist new bus to support workaround */
-void __init iowa_register_bus(struct pci_controller *phb,
+void __devinit iowa_register_bus(struct pci_controller *phb,
struct ppc_pci_io *ops,
int (*initfunc)(struct iowa_bus *, void *), void *data)
{
@@ -173,7 +173,7 @@ void __init iowa_register_bus(struct pci_controller *phb,
}
/* enable IO workaround */
-void __init io_workaround_init(void)
+void __devinit io_workaround_init(void)
{
static int io_workaround_inited;
diff --git a/arch/powerpc/platforms/cell/io-workarounds.h b/arch/powerpc/platforms/cell/io-workarounds.h
index 79d8ed3d510..6efc7782ebf 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.h
+++ b/arch/powerpc/platforms/cell/io-workarounds.h
@@ -31,9 +31,9 @@ struct iowa_bus {
void *private;
};
-void __init io_workaround_init(void);
-void __init iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
- int (*)(struct iowa_bus *, void *), void *);
+void __devinit io_workaround_init(void);
+void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
+ int (*)(struct iowa_bus *, void *), void *);
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR);
struct iowa_bus *iowa_pio_find_bus(unsigned long);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 80911a37340..c81341ff75b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -32,6 +32,7 @@
#include <linux/marker.h>
#include <asm/io.h>
+#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 2e411f23462..745dd51ec37 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -659,7 +659,7 @@ static struct spu *find_victim(struct spu_context *ctx)
victim->stats.invol_ctx_switch++;
spu->stats.invol_ctx_switch++;
- if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
+ if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
spu_add_to_rq(victim);
mutex_unlock(&victim->state_mutex);