author Linus Torvalds <torvalds@g5.osdl.org> 2005-11-14 17:56:00 -0800
committer Linus Torvalds <torvalds@g5.osdl.org> 2005-11-14 17:56:00 -0800
commit 0174f72f848dfe7dc7488799776303c81b181b16 (patch)
tree 3f92a0ea6d1780823f2c56e512942bcc6e7817ef /include
parent 302fe1758d85ad9c868e77625f61b7edad106381 (diff)
parent ba76cd575ffd461d83507c23cf53c78d56d1ea0a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge
Diffstat (limited to 'include')
-rw-r--r--  include/asm-powerpc/btext.h (renamed from include/asm-ppc64/btext.h)  0
-rw-r--r--  include/asm-powerpc/delay.h (renamed from include/asm-ppc64/delay.h)  19
-rw-r--r--  include/asm-powerpc/eeh.h (renamed from include/asm-ppc64/eeh.h)  0
-rw-r--r--  include/asm-powerpc/floppy.h (renamed from include/asm-ppc64/floppy.h)  25
-rw-r--r--  include/asm-powerpc/hvconsole.h (renamed from include/asm-ppc64/hvconsole.h)  0
-rw-r--r--  include/asm-powerpc/hvcserver.h (renamed from include/asm-ppc64/hvcserver.h)  0
-rw-r--r--  include/asm-powerpc/kexec.h  1
-rw-r--r--  include/asm-powerpc/machdep.h  4
-rw-r--r--  include/asm-powerpc/nvram.h (renamed from include/asm-ppc64/nvram.h)  17
-rw-r--r--  include/asm-powerpc/page.h  179
-rw-r--r--  include/asm-powerpc/page_32.h  40
-rw-r--r--  include/asm-powerpc/page_64.h  174
-rw-r--r--  include/asm-powerpc/serial.h (renamed from include/asm-ppc64/serial.h)  19
-rw-r--r--  include/asm-powerpc/vdso_datapage.h  2
-rw-r--r--  include/asm-ppc/nvram.h  73
-rw-r--r--  include/asm-ppc64/page.h  328
-rw-r--r--  include/asm-ppc64/prom.h  220
-rw-r--r--  include/asm-ppc64/system.h  310
18 files changed, 440 insertions, 971 deletions
diff --git a/include/asm-ppc64/btext.h b/include/asm-powerpc/btext.h
index 71cce36bc63..71cce36bc63 100644
--- a/include/asm-ppc64/btext.h
+++ b/include/asm-powerpc/btext.h
diff --git a/include/asm-ppc64/delay.h b/include/asm-powerpc/delay.h
index 05f198cf73d..1492aa9ab71 100644
--- a/include/asm-ppc64/delay.h
+++ b/include/asm-powerpc/delay.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_DELAY_H
-#define _PPC64_DELAY_H
+#ifndef _ASM_POWERPC_DELAY_H
+#define _ASM_POWERPC_DELAY_H
/*
* Copyright 1996, Paul Mackerras.
@@ -15,10 +15,17 @@
extern unsigned long tb_ticks_per_usec;
-/* define these here to prevent circular dependencies */
+#ifdef CONFIG_PPC64
+/* define these here to prevent circular dependencies */
+/* these instructions control the thread priority on multi-threaded cpus */
#define __HMT_low() asm volatile("or 1,1,1")
#define __HMT_medium() asm volatile("or 2,2,2")
-#define __barrier() asm volatile("":::"memory")
+#else
+#define __HMT_low()
+#define __HMT_medium()
+#endif
+
+#define __barrier() asm volatile("" ::: "memory")
static inline unsigned long __get_tb(void)
{
@@ -32,7 +39,7 @@ static inline void __delay(unsigned long loops)
{
unsigned long start = __get_tb();
- while((__get_tb()-start) < loops)
+ while((__get_tb() - start) < loops)
__HMT_low();
__HMT_medium();
__barrier();
@@ -45,4 +52,4 @@ static inline void udelay(unsigned long usecs)
__delay(loops);
}
-#endif /* _PPC64_DELAY_H */
+#endif /* _ASM_POWERPC_DELAY_H */
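The udelay() in the merged delay.h scales microseconds by tb_ticks_per_usec and then busy-waits on the timebase until enough ticks have elapsed. Below is a minimal user-space sketch of the same busy-wait pattern, with clock_gettime() standing in for the PPC timebase register and nanoseconds standing in for timebase ticks; the names get_ticks(), delay_ticks() and my_udelay() are illustrative, not kernel API.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t get_ticks(void)                /* analogue of __get_tb() */
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static void delay_ticks(uint64_t loops)        /* analogue of __delay() */
{
        uint64_t start = get_ticks();

        while ((get_ticks() - start) < loops)
                ;                              /* the kernel lowers SMT priority here (__HMT_low) */
}

static void my_udelay(unsigned long usecs)     /* analogue of udelay() */
{
        delay_ticks((uint64_t)usecs * 1000);   /* 1000 ns "ticks" per microsecond */
}

int main(void)
{
        my_udelay(500);                        /* busy-wait roughly 500 us */
        printf("done\n");
        return 0;
}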
diff --git a/include/asm-ppc64/eeh.h b/include/asm-powerpc/eeh.h
index 89f26ab3190..89f26ab3190 100644
--- a/include/asm-ppc64/eeh.h
+++ b/include/asm-powerpc/eeh.h
diff --git a/include/asm-ppc64/floppy.h b/include/asm-powerpc/floppy.h
index 5c497b588e5..64276a3f615 100644
--- a/include/asm-ppc64/floppy.h
+++ b/include/asm-powerpc/floppy.h
@@ -7,22 +7,22 @@
*
* Copyright (C) 1995
*/
-#ifndef __ASM_PPC64_FLOPPY_H
-#define __ASM_PPC64_FLOPPY_H
+#ifndef __ASM_POWERPC_FLOPPY_H
+#define __ASM_POWERPC_FLOPPY_H
#include <linux/config.h>
#include <asm/machdep.h>
-#define fd_inb(port) inb_p(port)
-#define fd_outb(value,port) outb_p(value,port)
+#define fd_inb(port) inb_p(port)
+#define fd_outb(value,port) outb_p(value,port)
#define fd_enable_dma() enable_dma(FLOPPY_DMA)
#define fd_disable_dma() disable_dma(FLOPPY_DMA)
-#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy")
+#define fd_request_dma() request_dma(FLOPPY_DMA, "floppy")
#define fd_free_dma() free_dma(FLOPPY_DMA)
#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
-#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode)
-#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
+#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA, mode)
+#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA, count)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr,size) /* nothing */
@@ -35,10 +35,10 @@
#include <linux/pci.h>
-#define fd_dma_setup(addr,size,mode,io) ppc64_fd_dma_setup(addr,size,mode,io)
+#define fd_dma_setup(addr,size,mode,io) powerpc_fd_dma_setup(addr,size,mode,io)
-static __inline__ int
-ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
+static __inline__ int powerpc_fd_dma_setup(char *addr, unsigned long size,
+ int mode, int io)
{
static unsigned long prev_size;
static dma_addr_t bus_addr = 0;
@@ -55,9 +55,8 @@ ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
bus_addr = 0;
}
- if (!bus_addr) /* need to map it */ {
+ if (!bus_addr) /* need to map it */
bus_addr = pci_map_single(NULL, addr, size, dir);
- }
/* remember this one as prev */
prev_addr = addr;
@@ -103,4 +102,4 @@ static int FDC2 = -1;
#define EXTRA_FLOPPY_PARAMS
-#endif /* __ASM_PPC64_FLOPPY_H */
+#endif /* __ASM_POWERPC_FLOPPY_H */
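The interesting part of the renamed powerpc_fd_dma_setup() above is the caching: the DMA mapping from the previous call is reused as long as the buffer address and size are unchanged, and only torn down and redone when they differ. A hedged user-space sketch of that map-if-changed pattern follows; map_single()/unmap_single() are hypothetical stand-ins for pci_map_single()/pci_unmap_single().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t bus_addr_t;

static bus_addr_t map_single(void *addr, size_t size)   /* fake mapping for the sketch */
{
        printf("mapping %p (%zu bytes)\n", addr, size);
        return (bus_addr_t)addr;
}

static void unmap_single(bus_addr_t bus, size_t size)
{
        printf("unmapping %#lx (%zu bytes)\n", (unsigned long)bus, size);
}

static bus_addr_t setup_dma(void *addr, size_t size)
{
        static void *prev_addr;
        static size_t prev_size;
        static bus_addr_t bus_addr;

        if (bus_addr && (addr != prev_addr || size != prev_size)) {
                unmap_single(bus_addr, prev_size);       /* different buffer: drop old mapping */
                bus_addr = 0;
        }
        if (!bus_addr)                                   /* need to map it */
                bus_addr = map_single(addr, size);

        prev_addr = addr;                                /* remember this one as prev */
        prev_size = size;
        return bus_addr;
}

int main(void)
{
        char buf[512];

        setup_dma(buf, sizeof(buf));     /* maps */
        setup_dma(buf, sizeof(buf));     /* reuses the cached mapping */
        setup_dma(buf, 128);             /* size changed: remaps */
        return 0;
}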
diff --git a/include/asm-ppc64/hvconsole.h b/include/asm-powerpc/hvconsole.h
index 6da93ce74dc..6da93ce74dc 100644
--- a/include/asm-ppc64/hvconsole.h
+++ b/include/asm-powerpc/hvconsole.h
diff --git a/include/asm-ppc64/hvcserver.h b/include/asm-powerpc/hvcserver.h
index aecba966579..aecba966579 100644
--- a/include/asm-ppc64/hvcserver.h
+++ b/include/asm-powerpc/hvcserver.h
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
index 062ab9ba68e..c72ffc709ea 100644
--- a/include/asm-powerpc/kexec.h
+++ b/include/asm-powerpc/kexec.h
@@ -40,6 +40,7 @@ extern note_buf_t crash_notes[];
#ifdef __powerpc64__
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
+extern void __init kexec_setup(void);
#else
struct kimage;
extern void machine_kexec_simple(struct kimage *image);
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 5670f0cd614..c011abb8b60 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -93,7 +93,9 @@ struct machdep_calls {
void (*init_IRQ)(void);
int (*get_irq)(struct pt_regs *);
- void (*cpu_irq_down)(int secondary);
+#ifdef CONFIG_KEXEC
+ void (*kexec_cpu_down)(int crash_shutdown, int secondary);
+#endif
/* PCI stuff */
/* Called after scanning the bus, before allocating resources */
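The hunk above turns the old unconditional cpu_irq_down() pointer into an optional kexec_cpu_down() hook that only exists under CONFIG_KEXEC and is invoked only when a platform fills it in. The sketch below shows that optional-hook convention with made-up names (my_machdep, shutdown_cpus, pseries_style_cpu_down); it illustrates the pattern and is not the kernel's kexec code.

#include <stdio.h>

struct my_machdep {
        /* crash_shutdown != 0 on the crash-dump path; secondary != 0 on
         * every CPU other than the one driving the kexec */
        void (*kexec_cpu_down)(int crash_shutdown, int secondary);
};

static void pseries_style_cpu_down(int crash_shutdown, int secondary)
{
        printf("cpu down: crash=%d secondary=%d\n", crash_shutdown, secondary);
}

static struct my_machdep md = {
        .kexec_cpu_down = pseries_style_cpu_down,
};

static void shutdown_cpus(int crash_shutdown)
{
        if (md.kexec_cpu_down)                  /* the hook is optional */
                md.kexec_cpu_down(crash_shutdown, 1);
}

int main(void)
{
        shutdown_cpus(0);
        return 0;
}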
diff --git a/include/asm-ppc64/nvram.h b/include/asm-powerpc/nvram.h
index def47d720d3..24bd8c2388e 100644
--- a/include/asm-ppc64/nvram.h
+++ b/include/asm-powerpc/nvram.h
@@ -1,6 +1,5 @@
/*
- * PreP compliant NVRAM access
- * This needs to be updated for PPC64
+ * NVRAM definitions and access functions.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -8,8 +7,8 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifndef _PPC64_NVRAM_H
-#define _PPC64_NVRAM_H
+#ifndef _ASM_POWERPC_NVRAM_H
+#define _ASM_POWERPC_NVRAM_H
#define NVRW_CNT 0x20
#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
@@ -69,7 +68,6 @@ extern int nvram_clear_error_log(void);
extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
extern int pSeries_nvram_init(void);
-extern int pmac_nvram_init(void);
extern int mmio_nvram_init(void);
/* PowerMac specific nvram stuffs */
@@ -88,7 +86,11 @@ extern u8 pmac_xpram_read(int xpaddr);
extern void pmac_xpram_write(int xpaddr, u8 data);
/* Synchronize NVRAM */
-extern int nvram_sync(void);
+extern void nvram_sync(void);
+
+/* Normal access to NVRAM */
+extern unsigned char nvram_read_byte(int i);
+extern void nvram_write_byte(unsigned char c, int i);
/* Some offsets in XPRAM */
#define PMAC_XPRAM_MACHINE_LOC 0xe4
@@ -112,5 +114,6 @@ struct pmac_machine_location {
_IOWR('p', 0x40, int)
#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */
+#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */
-#endif /* _PPC64_NVRAM_H */
+#endif /* _ASM_POWERPC_NVRAM_H */
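The header keeps the /dev/nvram ioctl numbers, including the newly added IOC_NVRAM_SYNC. A hedged user-space sketch of using them is shown below; it assumes a PowerMac-style nvram driver behind /dev/nvram and that partition index 0 (the Open Firmware partition) is valid; on other platforms the ioctls may simply fail with ENOTTY.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define IOC_NVRAM_GET_OFFSET    _IOWR('p', 0x42, int)   /* get NVRAM partition offset */
#define IOC_NVRAM_SYNC          _IO('p', 0x43)          /* sync NVRAM image */

int main(void)
{
        int part = 0;                   /* assumption: pmac_nvram_OF partition index */
        int fd = open("/dev/nvram", O_RDWR);

        if (fd < 0) {
                perror("open /dev/nvram");
                return 1;
        }
        if (ioctl(fd, IOC_NVRAM_GET_OFFSET, &part) == 0)
                printf("partition offset: %d\n", part);
        if (ioctl(fd, IOC_NVRAM_SYNC) != 0)
                perror("IOC_NVRAM_SYNC");
        close(fd);
        return 0;
}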
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
new file mode 100644
index 00000000000..18c1e5ee81a
--- /dev/null
+++ b/include/asm-powerpc/page.h
@@ -0,0 +1,179 @@
+#ifndef _ASM_POWERPC_PAGE_H
+#define _ASM_POWERPC_PAGE_H
+
+/*
+ * Copyright (C) 2001,2005 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+#include <asm/asm-compat.h>
+
+/*
+ * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
+ * page size. When using 64K pages however, whether we are really supporting
+ * 64K pages in HW or not is irrelevant to those definitions.
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+#define PAGE_SHIFT 16
+#else
+#define PAGE_SHIFT 12
+#endif
+
+#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
+
+/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
+#define __HAVE_ARCH_GATE_AREA 1
+
+/*
+ * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
+ * assign PAGE_MASK to a larger type it gets extended the way we want
+ * (i.e. with 1s in the high bits)
+ */
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+
+#define PAGE_OFFSET ASM_CONST(CONFIG_KERNEL_START)
+#define KERNELBASE PAGE_OFFSET
+
+#ifdef CONFIG_DISCONTIGMEM
+#define page_to_pfn(page) discontigmem_page_to_pfn(page)
+#define pfn_to_page(pfn) discontigmem_pfn_to_page(pfn)
+#define pfn_valid(pfn) discontigmem_pfn_valid(pfn)
+#endif
+
+#ifdef CONFIG_FLATMEM
+#define pfn_to_page(pfn) (mem_map + (pfn))
+#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
+
+/*
+ * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifdef __powerpc64__
+#include <asm/page_64.h>
+#else
+#include <asm/page_32.h>
+#endif
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/* These are used to make use of C type-checking. */
+
+/* PTE level */
+typedef struct { pte_basic_t pte; } pte_t;
+#define pte_val(x) ((x).pte)
+#define __pte(x) ((pte_t) { (x) })
+
+/* 64k pages additionally define a bigger "real PTE" type that gathers
+ * the "second half" part of the PTE for pseudo 64k pages
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef struct { pte_t pte; } real_pte_t;
+#endif
+
+/* PMD level */
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+/* PUD level exists only on 4k pages */
+#ifndef CONFIG_PPC_64K_PAGES
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+#endif
+
+/* PGD level */
+typedef struct { unsigned long pgd; } pgd_t;
+#define pgd_val(x) ((x).pgd)
+#define __pgd(x) ((pgd_t) { (x) })
+
+/* Page protection bits */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#else
+
+/*
+ * .. while these make it easier on the compiler
+ */
+
+typedef pte_basic_t pte_t;
+#define pte_val(x) (x)
+#define __pte(x) (x)
+
+#ifdef CONFIG_PPC_64K_PAGES
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef unsigned long real_pte_t;
+#endif
+
+
+typedef unsigned long pmd_t;
+#define pmd_val(x) (x)
+#define __pmd(x) (x)
+
+#ifndef CONFIG_PPC_64K_PAGES
+typedef unsigned long pud_t;
+#define pud_val(x) (x)
+#define __pud(x) (x)
+#endif
+
+typedef unsigned long pgd_t;
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+typedef unsigned long pgprot_t;
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+struct page;
+extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
+extern void copy_user_page(void *to, void *from, unsigned long vaddr,
+ struct page *p);
+extern int page_is_ram(unsigned long pfn);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_PAGE_H */
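Two details of the new page.h are easy to check in isolation: the comment about PAGE_MASK relies on (1 << PAGE_SHIFT) being an int, so the mask sign-extends to 1s in the high bits when assigned to an unsigned long, and the _ALIGN_UP/_ALIGN_DOWN macros round an address to a power-of-two boundary. The standalone demonstration below fixes PAGE_SHIFT at 12 (4K pages) purely for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1 << PAGE_SHIFT) - 1))

#define _ALIGN_UP(addr, size)   (((addr) + ((size) - 1)) & (~((size) - 1)))
#define _ALIGN_DOWN(addr, size) ((addr) & (~((size) - 1)))

int main(void)
{
        unsigned long mask = PAGE_MASK;        /* the int -4096 sign-extends on assignment */
        unsigned long addr = 0x12345;

        printf("PAGE_MASK as unsigned long: 0x%lx\n", mask);                   /* 0x...fffff000 */
        printf("_ALIGN_DOWN: 0x%lx\n", _ALIGN_DOWN(addr, 1UL << PAGE_SHIFT));  /* 0x12000 */
        printf("_ALIGN_UP:   0x%lx\n", _ALIGN_UP(addr, 1UL << PAGE_SHIFT));    /* 0x13000 */
        return 0;
}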
diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h
new file mode 100644
index 00000000000..7259cfd85da
--- /dev/null
+++ b/include/asm-powerpc/page_32.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_PAGE_32_H
+#define _ASM_POWERPC_PAGE_32_H
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
+
+#define PPC_MEMSTART 0
+
+#ifndef __ASSEMBLY__
+/*
+ * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
+ * physical addressing. For now this is just the IBM PPC440.
+ */
+#ifdef CONFIG_PTE_64BIT
+typedef unsigned long long pte_basic_t;
+#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
+#define PTE_FMT "%16Lx"
+#else
+typedef unsigned long pte_basic_t;
+#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
+#define PTE_FMT "%.8lx"
+#endif
+
+struct page;
+extern void clear_pages(void *page, int order);
+static inline void clear_page(void *page) { clear_pages(page, 0); }
+extern void copy_page(void *to, void *from);
+
+/* Pure 2^n version of get_order */
+extern __inline__ int get_order(unsigned long size)
+{
+ int lz;
+
+ size = (size-1) >> PAGE_SHIFT;
+ asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
+ return 32 - lz;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PAGE_32_H */
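The get_order() added above counts leading zeros with cntlzw, so the returned order is the number of significant bits in (size - 1) >> PAGE_SHIFT. The portable sketch below reproduces that arithmetic with a plain loop instead of the PPC instruction, just to sanity-check the rounding behaviour; it is not kernel code.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int get_order(unsigned long size)
{
        unsigned long frames = (size - 1) >> PAGE_SHIFT;
        int order = 0;

        while (frames) {               /* same result as 32 - cntlzw(frames) */
                frames >>= 1;
                order++;
        }
        return order;
}

int main(void)
{
        assert(get_order(1) == 0);                 /* a single byte still needs one page */
        assert(get_order(PAGE_SIZE) == 0);
        assert(get_order(PAGE_SIZE + 1) == 1);     /* rounds up to two pages */
        assert(get_order(8 * PAGE_SIZE) == 3);
        printf("get_order checks passed\n");
        return 0;
}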
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
new file mode 100644
index 00000000000..c16f106b537
--- /dev/null
+++ b/include/asm-powerpc/page_64.h
@@ -0,0 +1,174 @@
+#ifndef _ASM_POWERPC_PAGE_64_H
+#define _ASM_POWERPC_PAGE_64_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * We always define HW_PAGE_SHIFT to 12, as use of 64K pages remains Linux
+ * specific: every notion of page number shared with the firmware, TCEs,
+ * iommu, etc... still uses a page size of 4K.
+ */
+#define HW_PAGE_SHIFT 12
+#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
+#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
+
+/*
+ * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
+ * HW_PAGE_SHIFT, that is 4K pages.
+ */
+#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
+
+#define REGION_SIZE 4UL
+#define REGION_SHIFT 60UL
+#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
+
+#define VMALLOCBASE ASM_CONST(0xD000000000000000)
+#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
+#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT)
+#define USER_REGION_ID (0UL)
+#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
+
+/* Segment size */
+#define SID_SHIFT 28
+#define SID_MASK 0xfffffffffUL
+#define ESID_MASK 0xfffffffff0000000UL
+#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
+
+#ifndef __ASSEMBLY__
+#include <asm/cache.h>
+
+typedef unsigned long pte_basic_t;
+
+static __inline__ void clear_page(void *addr)
+{
+ unsigned long lines, line_size;
+
+ line_size = ppc64_caches.dline_size;
+ lines = ppc64_caches.dlines_per_page;
+
+ __asm__ __volatile__(
+ "mtctr %1 # clear_page\n\
+1: dcbz 0,%0\n\
+ add %0,%0,%3\n\
+ bdnz+ 1b"
+ : "=r" (addr)
+ : "r" (lines), "0" (addr), "r" (line_size)
+ : "ctr", "memory");
+}
+
+extern void copy_4K_page(void *to, void *from);
+
+#ifdef CONFIG_PPC_64K_PAGES
+static inline void copy_page(void *to, void *from)
+{
+ unsigned int i;
+ for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
+ copy_4K_page(to, from);
+ to += 4096;
+ from += 4096;
+ }
+}
+#else /* CONFIG_PPC_64K_PAGES */
+static inline void copy_page(void *to, void *from)
+{
+ copy_4K_page(to, from);
+}
+#endif /* CONFIG_PPC_64K_PAGES */
+
+/* Log 2 of page table size */
+extern u64 ppc64_pft_size;
+
+/* Large pages size */
+extern unsigned int HPAGE_SHIFT;
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define HTLB_AREA_SHIFT 40
+#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
+#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
+
+#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
+ - (1U << GET_ESID(addr))) & 0xffff)
+#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
+ - (1U << GET_HTLB_AREA(addr))) & 0xffff)
+
+#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+
+#define touches_hugepage_low_range(mm, addr, len) \
+ (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
+#define touches_hugepage_high_range(mm, addr, len) \
+ (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
+
+#define __within_hugepage_low_range(addr, len, segmask) \
+ ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
+#define within_hugepage_low_range(addr, len) \
+ __within_hugepage_low_range((addr), (len), \
+ current->mm->context.low_htlb_areas)
+#define __within_hugepage_high_range(addr, len, zonemask) \
+ ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
+#define within_hugepage_high_range(addr, len) \
+ __within_hugepage_high_range((addr), (len), \
+ current->mm->context.high_htlb_areas)
+
+#define is_hugepage_only_range(mm, addr, len) \
+ (touches_hugepage_high_range((mm), (addr), (len)) || \
+ touches_hugepage_low_range((mm), (addr), (len)))
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
+#define in_hugepage_area(context, addr) \
+ (cpu_has_feature(CPU_FTR_16M_PAGE) && \
+ ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
+ ( ((addr) < 0x100000000L) && \
+ ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
+
+#else /* !CONFIG_HUGETLB_PAGE */
+
+#define in_hugepage_area(mm, addr) 0
+
+#endif /* !CONFIG_HUGETLB_PAGE */
+
+#ifdef MODULE
+#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
+#else
+#define __page_aligned \
+ __attribute__((__aligned__(PAGE_SIZE), \
+ __section__(".data.page_aligned")))
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_32BIT) ? \
+ VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
+
+/*
+ * This is the default if a program doesn't have a PT_GNU_STACK
+ * program header entry. The PPC64 ELF ABI has a non-executable stack
+ * by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_STACK_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_32BIT) ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
+
+#include <asm-generic/page.h>
+
+#endif /* _ASM_POWERPC_PAGE_64_H */
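Under CONFIG_PPC_64K_PAGES the copy_page() above assembles a 64K software page out of sixteen 4K hardware-sized copies. The self-contained sketch below mirrors that loop, with memcpy() standing in for the assembler copy_4K_page() and PAGE_SHIFT assumed to be 16; it only verifies that the chunked copy covers the whole page.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define HW_PAGE_SHIFT 12
#define HW_PAGE_SIZE  (1UL << HW_PAGE_SHIFT)
#define PAGE_SHIFT    16                        /* assumption: 64K software pages */
#define PAGE_SIZE     (1UL << PAGE_SHIFT)

static void copy_4K_page(void *to, void *from)  /* stand-in for the asm routine */
{
        memcpy(to, from, HW_PAGE_SIZE);
}

static void copy_page(void *to, void *from)
{
        unsigned int i;

        for (i = 0; i < (1 << (PAGE_SHIFT - 12)); i++) {   /* 16 iterations */
                copy_4K_page(to, from);
                to = (char *)to + 4096;
                from = (char *)from + 4096;
        }
}

int main(void)
{
        char *src = malloc(PAGE_SIZE), *dst = malloc(PAGE_SIZE);

        assert(src && dst);
        memset(src, 0xab, PAGE_SIZE);
        copy_page(dst, src);
        assert(memcmp(dst, src, PAGE_SIZE) == 0);
        free(src);
        free(dst);
        return 0;
}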
diff --git a/include/asm-ppc64/serial.h b/include/asm-powerpc/serial.h
index d6bcb79b7d7..b273d630b32 100644
--- a/include/asm-ppc64/serial.h
+++ b/include/asm-powerpc/serial.h
@@ -1,21 +1,16 @@
/*
- * include/asm-ppc64/serial.h
- */
-#ifndef _PPC64_SERIAL_H
-#define _PPC64_SERIAL_H
-
-/*
- * This assumes you have a 1.8432 MHz clock for your UART.
- *
- * It'd be nice if someone built a serial card with a 24.576 MHz
- * clock, since the 16550A is capable of handling a top speed of 1.5
- * megabits/second; but this requires the faster clock.
- *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _ASM_POWERPC_SERIAL_H
+#define _ASM_POWERPC_SERIAL_H
+
+/*
+ * Serial ports are not listed here, because they are discovered
+ * through the device tree.
+ */
/* Default baud base if not found in device-tree */
#define BASE_BAUD ( 1843200 / 16 )
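BASE_BAUD here is the conventional 1.8432 MHz UART clock divided by 16, i.e. 115200, and a 16550-style divisor for a given line speed is then BASE_BAUD / baud (12 for 9600 baud, for example). The tiny sketch below just spells out that arithmetic; real ports on these machines take their clock frequency from the device tree instead.

#include <stdio.h>

#define BASE_BAUD (1843200 / 16)                 /* = 115200 */

int main(void)
{
        int baud = 9600;

        printf("BASE_BAUD = %d\n", BASE_BAUD);                          /* 115200 */
        printf("divisor for %d baud = %d\n", baud, BASE_BAUD / baud);   /* 12 */
        return 0;
}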
diff --git a/include/asm-powerpc/vdso_datapage.h b/include/asm-powerpc/vdso_datapage.h
index fc323b51366..411832d5bbd 100644
--- a/include/asm-powerpc/vdso_datapage.h
+++ b/include/asm-powerpc/vdso_datapage.h
@@ -73,7 +73,7 @@ struct vdso_data {
/* those additional ones don't have to be located anywhere
* special as they were not part of the original systemcfg
*/
- __s64 wtom_clock_sec; /* Wall to monotonic clock */
+ __s32 wtom_clock_sec; /* Wall to monotonic clock */
__s32 wtom_clock_nsec;
__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
diff --git a/include/asm-ppc/nvram.h b/include/asm-ppc/nvram.h
deleted file mode 100644
index 31ef16e3fc4..00000000000
--- a/include/asm-ppc/nvram.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * PreP compliant NVRAM access
- */
-
-#ifdef __KERNEL__
-#ifndef _PPC_NVRAM_H
-#define _PPC_NVRAM_H
-
-#define NVRAM_AS0 0x74
-#define NVRAM_AS1 0x75
-#define NVRAM_DATA 0x77
-
-
-/* RTC Offsets */
-
-#define MOTO_RTC_SECONDS 0x1FF9
-#define MOTO_RTC_MINUTES 0x1FFA
-#define MOTO_RTC_HOURS 0x1FFB
-#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
-#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
-#define MOTO_RTC_MONTH 0x1FFE
-#define MOTO_RTC_YEAR 0x1FFF
-#define MOTO_RTC_CONTROLA 0x1FF8
-#define MOTO_RTC_CONTROLB 0x1FF9
-
-/* PowerMac specific nvram stuffs */
-
-enum {
- pmac_nvram_OF, /* Open Firmware partition */
- pmac_nvram_XPRAM, /* MacOS XPRAM partition */
- pmac_nvram_NR /* MacOS Name Registry partition */
-};
-
-/* Return partition offset in nvram */
-extern int pmac_get_partition(int partition);
-
-/* Direct access to XPRAM on PowerMacs */
-extern u8 pmac_xpram_read(int xpaddr);
-extern void pmac_xpram_write(int xpaddr, u8 data);
-
-/* Synchronize NVRAM */
-extern void nvram_sync(void);
-
-/* Normal access to NVRAM */
-extern unsigned char nvram_read_byte(int i);
-extern void nvram_write_byte(unsigned char c, int i);
-
-/* Some offsets in XPRAM */
-#define PMAC_XPRAM_MACHINE_LOC 0xe4
-#define PMAC_XPRAM_SOUND_VOLUME 0x08
-
-/* Machine location structure in PowerMac XPRAM */
-struct pmac_machine_location {
- unsigned int latitude; /* 2+30 bit Fractional number */
- unsigned int longitude; /* 2+30 bit Fractional number */
- unsigned int delta; /* mix of GMT delta and DLS */
-};
-
-/*
- * /dev/nvram ioctls
- *
- * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is
- * definitely obsolete. Do not use it if you can avoid it
- */
-
-#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \
- _IOWR('p', 0x40, int)
-
-#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */
-#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */
-
-#endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
deleted file mode 100644
index 3efc3288f7e..00000000000
--- a/include/asm-ppc64/page.h
+++ /dev/null
@@ -1,328 +0,0 @@
-#ifndef _PPC64_PAGE_H
-#define _PPC64_PAGE_H
-
-/*
- * Copyright (C) 2001 PPC64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <asm/asm-compat.h>
-
-/*
- * We support either 4k or 64k software page size. When using 64k pages
- * however, wether we are really supporting 64k pages in HW or not is
- * irrelevant to those definitions. We always define HW_PAGE_SHIFT to 12
- * as use of 64k pages remains a linux kernel specific, every notion of
- * page number shared with the firmware, TCEs, iommu, etc... still assumes
- * a page size of 4096.
- */
-#ifdef CONFIG_PPC_64K_PAGES
-#define PAGE_SHIFT 16
-#else
-#define PAGE_SHIFT 12
-#endif
-
-#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-/* HW_PAGE_SHIFT is always 4k pages */
-#define HW_PAGE_SHIFT 12
-#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
-#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
-
-/* PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
- * HW_PAGE_SHIFT, that is 4k pages
- */
-#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
-
-/* Segment size */
-#define SID_SHIFT 28
-#define SID_MASK 0xfffffffffUL
-#define ESID_MASK 0xfffffffff0000000UL
-#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
-
-/* Large pages size */
-
-#ifndef __ASSEMBLY__
-extern unsigned int HPAGE_SHIFT;
-#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
-#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_HUGETLB_PAGE
-
-
-#define HTLB_AREA_SHIFT 40
-#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
-#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
-
-#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
- - (1U << GET_ESID(addr))) & 0xffff)
-#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
- - (1U << GET_HTLB_AREA(addr))) & 0xffff)
-
-#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
-
-#define touches_hugepage_low_range(mm, addr, len) \
- (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
-#define touches_hugepage_high_range(mm, addr, len) \
- (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
-
-#define __within_hugepage_low_range(addr, len, segmask) \
- ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
-#define within_hugepage_low_range(addr, len) \
- __within_hugepage_low_range((addr), (len), \
- current->mm->context.low_htlb_areas)
-#define __within_hugepage_high_range(addr, len, zonemask) \
- ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
-#define within_hugepage_high_range(addr, len) \
- __within_hugepage_high_range((addr), (len), \
- current->mm->context.high_htlb_areas)
-
-#define is_hugepage_only_range(mm, addr, len) \
- (touches_hugepage_high_range((mm), (addr), (len)) || \
- touches_hugepage_low_range((mm), (addr), (len)))
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-
-#define in_hugepage_area(context, addr) \
- (cpu_has_feature(CPU_FTR_16M_PAGE) && \
- ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
- ( ((addr) < 0x100000000L) && \
- ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
-
-#else /* !CONFIG_HUGETLB_PAGE */
-
-#define in_hugepage_area(mm, addr) 0
-
-#endif /* !CONFIG_HUGETLB_PAGE */
-
-/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
-#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
-
-/* align addr on a size boundary - adjust address up if needed */
-#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-#include <asm/cache.h>
-
-#undef STRICT_MM_TYPECHECKS
-
-#define REGION_SIZE 4UL
-#define REGION_SHIFT 60UL
-#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
-
-static __inline__ void clear_page(void *addr)
-{
- unsigned long lines, line_size;
-
- line_size = ppc64_caches.dline_size;
- lines = ppc64_caches.dlines_per_page;
-
- __asm__ __volatile__(
- "mtctr %1 # clear_page\n\
-1: dcbz 0,%0\n\
- add %0,%0,%3\n\
- bdnz+ 1b"
- : "=r" (addr)
- : "r" (lines), "0" (addr), "r" (line_size)
- : "ctr", "memory");
-}
-
-extern void copy_4K_page(void *to, void *from);
-
-#ifdef CONFIG_PPC_64K_PAGES
-static inline void copy_page(void *to, void *from)
-{
- unsigned int i;
- for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
- copy_4K_page(to, from);
- to += 4096;
- from += 4096;
- }
-}
-#else /* CONFIG_PPC_64K_PAGES */
-static inline void copy_page(void *to, void *from)
-{
- copy_4K_page(to, from);
-}
-#endif /* CONFIG_PPC_64K_PAGES */
-
-struct page;
-extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);
-
-#ifdef STRICT_MM_TYPECHECKS
-/*
- * These are used to make use of C type-checking.
- * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
- */
-
-/* PTE level */
-typedef struct { unsigned long pte; } pte_t;
-#define pte_val(x) ((x).pte)
-#define __pte(x) ((pte_t) { (x) })
-
-/* 64k pages additionally define a bigger "real PTE" type that gathers
- * the "second half" part of the PTE for pseudo 64k pages
- */
-#ifdef CONFIG_PPC_64K_PAGES
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef struct { pte_t pte; } real_pte_t;
-#endif
-
-/* PMD level */
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x) ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) })
-
-/* PUD level exusts only on 4k pages */
-#ifndef CONFIG_PPC_64K_PAGES
-typedef struct { unsigned long pud; } pud_t;
-#define pud_val(x) ((x).pud)
-#define __pud(x) ((pud_t) { (x) })
-#endif
-
-/* PGD level */
-typedef struct { unsigned long pgd; } pgd_t;
-#define pgd_val(x) ((x).pgd)
-#define __pgd(x) ((pgd_t) { (x) })
-
-/* Page protection bits */
-typedef struct { unsigned long pgprot; } pgprot_t;
-#define pgprot_val(x) ((x).pgprot)
-#define __pgprot(x) ((pgprot_t) { (x) })
-
-#else
-
-/*
- * .. while these make it easier on the compiler
- */
-
-typedef unsigned long pte_t;
-#define pte_val(x) (x)
-#define __pte(x) (x)
-
-#ifdef CONFIG_PPC_64K_PAGES
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef unsigned long real_pte_t;
-#endif
-
-
-typedef unsigned long pmd_t;
-#define pmd_val(x) (x)
-#define __pmd(x) (x)
-
-#ifndef CONFIG_PPC_64K_PAGES
-typedef unsigned long pud_t;
-#define pud_val(x) (x)
-#define __pud(x) (x)
-#endif
-
-typedef unsigned long pgd_t;
-#define pgd_val(x) (x)
-#define pgprot_val(x) (x)
-
-typedef unsigned long pgprot_t;
-#define __pgd(x) (x)
-#define __pgprot(x) (x)
-
-#endif
-
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-
-extern int page_is_ram(unsigned long pfn);
-
-extern u64 ppc64_pft_size; /* Log 2 of page table size */
-
-/* We do define AT_SYSINFO_EHDR but don't use the gate mecanism */
-#define __HAVE_ARCH_GATE_AREA 1
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef MODULE
-#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
-#else
-#define __page_aligned \
- __attribute__((__aligned__(PAGE_SIZE), \
- __section__(".data.page_aligned")))
-#endif
-
-
-/* This must match the -Ttext linker address */
-/* Note: tophys & tovirt make assumptions about how */
-/* KERNELBASE is defined for performance reasons. */
-/* When KERNELBASE moves, those macros may have */
-/* to change! */
-#define PAGE_OFFSET ASM_CONST(0xC000000000000000)
-#define KERNELBASE PAGE_OFFSET
-#define VMALLOCBASE ASM_CONST(0xD000000000000000)
-
-#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
-#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT)
-#define USER_REGION_ID (0UL)
-#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
-
-#ifdef CONFIG_FLATMEM
-#define pfn_to_page(pfn) (mem_map + (pfn))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#endif
-
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-
-/*
- * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
- * and needs to be executable. This means the whole heap ends
- * up being executable.
- */
-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS \
- (test_thread_flag(TIF_32BIT) ? \
- VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
-
-/*
- * This is the default if a program doesn't have a PT_GNU_STACK
- * program header entry. The PPC64 ELF ABI has a non executable stack
- * stack by default, so in the absense of a PT_GNU_STACK program header
- * we turn execute permission off.
- */
-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_STACK_DEFAULT_FLAGS \
- (test_thread_flag(TIF_32BIT) ? \
- VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/page.h>
-
-#endif /* _PPC64_PAGE_H */
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
deleted file mode 100644
index ddfe186589f..00000000000
--- a/include/asm-ppc64/prom.h
+++ /dev/null
@@ -1,220 +0,0 @@
-#ifndef _PPC64_PROM_H
-#define _PPC64_PROM_H
-
-/*
- * Definitions for talking to the Open Firmware PROM on
- * Power Macintosh computers.
- *
- * Copyright (C) 1996 Paul Mackerras.
- *
- * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/config.h>
-#include <linux/proc_fs.h>
-#include <asm/atomic.h>
-
-#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) - offset))
-#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
-#define RELOC(x) (*PTRRELOC(&(x)))
-
-/* Definitions used by the flattened device tree */
-#define OF_DT_HEADER 0xd00dfeed /* marker */
-#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
-#define OF_DT_END_NODE 0x2 /* End node */
-#define OF_DT_PROP 0x3 /* Property: name off, size,
- * content */
-#define OF_DT_NOP 0x4 /* nop */
-#define OF_DT_END 0x9
-
-#define OF_DT_VERSION 0x10
-
-/*
- * This is what gets passed to the kernel by prom_init or kexec
- *
- * The dt struct contains the device tree structure, full pathes and
- * property contents. The dt strings contain a separate block with just
- * the strings for the property names, and is fully page aligned and
- * self contained in a page, so that it can be kept around by the kernel,
- * each property name appears only once in this page (cheap compression)
- *
- * the mem_rsvmap contains a map of reserved ranges of physical memory,
- * passing it here instead of in the device-tree itself greatly simplifies
- * the job of everybody. It's just a list of u64 pairs (base/size) that
- * ends when size is 0
- */
-struct boot_param_header
-{
- u32 magic; /* magic word OF_DT_HEADER */
- u32 totalsize; /* total size of DT block */
- u32 off_dt_struct; /* offset to structure */
- u32 off_dt_strings; /* offset to strings */
- u32 off_mem_rsvmap; /* offset to memory reserve map */
- u32 version; /* format version */
- u32 last_comp_version; /* last compatible version */
- /* version 2 fields below */
- u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
- /* version 3 fields below */
- u32 dt_strings_size; /* size of the DT strings block */
-};
-
-
-
-typedef u32 phandle;
-typedef u32 ihandle;
-
-struct address_range {
- unsigned long space;
- unsigned long address;
- unsigned long size;
-};
-
-struct interrupt_info {
- int line;
- int sense; /* +ve/-ve logic, edge or level, etc. */
-};
-
-struct pci_address {
- u32 a_hi;
- u32 a_mid;
- u32 a_lo;
-};
-
-struct isa_address {
- u32 a_hi;
- u32 a_lo;
-};
-
-struct isa_range {
- struct isa_address isa_addr;
- struct pci_address pci_addr;
- unsigned int size;
-};
-
-struct reg_property {
- unsigned long address;
- unsigned long size;
-};
-
-struct reg_property32 {
- unsigned int address;
- unsigned int size;
-};
-
-struct reg_property64 {
- unsigned long address;
- unsigned long size;
-};
-
-struct property {
- char *name;
- int length;
- unsigned char *value;
- struct property *next;
-};
-
-struct device_node {
- char *name;
- char *type;
- phandle node;
- phandle linux_phandle;
- int n_addrs;
- struct address_range *addrs;
- int n_intrs;
- struct interrupt_info *intrs;
- char *full_name;
-
- struct property *properties;
- struct device_node *parent;
- struct device_node *child;
- struct device_node *sibling;
- struct device_node *next; /* next device of same type */
- struct device_node *allnext; /* next in list of all nodes */
- struct proc_dir_entry *pde; /* this node's proc directory */
- struct kref kref;
- unsigned long _flags;
- void *data;
-#ifdef CONFIG_PPC_ISERIES
- struct list_head Device_List;
-#endif
-};
-
-extern struct device_node *of_chosen;
-
-/* flag descriptions */
-#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
-
-#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
-#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
-
-/*
- * Until 32-bit ppc can add proc_dir_entries to its device_node
- * definition, we cannot refer to pde, name_link, and addr_link
- * in arch-independent code.
- */
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
-static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
-{
- dn->pde = de;
-}
-
-
-/* OBSOLETE: Old stlye node lookup */
-extern struct device_node *find_devices(const char *name);
-extern struct device_node *find_type_devices(const char *type);
-extern struct device_node *find_path_device(const char *path);
-extern struct device_node *find_compatible_devices(const char *type,
- const char *compat);
-extern struct device_node *find_all_nodes(void);
-
-/* New style node lookup */
-extern struct device_node *of_find_node_by_name(struct device_node *from,
- const char *name);
-extern struct device_node *of_find_node_by_type(struct device_node *from,
- const char *type);
-extern struct device_node *of_find_compatible_node(struct device_node *from,
- const char *type, const char *compat);
-extern struct device_node *of_find_node_by_path(const char *path);
-extern struct device_node *of_find_node_by_phandle(phandle handle);
-extern struct device_node *of_find_all_nodes(struct device_node *prev);
-extern struct device_node *of_get_parent(const struct device_node *node);
-extern struct device_node *of_get_next_child(const struct device_node *node,
- struct device_node *prev);
-extern struct device_node *of_node_get(struct device_node *node);
-extern void of_node_put(struct device_node *node);
-
-/* For scanning the flat device-tree at boot time */
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
- const char *uname, int depth,
- void *data),
- void *data);
-void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
- unsigned long *size);
-
-/* For updating the device tree at runtime */
-extern void of_attach_node(struct device_node *);
-extern void of_detach_node(const struct device_node *);
-
-/* Other Prototypes */
-extern unsigned long prom_init(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long);
-extern void finish_device_tree(void);
-extern void unflatten_device_tree(void);
-extern void early_init_devtree(void *);
-extern int device_is_compatible(struct device_node *device, const char *);
-extern int machine_is_compatible(const char *compat);
-extern unsigned char *get_property(struct device_node *node, const char *name,
- int *lenp);
-extern void print_properties(struct device_node *node);
-extern int prom_n_addr_cells(struct device_node* np);
-extern int prom_n_size_cells(struct device_node* np);
-extern int prom_n_intr_cells(struct device_node* np);
-extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
-extern int prom_add_property(struct device_node* np, struct property* prop);
-
-#endif /* _PPC64_PROM_H */
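The deleted prom.h documented the flattened device tree blob that prom_init or kexec hands to the kernel, whose header is the boot_param_header above with all fields stored big-endian. The hedged sketch below reads and sanity-checks that header from a .dtb file in user space, converting fields with be32toh(); it is only a reader of the header, not the kernel's unflattening code.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define OF_DT_HEADER 0xd00dfeed

struct boot_param_header {
        uint32_t magic;              /* OF_DT_HEADER */
        uint32_t totalsize;          /* total size of the DT block */
        uint32_t off_dt_struct;
        uint32_t off_dt_strings;
        uint32_t off_mem_rsvmap;
        uint32_t version;
        uint32_t last_comp_version;
        uint32_t boot_cpuid_phys;    /* version 2+ */
        uint32_t dt_strings_size;    /* version 3+ */
};

int main(int argc, char **argv)
{
        struct boot_param_header h;
        FILE *f;

        if (argc < 2 || !(f = fopen(argv[1], "rb")))
                return 1;
        if (fread(&h, sizeof(h), 1, f) != 1)
                return 1;
        if (be32toh(h.magic) != OF_DT_HEADER) {
                fprintf(stderr, "not a flattened device tree\n");
                return 1;
        }
        printf("version %u, total size %u bytes, strings block at +%u\n",
               be32toh(h.version), be32toh(h.totalsize),
               be32toh(h.off_dt_strings));
        fclose(f);
        return 0;
}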
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
deleted file mode 100644
index bf9a6aba19c..00000000000
--- a/include/asm-ppc64/system.h
+++ /dev/null
@@ -1,310 +0,0 @@
-#ifndef __PPC64_SYSTEM_H
-#define __PPC64_SYSTEM_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/compiler.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/hw_irq.h>
-#include <asm/synch.h>
-
-/*
- * Memory barrier.
- * The sync instruction guarantees that all memory accesses initiated
- * by this processor have been performed (with respect to all other
- * mechanisms that access memory). The eieio instruction is a barrier
- * providing an ordering (separately) for (a) cacheable stores and (b)
- * loads and stores to non-cacheable memory (e.g. I/O devices).
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- * read_barrier_depends() prevents data-dependent loads being reordered
- * across this point (nop on PPC).
- *
- * We have to use the sync instructions for mb(), since lwsync doesn't
- * order loads with respect to previous stores. Lwsync is fine for
- * rmb(), though.
- * For wmb(), we use sync since wmb is used in drivers to order
- * stores to system memory with respect to writes to the device.
- * However, smp_wmb() can be a lighter-weight eieio barrier on
- * SMP since it is only used to order updates to system memory.
- */
-#define mb() __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
-#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define read_barrier_depends() do { } while(0)
-
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
-#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() eieio()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() __asm__ __volatile__("": : :"memory")
-#define smp_rmb() __asm__ __volatile__("": : :"memory")
-#define smp_wmb() __asm__ __volatile__("": : :"memory")
-#define smp_read_barrier_depends() do { } while(0)
-#endif /* CONFIG_SMP */
-
-#ifdef __KERNEL__
-struct task_struct;
-struct pt_regs;
-
-#ifdef CONFIG_DEBUGGER
-
-extern int (*__debugger)(struct pt_regs *regs);
-extern int (*__debugger_ipi)(struct pt_regs *regs);
-extern int (*__debugger_bpt)(struct pt_regs *regs);
-extern int (*__debugger_sstep)(struct pt_regs *regs);
-extern int (*__debugger_iabr_match)(struct pt_regs *regs);
-extern int (*__debugger_dabr_match)(struct pt_regs *regs);
-extern int (*__debugger_fault_handler)(struct pt_regs *regs);
-
-#define DEBUGGER_BOILERPLATE(__NAME) \
-static inline int __NAME(struct pt_regs *regs) \
-{ \
- if (unlikely(__ ## __NAME)) \
- return __ ## __NAME(regs); \
- return 0; \
-}
-
-DEBUGGER_BOILERPLATE(debugger)
-DEBUGGER_BOILERPLATE(debugger_ipi)
-DEBUGGER_BOILERPLATE(debugger_bpt)
-DEBUGGER_BOILERPLATE(debugger_sstep)
-DEBUGGER_BOILERPLATE(debugger_iabr_match)
-DEBUGGER_BOILERPLATE(debugger_dabr_match)
-DEBUGGER_BOILERPLATE(debugger_fault_handler)
-
-#ifdef CONFIG_XMON
-extern void xmon_init(int enable);
-#endif
-
-#else
-static inline int debugger(struct pt_regs *regs) { return 0; }
-static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
-static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
-static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
-static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
-#endif
-
-extern int set_dabr(unsigned long dabr);
-extern void _exception(int signr, struct pt_regs *regs, int code,
- unsigned long addr);
-extern int fix_alignment(struct pt_regs *regs);
-extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
- int sig);
-extern void show_regs(struct pt_regs * regs);
-extern void low_hash_fault(struct pt_regs *regs, unsigned long address);
-extern int die(const char *str, struct pt_regs *regs, long err);
-
-extern int _get_PVR(void);
-extern void giveup_fpu(struct task_struct *);
-extern void disable_kernel_fp(void);
-extern void flush_fp_to_thread(struct task_struct *);
-extern void enable_kernel_fp(void);
-extern void giveup_altivec(struct task_struct *);
-extern void disable_kernel_altivec(void);
-extern void enable_kernel_altivec(void);
-extern int emulate_altivec(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
-extern void cvt_df(double *from, float *to, struct thread_struct *thread);
-
-#ifdef CONFIG_ALTIVEC
-extern void flush_altivec_to_thread(struct task_struct *);
-#else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-static inline void flush_spe_to_thread(struct task_struct *t)
-{
-}
-
-extern int mem_init_done; /* set on boot once kmalloc can be called */
-extern unsigned long memory_limit;
-
-/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
-extern unsigned char e2a(unsigned char);
-
-extern struct task_struct *__switch_to(struct task_struct *,
- struct task_struct *);
-#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
-
-struct thread_struct;
-extern struct task_struct * _switch(struct thread_struct *prev,
- struct thread_struct *next);
-
-extern unsigned long klimit;
-
-extern int powersave_nap; /* set if nap mode can be used in idle loop */
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- *
- * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
- * is more like most of the other architectures.
- */
-static __inline__ unsigned long
-__xchg_u32(volatile unsigned int *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: lwarx %0,0,%3 # __xchg_u32\n\
- stwcx. %2,0,%3\n\
-2: bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (dummy), "=m" (*m)
- : "r" (val), "r" (m)
- : "cc", "memory");
-
- return (dummy);
-}
-
-static __inline__ unsigned long
-__xchg_u64(volatile long *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: ldarx %0,0,%3 # __xchg_u64\n\
- stdcx. %2,0,%3\n\
-2: bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (dummy), "=m" (*m)
- : "r" (val), "r" (m)
- : "cc", "memory");
-
- return (dummy);
-}
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long
-__xchg(volatile void *ptr, unsigned long x, unsigned int size)
-{
- switch (size) {
- case 4:
- return __xchg_u32(ptr, x);
- case 8:
- return __xchg_u64(ptr, x);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-#define xchg(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
- })
-
-#define tas(ptr) (xchg((ptr),1))
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static __inline__ unsigned long
-__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
-{
- unsigned int prev;
-
- __asm__ __volatile__ (
- EIEIO_ON_SMP
-"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
- cmpw 0,%0,%3\n\
- bne- 2f\n\
- stwcx. %4,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
- : "cc", "memory");
-
- return prev;
-}
-
-static __inline__ unsigned long
-__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
-{
- unsigned long prev;
-
- __asm__ __volatile__ (
- EIEIO_ON_SMP
-"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
- cmpd 0,%0,%3\n\
- bne- 2f\n\
- stdcx. %4,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
- : "cc", "memory");
-
- return prev;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
- unsigned int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32(ptr, old, new);
- case 8:
- return __cmpxchg_u64(ptr, old, new);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-
-/*
- * We handle most unaligned accesses in hardware. On the other hand
- * unaligned DMA can be very expensive on some ppc64 IO chips (it does
- * powers of 2 writes until it reaches sufficient alignment).
- *
- * Based on this we disable the IP header alignment in network drivers.
- */
-#define NET_IP_ALIGN 0
-
-#define arch_align_stack(x) (x)
-
-extern unsigned long reloc_offset(void);
-
-#endif /* __KERNEL__ */
-#endif
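The __cmpxchg_u32/__cmpxchg_u64 routines removed with this header implement the usual compare-and-swap contract with lwarx/stwcx.: the new value is stored only if the location still holds the expected old value, and the previous contents are returned either way. The portable sketch below expresses the same semantics with the GCC/Clang __atomic builtins instead of PPC assembly, purely to illustrate the contract.

#include <assert.h>
#include <stdio.h>

static unsigned long my_cmpxchg(volatile unsigned long *p,
                                unsigned long old, unsigned long new)
{
        unsigned long expected = old;

        /* store `new` only if *p == old; on failure `expected` is updated
         * with the value actually found, so it always ends up holding the
         * previous contents of *p */
        __atomic_compare_exchange_n(p, &expected, new, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;
}

int main(void)
{
        volatile unsigned long v = 1;

        assert(my_cmpxchg(&v, 1, 2) == 1 && v == 2);   /* matched: swapped      */
        assert(my_cmpxchg(&v, 1, 3) == 2 && v == 2);   /* mismatched: untouched */
        printf("cmpxchg checks passed\n");
        return 0;
}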