Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--  arch/x86_64/mm/fault.c    |  47
-rw-r--r--  arch/x86_64/mm/init.c     | 121
-rw-r--r--  arch/x86_64/mm/ioremap.c  |   5
3 files changed, 112 insertions, 61 deletions
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 55250593d8c..08dc696f54e 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -41,6 +41,41 @@
 #define PF_RSVD	(1<<3)
 #define PF_INSTR	(1<<4)
 
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+	vmalloc_sync_all();
+	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	return NOTIFY_DONE;
+}
+#endif
+
 void bust_spinlocks(int yes)
 {
 	int loglevel_save = console_loglevel;
@@ -160,7 +195,7 @@ void dump_pagetable(unsigned long address)
 	printk("PGD %lx ", pgd_val(*pgd));
 	if (!pgd_present(*pgd)) goto ret;
-	pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
+	pud = pud_offset(pgd, address);
 	if (bad_address(pud)) goto bad;
 	printk("PUD %lx ", pud_val(*pud));
 	if (!pud_present(*pud))	goto ret;
@@ -348,7 +383,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		if (vmalloc_fault(address) >= 0)
 			return;
 	}
-	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
 					SIGSEGV) == NOTIFY_STOP)
 		return;
 	/*
@@ -358,7 +393,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
 					SIGSEGV) == NOTIFY_STOP)
 		return;
 
@@ -410,8 +445,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
 	if (error_code & 4) {
-		// XXX: align red zone size with ABI
-		if (address + 128 < regs->rsp)
+		/* Allow userspace just enough access below the stack pointer
+		 * to let the 'enter' instruction work.
+		 */
+		if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
 			goto bad_area;
 	}
 	if (expand_stack(vma, address))
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 4ba34e95d83..95bd232ff0c 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
 #include <linux/pci.h>
+#include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/memory_hotplug.h>
@@ -41,8 +42,6 @@
 #include <asm/proto.h>
 #include <asm/smp.h>
 #include <asm/sections.h>
-#include <asm/dma-mapping.h>
-#include <asm/swiotlb.h>
 
 #ifndef Dprintk
 #define Dprintk(x...)
@@ -90,8 +89,6 @@ void show_mem(void)
 	printk(KERN_INFO "%lu pages swap cached\n",cached);
 }
 
-/* References to section boundaries */
-
 int after_bootmem;
 
 static __init void *spp_getpage(void)
@@ -261,9 +258,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
 	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
 		unsigned long entry;
 
-		if (address > end) {
-			for (; i < PTRS_PER_PMD; i++, pmd++)
-				set_pmd(pmd, __pmd(0));
+		if (address >= end) {
+			if (!after_bootmem)
+				for (; i < PTRS_PER_PMD; i++, pmd++)
+					set_pmd(pmd, __pmd(0));
 			break;
 		}
 		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
@@ -341,7 +339,8 @@ static void __init find_early_table_space(unsigned long end)
 	table_end = table_start;
 
 	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+		end, table_start << PAGE_SHIFT,
+		(table_start << PAGE_SHIFT) + tables);
 }
 
 /* Setup the direct mapping of the physical memory at PAGE_OFFSET.
@@ -372,7 +371,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
 		pud_t *pud;
 
 		if (after_bootmem)
-			pud = pud_offset_k(pgd, start & PGDIR_MASK);
+			pud = pud_offset(pgd, start & PGDIR_MASK);
 		else
 			pud = alloc_low_page(&map, &pud_phys);
 
@@ -508,8 +507,6 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
 /*
  * Memory hotplug specific functions
  */
-#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
-
 void online_page(struct page *page)
 {
 	ClearPageReserved(page);
@@ -519,31 +516,17 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
-#ifndef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_MEMORY_HOTPLUG
 /*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
+ * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
+ *	via probe interface of sysfs. If acpi notifies hot-add event, then it
+ *	can tell node id by searching dsdt. But, probe interface doesn't have
+ *	node id. So, return 0 as node id at this time.
  */
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
 {
-	int err = -EIO;
-	unsigned long pfn;
-	unsigned long total = 0, mem = 0;
-	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
-		if (pfn_valid(pfn)) {
-			online_page(pfn_to_page(pfn));
-			err = 0;
-			mem++;
-		}
-		total++;
-	}
-	if (!err) {
-		z->spanned_pages += total;
-		z->present_pages += mem;
-		z->zone_pgdat->node_spanned_pages += total;
-		z->zone_pgdat->node_present_pages += mem;
-	}
-	return err;
+	return 0;
 }
 #endif
 
@@ -551,9 +534,9 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
  * Memory is added always to NORMAL zone. This means you will never get
  * additional DMA/DMA32 memory.
  */
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
-	struct pglist_data *pgdat = NODE_DATA(0);
+	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -570,7 +553,7 @@ error:
 	printk("%s: Problem encountered in __add_pages!\n", __func__);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(add_memory);
+EXPORT_SYMBOL_GPL(arch_add_memory);
 
 int remove_memory(u64 start, u64 size)
 {
@@ -578,7 +561,33 @@ int remove_memory(u64 start, u64 size)
 }
 EXPORT_SYMBOL_GPL(remove_memory);
 
-#endif
+#else /* CONFIG_MEMORY_HOTPLUG */
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+	int err = -EIO;
+	unsigned long pfn;
+	unsigned long total = 0, mem = 0;
+	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+		if (pfn_valid(pfn)) {
+			online_page(pfn_to_page(pfn));
+			err = 0;
+			mem++;
+		}
+		total++;
+	}
+	if (!err) {
+		z->spanned_pages += total;
+		z->present_pages += mem;
+		z->zone_pgdat->node_spanned_pages += total;
+		z->zone_pgdat->node_present_pages += mem;
+	}
+	return err;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
 
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
 			 kcore_vsyscall;
@@ -587,10 +596,7 @@ void __init mem_init(void)
 {
 	long codesize, reservedpages, datasize, initsize;
 
-#ifdef CONFIG_SWIOTLB
-	pci_swiotlb_init();
-#endif
-	no_iommu_init();
+	pci_iommu_alloc();
 
 	/* How many end-of-memory variables you have, grandma! */
 	max_low_pfn = end_pfn;
@@ -644,20 +650,31 @@ void __init mem_init(void)
 #endif
 }
 
-void free_initmem(void)
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
 	unsigned long addr;
 
-	addr = (unsigned long)(&__init_begin);
-	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+	if (begin >= end)
+		return;
+
+	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
+		memset((void *)(addr & ~(PAGE_SIZE-1)),
+			POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
-	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
-	printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
+}
+
+void free_initmem(void)
+{
+	memset(__initdata_begin, POISON_FREE_INITDATA,
+		__initdata_end - __initdata_begin);
+	free_init_pages("unused kernel memory",
+			(unsigned long)(&__init_begin),
+			(unsigned long)(&__init_end));
 }
 
 #ifdef CONFIG_DEBUG_RODATA
@@ -686,15 +703,7 @@ void mark_rodata_ro(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (start >= end)
-		return;
-	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-	}
+	free_init_pages("initrd memory", start, end);
 }
 #endif
 
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index ae207064201..45d7d823c3b 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -11,6 +11,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <asm/io.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
@@ -219,6 +220,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	}
 	return (__force void __iomem *) (offset + (char *)addr);
 }
+EXPORT_SYMBOL(__ioremap);
 
 /**
  * ioremap_nocache - map bus memory into CPU space
@@ -246,6 +248,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, _PAGE_PCD);
 }
+EXPORT_SYMBOL(ioremap_nocache);
 
 /**
  * iounmap - Free a IO remapping
@@ -291,3 +294,5 @@ void iounmap(volatile void __iomem *addr)
 	BUG_ON(p != o || o == NULL);
 	kfree(p);
 }
+EXPORT_SYMBOL(iounmap);
+
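
The fault.c change above moves page-fault notification off the unconditional notify_die() call and onto a dedicated chain that only exists when CONFIG_KPROBES is set, so the fault path does no notifier work on kernels without a consumer. Below is a rough sketch of how a consumer could attach to the new hooks; it is not part of this commit, it assumes the 2.6.17-era die_args/DIE_PAGE_FAULT definitions from <asm/kdebug.h>, and the module name and printk are invented:

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>		/* die_args, DIE_PAGE_FAULT, *_page_fault_notifier() */

/* Runs on every page fault once registered; atomic context, so keep it short. */
static int pf_watch_event(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT)
		printk(KERN_DEBUG "pf_watch: fault, error_code=%lx trap=%d\n",
		       (unsigned long)args->err, args->trapnr);
	return NOTIFY_DONE;		/* let normal fault handling continue */
}

static struct notifier_block pf_watch_nb = {
	.notifier_call = pf_watch_event,
};

static int __init pf_watch_init(void)
{
	/* register_page_fault_notifier() calls vmalloc_sync_all() itself */
	return register_page_fault_notifier(&pf_watch_nb);
}

static void __exit pf_watch_exit(void)
{
	unregister_page_fault_notifier(&pf_watch_nb);
}

module_init(pf_watch_init);
module_exit(pf_watch_exit);
MODULE_LICENSE("GPL");

Whether the two registration functions are exported to loadable modules depends on the tree; the in-tree consumer at this point is the kprobes code, which is built in.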
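
In init.c, the old add_memory() becomes arch_add_memory() with an explicit node id, and memory_add_physaddr_to_nid() is added as a stub (always node 0 for now) for callers that start from nothing but a physical address, such as the sysfs probe interface mentioned in the new comment. The snippet below only illustrates that calling contract; it is not the real generic code in mm/memory_hotplug.c (which also creates the pgdat and onlines the new sections), and it assumes both hooks are declared in <linux/memory_hotplug.h>:

#include <linux/types.h>
#include <linux/memory_hotplug.h>

/* Illustration only: hot-add a range when all we know is its physical address. */
static int example_hot_add(u64 start, u64 size)
{
	int nid = memory_add_physaddr_to_nid(start);	/* 0 on x86_64 while the stub stands */

	return arch_add_memory(nid, start, size);
}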
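
The ioremap.c hunks only add EXPORT_SYMBOL() lines, which is what lets modular drivers map and unmap MMIO regions on x86_64. The usual pattern those exports enable looks roughly like this; the physical base address, size, and register offset are invented for the example:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/io.h>

#define EXAMPLE_MMIO_PHYS	0xfed00000UL	/* hypothetical device window */
#define EXAMPLE_MMIO_SIZE	0x1000
#define EXAMPLE_STATUS_REG	0x04		/* hypothetical register offset */

static void __iomem *example_base;

static int __init example_init(void)
{
	/* Uncached mapping, as device registers normally require */
	example_base = ioremap_nocache(EXAMPLE_MMIO_PHYS, EXAMPLE_MMIO_SIZE);
	if (!example_base)
		return -ENOMEM;

	printk(KERN_INFO "example: status=%08x\n",
	       readl(example_base + EXAMPLE_STATUS_REG));
	return 0;
}

static void __exit example_exit(void)
{
	iounmap(example_base);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

__ioremap() is exported as well, but drivers normally go through the ioremap()/ioremap_nocache() wrappers rather than calling it directly.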