Diffstat (limited to 'arch/x86')
 arch/x86/Kconfig             | 15 ++++++++++++---
 arch/x86/kernel/io_apic_32.c |  1 +
 arch/x86/kernel/mfgpt_32.c   |  8 ++++----
 arch/x86/mm/init_32.c        | 36 +++---------------------------------
 arch/x86/mm/init_64.c        |  9 ---------
 arch/x86/mm/ioremap.c        | 15 +++++++++------
 arch/x86/mm/pat.c            |  9 +++------
 arch/x86/xen/mmu.c           |  4 ++--
 8 files changed, 34 insertions(+), 63 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e5790fe9e33..a12dbb2b93f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -86,9 +86,6 @@ config GENERIC_GPIO
config ARCH_MAY_HAVE_PC_FDC
def_bool y
-config DMI
- def_bool y
-
config RWSEM_GENERIC_SPINLOCK
def_bool !X86_XADD
@@ -114,6 +111,9 @@ config GENERIC_TIME_VSYSCALL
config ARCH_HAS_CPU_RELAX
def_bool y
+config ARCH_HAS_CACHE_LINE_SIZE
+ def_bool y
+
config HAVE_SETUP_PER_CPU_AREA
def_bool X86_64 || (X86_SMP && !X86_VOYAGER)
@@ -482,6 +482,15 @@ config HPET_EMULATE_RTC
# Mark as embedded because too many people got it wrong.
# The code disables itself when not needed.
+config DMI
+ default y
+ bool "Enable DMI scanning" if EMBEDDED
+ help
+ Enables scanning of DMI to identify machine quirks. Say Y
+ here unless you have verified that your setup is not
+ affected by entries in the DMI blacklist. Required by PNP
+ BIOS code.
+
config GART_IOMMU
bool "GART IOMMU support" if EMBEDDED
default y
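
A note on the DMI change above: making DMI optional under EMBEDDED means a kernel built with CONFIG_DMI=n silently skips every DMI-based quirk. As a reminder of what that entails, here is a minimal sketch of how a DMI blacklist is consumed; the names and table contents are hypothetical, not taken from this patch:

	static int __init example_quirk(const struct dmi_system_id *d)
	{
		printk(KERN_INFO "applying quirk for %s\n", d->ident);
		return 0;
	}

	static struct dmi_system_id example_blacklist[] = {
		{
			.callback = example_quirk,
			.ident = "Hypothetical Machine",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
			},
		},
		{ }	/* terminator */
	};

With CONFIG_DMI=n, dmi_check_system(example_blacklist) compiles to a stub that matches nothing, which is exactly the situation the new help text warns about.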
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 696b8e4e66b..a40d54fc1fd 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -2444,6 +2444,7 @@ void destroy_irq(unsigned int irq)
dynamic_irq_cleanup(irq);
spin_lock_irqsave(&vector_lock, flags);
+ clear_bit(irq_vector[irq], used_vectors);
irq_vector[irq] = 0;
spin_unlock_irqrestore(&vector_lock, flags);
}
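
The io_apic_32.c one-liner above plugs a vector leak: destroy_irq() zeroed irq_vector[irq] but never released the vector's slot in the used_vectors bitmap, so repeated create_irq()/destroy_irq() cycles would exhaust vectors. A sketch of the assumed counterpart on the allocation side (reserve_vector() is a hypothetical name; the real logic lives in create_irq()):

	static DECLARE_BITMAP(used_vectors, NR_VECTORS);

	static void reserve_vector(unsigned int irq, int vector)
	{
		unsigned long flags;

		spin_lock_irqsave(&vector_lock, flags);
		set_bit(vector, used_vectors);	/* what clear_bit() now undoes */
		irq_vector[irq] = vector;
		spin_unlock_irqrestore(&vector_lock, flags);
	}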
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index cfc2648d25f..3cad17fe026 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -63,7 +63,7 @@ static int __init mfgpt_fix(char *s)
/* The following undocumented bit resets the MFGPT timers */
val = 0xFF; dummy = 0;
- wrmsr(0x5140002B, val, dummy);
+ wrmsr(MSR_MFGPT_SETUP, val, dummy);
return 1;
}
__setup("mfgptfix", mfgpt_fix);
@@ -127,17 +127,17 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
* 6; that is, resets for 7 and 8 will be ignored. Is this
* a problem? -dilinger
*/
- msr = MFGPT_NR_MSR;
+ msr = MSR_MFGPT_NR;
mask = 1 << (timer + 24);
break;
case MFGPT_EVENT_NMI:
- msr = MFGPT_NR_MSR;
+ msr = MSR_MFGPT_NR;
mask = 1 << (timer + shift);
break;
case MFGPT_EVENT_IRQ:
- msr = MFGPT_IRQ_MSR;
+ msr = MSR_MFGPT_IRQ;
mask = 1 << (timer + shift);
break;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4a476189295..de236e419cb 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -287,47 +287,17 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
pkmap_page_table = pte;
}
-static void __meminit free_new_highpage(struct page *page)
-{
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
-}
-
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
ClearPageReserved(page);
- free_new_highpage(page);
+ init_page_count(page);
+ __free_page(page);
+ totalhigh_pages++;
} else
SetPageReserved(page);
}
-static int __meminit
-add_one_highpage_hotplug(struct page *page, unsigned long pfn)
-{
- free_new_highpage(page);
- totalram_pages++;
-#ifdef CONFIG_FLATMEM
- max_mapnr = max(pfn, max_mapnr);
-#endif
- num_physpages++;
-
- return 0;
-}
-
-/*
- * Not currently handling the NUMA case.
- * Assuming single node and all memory that
- * has been added dynamically that would be
- * onlined here is in HIGHMEM.
- */
-void __meminit online_page(struct page *page)
-{
- ClearPageReserved(page);
- add_one_highpage_hotplug(page, page_to_pfn(page));
-}
-
#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(int bad_ppro)
{
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5fbb8652cf5..32ba13b0f81 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -620,15 +620,6 @@ void __init paging_init(void)
/*
* Memory hotplug specific functions
*/
-void online_page(struct page *page)
-{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalram_pages++;
- num_physpages++;
-}
-
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory is added always to NORMAL zone. This means you will never get
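
Both the 32-bit and 64-bit copies of online_page() are deleted in the two files above, presumably in favour of a single generic implementation in common memory-hotplug code. A sketch of what such a generic version would have to combine from the two deleted bodies (its exact form and location are assumptions):

	void online_page(struct page *page)
	{
		totalram_pages++;
		num_physpages++;
	#ifdef CONFIG_HIGHMEM
		if (PageHighMem(page))
			totalhigh_pages++;
	#endif
	#ifdef CONFIG_FLATMEM
		max_mapnr = max(page_to_pfn(page), max_mapnr);
	#endif
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}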
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index d176b23110c..804de18abcc 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -117,8 +117,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
- unsigned long prot_val)
+static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ unsigned long size, unsigned long prot_val, void *caller)
{
unsigned long pfn, offset, vaddr;
resource_size_t last_addr;
@@ -212,7 +212,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
/*
* Ok, go for it..
*/
- area = get_vm_area(size, VM_IOREMAP);
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
area->phys_addr = phys_addr;
@@ -255,7 +255,8 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
*/
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
- return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
+ return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
@@ -272,7 +273,8 @@ EXPORT_SYMBOL(ioremap_nocache);
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
if (pat_wc_enabled)
- return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
+ return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+ __builtin_return_address(0));
else
return ioremap_nocache(phys_addr, size);
}
@@ -280,7 +282,8 @@ EXPORT_SYMBOL(ioremap_wc);
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
- return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
+ return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
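
The ioremap.c changes thread a caller cookie through to get_vm_area_caller(), so each vmalloc area records which driver created the mapping instead of always blaming __ioremap. Note that __builtin_return_address(0) must be taken in each exported wrapper: evaluated inside __ioremap_caller(), it would record the wrapper itself. A hypothetical new variant would follow the same pattern:

	void __iomem *ioremap_example(resource_size_t phys_addr, unsigned long size)
	{
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
	}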
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index e7ca7fc48d1..277446cd30b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -387,8 +387,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
break;
}
- printk(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
- saved_ptr->start, saved_ptr->end);
+ pr_debug("Overlap at 0x%Lx-0x%Lx\n",
+ saved_ptr->start, saved_ptr->end);
/* No conflict. Go ahead and add this new entry */
list_add(&new_entry->nd, &saved_ptr->nd);
new_entry = NULL;
@@ -510,7 +510,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
{
u64 offset = ((u64) pfn) << PAGE_SHIFT;
unsigned long flags = _PAGE_CACHE_UC_MINUS;
- unsigned long ret_flags;
int retval;
if (!range_is_allowed(pfn, size))
@@ -549,14 +548,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
if (flags != _PAGE_CACHE_UC_MINUS) {
retval = reserve_memtype(offset, offset + size, flags, NULL);
} else {
- retval = reserve_memtype(offset, offset + size, -1, &ret_flags);
+ retval = reserve_memtype(offset, offset + size, -1, &flags);
}
if (retval < 0)
return 0;
- flags = ret_flags;
-
if (pfn <= max_pfn_mapped &&
ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
free_memtype(offset, offset + size);
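
The pat.c cleanup drops the ret_flags temporary by relying on reserve_memtype()'s in/out convention, as the hunks themselves show: passing -1 as req_type asks PAT to pick a compatible type and write it back through the last argument. The resulting idiom, in sketch form:

	unsigned long flags = _PAGE_CACHE_UC_MINUS;

	if (reserve_memtype(offset, offset + size, -1, &flags) < 0)
		return 0;
	/* flags now holds the type actually reserved */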
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 6cbcf65609a..126766d43ae 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -387,7 +387,7 @@ static void xen_do_pin(unsigned level, unsigned long pfn)
static int pin_page(struct page *page, enum pt_level level)
{
- unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
+ unsigned pgfl = TestSetPagePinned(page);
int flush;
if (pgfl)
@@ -468,7 +468,7 @@ void __init xen_mark_init_mm_pinned(void)
static int unpin_page(struct page *page, enum pt_level level)
{
- unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);
+ unsigned pgfl = TestClearPagePinned(page);
if (pgfl && !PageHighMem(page)) {
void *pt = lowmem_page_address(page);
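
Finally, the xen/mmu.c hunks swap open-coded bit operations on page->flags for page-flag test-and-set helpers. The assumed definitions follow the standard page-flags pattern; the actual header and the PG_pinned encoding are not shown in this diff:

	#define TestSetPagePinned(page) \
		test_and_set_bit(PG_pinned, &(page)->flags)
	#define TestClearPagePinned(page) \
		test_and_clear_bit(PG_pinned, &(page)->flags)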