From 00b65985fb2fc542b855b03fcda0d0f2bab4f442 Mon Sep 17 00:00:00 2001
From: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Date: Fri, 13 Oct 2006 10:08:13 -0700
Subject: [IA64] relax per-cpu TLB requirement to DTC

Instead of pinning the per-cpu TLB mapping into a DTR, use the DTC.
This frees up one TLB entry for applications, or even for the kernel,
if the access pattern to the per-cpu data area has high temporal
locality.  Since the per-cpu area is mapped at the top of region 7, we
just need to add a special case in alt_dtlb_miss.  The physical address
of the per-cpu data is already conveniently stored in
IA64_KR(PER_CPU_DATA).  Latency for alt_dtlb_miss is not affected, as
all of the extra work can be hidden: the alt_dtlb_miss handler was
measured at 23 cycles both before and after the patch.

The performance effect is significant for applications that put heavy
TLB pressure on the CPU.  Workloads such as database online transaction
processing, or applications that use terabytes of memory, benefit the
most.  Measurement with an industry-standard database benchmark showed
an improvement upward of 1.6%, while smaller workloads (cpu, java) also
showed small improvements.

Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
 arch/ia64/mm/init.c | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)
(limited to 'arch/ia64/mm')

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657..07d82cd7cbd 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -337,7 +337,7 @@ setup_gate (void)
 void __devinit
 ia64_mmu_init (void *my_cpu_data)
 {
-	unsigned long psr, pta, impl_va_bits;
+	unsigned long pta, impl_va_bits;
 	extern void __devinit tlb_init (void);
 
 #ifdef CONFIG_DISABLE_VHPT
@@ -346,15 +346,6 @@ ia64_mmu_init (void *my_cpu_data)
 #	define VHPT_ENABLE_BIT	1
 #endif
 
-	/* Pin mapping for percpu area into TLB */
-	psr = ia64_clear_ic();
-	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
-		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
-		 PERCPU_PAGE_SHIFT);
-
-	ia64_set_psr(psr);
-	ia64_srlz_i();
-
 	/*
 	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
 	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
-- cgit v1.2.3
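The alt_dtlb_miss special case itself is ia64 assembly, but the address
arithmetic behind it can be sketched in plain C.  The model below assumes
a 64KB per-cpu page at the very top of region 7 and uses a made-up
physical base in place of IA64_KR(PER_CPU_DATA); it illustrates the
translation the changelog describes, not the actual handler:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumptions: a 64KB per-cpu page mapped at the top of region 7. */
    #define PERCPU_PAGE_SHIFT 16
    #define PERCPU_PAGE_SIZE  (1UL << PERCPU_PAGE_SHIFT)
    #define PERCPU_ADDR       ((uint64_t) -PERCPU_PAGE_SIZE)  /* 0xffffffffffff0000 */

    /* Hypothetical stand-in for the value kept in IA64_KR(PER_CPU_DATA). */
    static uint64_t per_cpu_phys_base = 0x4000000;

    /* Model of the region-7 alternate DTLB miss translation: the per-cpu
     * window redirects through the saved physical base; everything else
     * uses the identity mapping (strip the region bits). */
    static uint64_t translate_region7(uint64_t va)
    {
            if (va >= PERCPU_ADDR)
                    return per_cpu_phys_base + (va - PERCPU_ADDR);
            return va & ((1UL << 61) - 1);
    }

    int main(void)
    {
            uint64_t va = PERCPU_ADDR + 0x10;
            printf("%#llx -> %#llx\n", (unsigned long long) va,
                   (unsigned long long) translate_region7(va));
            return 0;
    }

In this model the per-cpu check is a single unsigned comparison against
the last page of the address space, which is consistent with the
changelog's observation that handler latency is unchanged.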
From c4add2e537e6f60048dce8dc518254e7e605301d Mon Sep 17 00:00:00 2001
From: Bjorn Helgaas <bjorn.helgaas@hp.com>
Date: Fri, 30 Mar 2007 10:33:11 -0600
Subject: [IA64] rename ioremap variables to match i386

No functional change, just use the same names as i386.

Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
 arch/ia64/mm/ioremap.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
(limited to 'arch/ia64/mm')

diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 4280c074d64..1bc0c172726 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -14,13 +14,13 @@
 #include <asm/meminit.h>
 
 static inline void __iomem *
-__ioremap (unsigned long offset, unsigned long size)
+__ioremap (unsigned long phys_addr, unsigned long size)
 {
-	return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
+	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
 }
 
 void __iomem *
-ioremap (unsigned long offset, unsigned long size)
+ioremap (unsigned long phys_addr, unsigned long size)
 {
 	u64 attr;
 	unsigned long gran_base, gran_size;
@@ -30,31 +30,31 @@ ioremap (unsigned long offset, unsigned long size)
 	 * as the rest of the kernel.  For more details, see
 	 * Documentation/ia64/aliasing.txt.
 	 */
-	attr = kern_mem_attribute(offset, size);
+	attr = kern_mem_attribute(phys_addr, size);
 	if (attr & EFI_MEMORY_WB)
-		return (void __iomem *) phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(phys_addr);
 	else if (attr & EFI_MEMORY_UC)
-		return __ioremap(offset, size);
+		return __ioremap(phys_addr, size);
 
 	/*
 	 * Some chipsets don't support UC access to memory.  If
 	 * WB is supported for the whole granule, we prefer that.
 	 */
-	gran_base = GRANULEROUNDDOWN(offset);
-	gran_size = GRANULEROUNDUP(offset + size) - gran_base;
+	gran_base = GRANULEROUNDDOWN(phys_addr);
+	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
 	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
-		return (void __iomem *) phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(phys_addr);
 
-	return __ioremap(offset, size);
+	return __ioremap(phys_addr, size);
 }
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *
-ioremap_nocache (unsigned long offset, unsigned long size)
+ioremap_nocache (unsigned long phys_addr, unsigned long size)
 {
-	if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
+	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
 		return NULL;
 
-	return __ioremap(offset, size);
+	return __ioremap(phys_addr, size);
 }
 EXPORT_SYMBOL(ioremap_nocache);
-- cgit v1.2.3
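The decision flow in ioremap() above hinges on the granule-sized EFI
attribute check.  As a rough standalone illustration of that rounding,
assuming a 16MB granule (the real IA64_GRANULE_SHIFT depends on kernel
configuration) and re-deriving GRANULEROUNDDOWN/GRANULEROUNDUP rather
than copying the kernel macros:

    #include <stdio.h>

    /* Assumed 16MB granule; ia64 configurations use 16MB or 64MB. */
    #define GRANULE_SHIFT 24
    #define GRANULE_SIZE  (1UL << GRANULE_SHIFT)
    #define GRANULEROUNDDOWN(a) ((a) & ~(GRANULE_SIZE - 1))
    #define GRANULEROUNDUP(a)   (((a) + GRANULE_SIZE - 1) & ~(GRANULE_SIZE - 1))

    int main(void)
    {
            unsigned long phys_addr = 0x100C0000, size = 0x20000;

            /* ioremap() widens the EFI attribute query to whole granules,
             * since that is the unit the kernel identity mappings cover. */
            unsigned long gran_base = GRANULEROUNDDOWN(phys_addr);
            unsigned long gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;

            printf("request %#lx+%#lx -> granule window %#lx+%#lx\n",
                   phys_addr, size, gran_base, gran_size);
            return 0;
    }

A 128KB request thus gets judged by the attributes of the full 16MB
granule around it, which is exactly the limitation the next patch works
around with page-sized mappings.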
From 9b50ffb0c0281bc5a08ccd56ae9bb84296c28f38 Mon Sep 17 00:00:00 2001
From: Bjorn Helgaas <bjorn.helgaas@hp.com>
Date: Fri, 30 Mar 2007 10:34:05 -0600
Subject: [IA64] make ioremap avoid unsupported attributes

Example memory map (from HP sx1000 with VGA enabled):

    0x00000 - 0x9FFFF supports only WB (cacheable) access
    0xA0000 - 0xBFFFF supports only UC (uncacheable) access
    0xC0000 - 0xFFFFF supports only WB (cacheable) access

pci_read_rom() indirectly uses ioremap(0xC0000) to read the shadow VGA
option ROM.  ioremap() used to default to a 16MB or 64MB UC kernel
identity mapping, which would cause an MCA when reading 0xC0000, since
only WB is supported there.

X reads the option ROM to initialize devices.  A smaller test case is:

    # echo 1 > /sys/bus/pci/devices/0000:aa:03.0/rom
    # cp /sys/bus/pci/devices/0000:aa:03.0/rom x

To avoid this, we can use the same ioremap_page_range() strategy that
most architectures use for all ioremaps.  These page table mappings come
out of the vmalloc area.  On ia64, these are in region 5 (0xA...
addresses) and typically use 16KB or 64KB mappings instead of 16MB or
64MB mappings.  The smaller mappings give more flexibility to use the
correct attributes.

Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
 arch/ia64/mm/ioremap.c | 60 +++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 55 insertions(+), 5 deletions(-)
(limited to 'arch/ia64/mm')

diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 1bc0c172726..2a140627dfd 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -1,5 +1,5 @@
 /*
- * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
+ * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
  *	Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -10,11 +10,13 @@
 #include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
 #include <asm/io.h>
 #include <asm/meminit.h>
 
 static inline void __iomem *
-__ioremap (unsigned long phys_addr, unsigned long size)
+__ioremap (unsigned long phys_addr)
 {
 	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
 }
@@ -22,8 +24,13 @@ __ioremap (unsigned long phys_addr, unsigned long size)
 void __iomem *
 ioremap (unsigned long phys_addr, unsigned long size)
 {
+	void __iomem *addr;
+	struct vm_struct *area;
+	unsigned long offset;
+	pgprot_t prot;
 	u64 attr;
 	unsigned long gran_base, gran_size;
+	unsigned long page_base;
 
 	/*
 	 * For things in kern_memmap, we must use the same attribute
@@ -34,7 +41,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
 	if (attr & EFI_MEMORY_WB)
 		return (void __iomem *) phys_to_virt(phys_addr);
 	else if (attr & EFI_MEMORY_UC)
-		return __ioremap(phys_addr, size);
+		return __ioremap(phys_addr);
 
 	/*
 	 * Some chipsets don't support UC access to memory.  If
@@ -45,7 +52,42 @@ ioremap (unsigned long phys_addr, unsigned long size)
 	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
 		return (void __iomem *) phys_to_virt(phys_addr);
 
-	return __ioremap(phys_addr, size);
+	/*
+	 * WB is not supported for the whole granule, so we can't use
+	 * the region 7 identity mapping.  If we can safely cover the
+	 * area with kernel page table mappings, we can use those
+	 * instead.
+	 */
+	page_base = phys_addr & PAGE_MASK;
+	size = PAGE_ALIGN(phys_addr + size) - page_base;
+	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
+		prot = PAGE_KERNEL;
+
+		/*
+		 * Mappings have to be page-aligned
+		 */
+		offset = phys_addr & ~PAGE_MASK;
+		phys_addr &= PAGE_MASK;
+
+		/*
+		 * Ok, go for it..
+		 */
+		area = get_vm_area(size, VM_IOREMAP);
+		if (!area)
+			return NULL;
+
+		area->phys_addr = phys_addr;
+		addr = (void __iomem *) area->addr;
+		if (ioremap_page_range((unsigned long) addr,
+				(unsigned long) addr + size, phys_addr, prot)) {
+			vunmap((void __force *) addr);
+			return NULL;
+		}
+
+		return (void __iomem *) (offset + (char __iomem *)addr);
+	}
+
+	return __ioremap(phys_addr);
 }
 EXPORT_SYMBOL(ioremap);
 
@@ -55,6 +97,14 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
 	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
 		return NULL;
 
-	return __ioremap(phys_addr, size);
+	return __ioremap(phys_addr);
 }
 EXPORT_SYMBOL(ioremap_nocache);
+
+void
+iounmap (volatile void __iomem *addr)
+{
+	if (REGION_NUMBER(addr) == RGN_GATE)
+		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
-- cgit v1.2.3
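The alignment bookkeeping the new WB path performs (page_base, offset,
and the rounded mapping size) is easy to check in isolation.  Below is a
minimal sketch, assuming 16KB pages and an arbitrary unaligned request;
it mirrors only the arithmetic done before get_vm_area() and
ioremap_page_range(), not the kernel calls themselves:

    #include <stdio.h>

    /* Assumed 16KB pages; the changelog notes 16KB or 64KB mappings. */
    #define PAGE_SHIFT 14
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            /* Hypothetical unaligned request near the VGA option ROM. */
            unsigned long phys_addr = 0xC0100, size = 0x8000;

            unsigned long page_base = phys_addr & PAGE_MASK;   /* first mapped page */
            unsigned long offset    = phys_addr & ~PAGE_MASK;  /* byte offset for caller */
            unsigned long map_size  = PAGE_ALIGN(phys_addr + size) - page_base;

            /* The mapping covers whole pages; the caller gets base + offset,
             * so the returned pointer still addresses the exact byte requested. */
            printf("map %#lx bytes at %#lx, return mapping + %#lx\n",
                   map_size, page_base, offset);
            return 0;
    }

This is also why the new iounmap() masks the address with PAGE_MASK
before calling vunmap(): the pointer handed back to the caller may carry
that sub-page offset.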