From 2d4b1fa234417b902c9d3034442387c1805bfa7b Mon Sep 17 00:00:00 2001
From: Bob Picco
Date: Tue, 4 Oct 2005 15:13:57 -0400
Subject: [PATCH] V5 ia64 SPARSEMEM - SPARSEMEM code changes

This patch is the minimal set of changes required by ia64 to use SPARSEMEM.

Signed-off-by: Bob Picco
Signed-off-by: Tony Luck
---
 arch/ia64/mm/init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/ia64/mm/init.c')

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1281c609ee9..98246acd499 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -593,7 +593,7 @@ mem_init (void)
 	platform_dma_init();
 #endif

-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 	if (!mem_map)
 		BUG();
 	max_mapnr = max_low_pfn;
--
cgit v1.2.3


From 46dea3d092d23a58b42499cc8a21de0fad079f4a Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Sat, 29 Oct 2005 18:16:20 -0700
Subject: [PATCH] mm: ia64 use expand_upwards

ia64 has an expand_backing_store function for growing its Register Backing
Store vma upwards.  But more complete code for this purpose is found in
the CONFIG_STACK_GROWSUP part of mm/mmap.c.  Uglify its #ifdefs further
to provide expand_upwards for ia64 as well as expand_stack for parisc.

The Register Backing Store vma should be marked VM_ACCOUNT.  Implement
the intention of growing it only a page at a time, instead of passing
an address outside of the vma to handle_mm_fault, with unknown
consequences.

Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/mm/init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/ia64/mm/init.c')

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 98246acd499..0063b2c5090 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -158,7 +158,7 @@ ia64_init_addr_space (void)
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
-		vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
+		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
 		down_write(&current->mm->mmap_sem);
 		if (insert_vm_struct(current->mm, vma)) {
 			up_write(&current->mm->mmap_sem);
--
cgit v1.2.3


From 872fec16d9a0ed3b75b8893aa217e49cca575ee5 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Sat, 29 Oct 2005 18:16:21 -0700
Subject: [PATCH] mm: init_mm without ptlock

First step in pushing down the page_table_lock.  init_mm.page_table_lock
has been used throughout the architectures (usually for ioremap): not to
serialize kernel address space allocation (that's usually vmlist_lock),
but because pud_alloc, pmd_alloc and pte_alloc_kernel expect the caller
to hold it.

Reverse that: don't lock or unlock init_mm.page_table_lock in any of the
architectures; instead rely on pud_alloc, pmd_alloc and pte_alloc_kernel
to take and drop it when allocating a new one, to check lest a racing
task already did.  Similarly, no page_table_lock in vmalloc's map_vm_area.

Some temporary ugliness in __pud_alloc and __pmd_alloc: since they also
handle user mms, which are converted only by a later patch, for now they
have to lock differently according to whether or not it's init_mm.

If sources get muddled, there's a danger that an arch source taking
init_mm.page_table_lock will be mixed with common source also taking it
(or neither take it).  So break the rules and make another change, which
should break the build for such a mismatch: remove the redundant mm arg
from pte_alloc_kernel (ppc64 scrapped its distinct ioremap_mm in 2.6.13).

Exceptions: arm26 used pte_alloc_kernel on user mm, now pte_alloc_map;
ia64 used pte_alloc_map on init_mm, now pte_alloc_kernel; parisc had
bad args to pmd_alloc and pte_alloc_kernel in unused USE_HPPA_IOREMAP
code; ppc64 map_io_page forgot to unlock on failure; ppc mmu_mapin_ram
and ppc64 im_free took page_table_lock for no good reason.

Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/mm/init.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

(limited to 'arch/ia64/mm/init.c')

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 0063b2c5090..e3215ba64ff 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -275,26 +275,21 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)

 	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

-	spin_lock(&init_mm.page_table_lock);
 	{
 		pud = pud_alloc(&init_mm, pgd, address);
 		if (!pud)
 			goto out;
-
 		pmd = pmd_alloc(&init_mm, pud, address);
 		if (!pmd)
 			goto out;
-		pte = pte_alloc_map(&init_mm, pmd, address);
+		pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			goto out;
-		if (!pte_none(*pte)) {
-			pte_unmap(pte);
+		if (!pte_none(*pte))
 			goto out;
-		}
 		set_pte(pte, mk_pte(page, pgprot));
-		pte_unmap(pte);
 	}
-  out:	spin_unlock(&init_mm.page_table_lock);
+  out:
 	/* no need for flush_tlb */
 	return page;
 }
--
cgit v1.2.3
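
For reference, a minimal sketch of the caller-side pattern after the last
patch above (this is not code from any of these commits; the helper name
map_one_kernel_page and its error handling are illustrative assumptions,
mirroring what put_kernel_page becomes in the diff): the caller walks
pgd/pud/pmd and calls pte_alloc_kernel() without an mm argument and without
touching init_mm.page_table_lock, since the allocators now take and drop
that lock internally when a new level must be allocated.

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/sched.h>	/* init_mm */
	#include <asm/pgalloc.h>
	#include <asm/pgtable.h>

	/* Hypothetical helper: map one kernel page, 2.6.14-era API. */
	static int map_one_kernel_page(struct page *page,
				       unsigned long address, pgprot_t prot)
	{
		pgd_t *pgd = pgd_offset_k(address);	/* kernel tables, i.e. &init_mm */
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		pte = pte_alloc_kernel(pmd, address);	/* no mm arg; locks internally */
		if (!pte)
			return -ENOMEM;
		if (pte_none(*pte))			/* no pte_map/pte_unmap for kernel ptes */
			set_pte(pte, mk_pte(page, prot));
		return 0;
	}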