author | Jonathan Corbet <corbet@lwn.net> | 2008-07-14 15:29:34 -0600
committer | Jonathan Corbet <corbet@lwn.net> | 2008-07-14 15:29:34 -0600
commit | 2fceef397f9880b212a74c418290ce69e7ac00eb
tree | d9cc09ab992825ef7fede4a688103503e3caf655 /arch/s390
parent | feae1ef116ed381625d3731c5ae4f4ebcb3fa302
parent | bce7f793daec3e65ec5c5705d2457b81fe7b5725
Merge commit 'v2.6.26' into bkl-removal
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/Kconfig | 4
-rw-r--r-- | arch/s390/appldata/appldata_base.c | 8
-rw-r--r-- | arch/s390/defconfig | 11
-rw-r--r-- | arch/s390/kernel/dis.c | 2
-rw-r--r-- | arch/s390/kernel/init_task.c | 1
-rw-r--r-- | arch/s390/kernel/smp.c | 4
-rw-r--r-- | arch/s390/kvm/diag.c | 2
-rw-r--r-- | arch/s390/kvm/interrupt.c | 7
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 13
-rw-r--r-- | arch/s390/mm/init.c | 49
-rw-r--r-- | arch/s390/mm/pgtable.c | 44
-rw-r--r-- | arch/s390/mm/vmem.c | 20
12 files changed, 104 insertions, 61 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1d035082e78..107e492cb47 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -304,10 +304,14 @@ config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	select SPARSEMEM_VMEMMAP_ENABLE
 	select SPARSEMEM_VMEMMAP
+	select SPARSEMEM_STATIC if !64BIT
 
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
 
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 655d52543e2..ad40729bec3 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -130,6 +130,7 @@ static void appldata_work_fn(struct work_struct *work)
 
 	P_DEBUG("  -= Work Queue =-\n");
 	i = 0;
+	get_online_cpus();
 	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
@@ -140,6 +141,7 @@ static void appldata_work_fn(struct work_struct *work)
 		}
 	}
 	spin_unlock(&appldata_ops_lock);
+	put_online_cpus();
 }
 
 /*
@@ -266,12 +268,14 @@ appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
 	len = *lenp;
 	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
 		return -EFAULT;
+	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (buf[0] == '1')
 		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
 	else if (buf[0] == '0')
 		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
 	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
 out:
 	*lenp = len;
 	*ppos += len;
@@ -314,10 +318,12 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
 		return -EINVAL;
 	}
 
+	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	appldata_interval = interval;
 	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
 	spin_unlock(&appldata_timer_lock);
+	put_online_cpus();
 
 	P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
 		interval);
@@ -556,8 +562,10 @@ static int __init appldata_init(void)
 		return -ENOMEM;
 	}
 
+	get_online_cpus();
 	for_each_online_cpu(i)
 		appldata_online_cpu(i);
+	put_online_cpus();
 
 	/* Register cpu hotplug notifier */
 	register_hotcpu_notifier(&appldata_nb);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index aa341d0ea1e..c5cdb975d59 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Wed Apr 30 11:07:45 2008
+# Linux kernel version: 2.6.26-rc4
+# Fri May 30 09:49:33 2008
 #
 CONFIG_SCHED_MC=y
 CONFIG_MMU=y
@@ -103,6 +103,7 @@ CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 CONFIG_MODVERSIONS=y
@@ -173,6 +174,7 @@ CONFIG_PREEMPT=y
 # CONFIG_PREEMPT_RCU is not set
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_FLATMEM_MANUAL is not set
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -210,6 +212,7 @@ CONFIG_FORCE_MAX_ZONEORDER=9
 CONFIG_PFAULT=y
 # CONFIG_SHARED_KERNEL is not set
 # CONFIG_CMM is not set
+# CONFIG_PAGE_STATES is not set
 CONFIG_VIRT_TIMER=y
 CONFIG_VIRT_CPU_ACCOUNTING=y
 # CONFIG_APPLDATA_BASE is not set
@@ -620,6 +623,7 @@ CONFIG_S390_VMUR=m
 #
 # CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+CONFIG_ACCESSIBILITY=y
 
 #
 # File systems
@@ -754,11 +758,12 @@ CONFIG_FRAME_WARN=2048
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_CHECK=y
+# CONFIG_HEADERS_CHECK is not set
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
 # CONFIG_DEBUG_RT_MUTEXES is not set
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c14a336f630..d2f270c995d 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -208,7 +208,7 @@ static const unsigned char formats[][7] = {
 	[INSTR_RRF_F0FF]  = { 0xff, F_16,F_24,F_28,0,0,0 },   /* e.g. madbr */
 	[INSTR_RRF_FUFF]  = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */
 	[INSTR_RRF_RURR]  = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */
-	[INSTR_RRF_R0RR]  = { 0xff, R_24,R_28,R_16,0,0,0 },   /* e.g. idte */
+	[INSTR_RRF_R0RR]  = { 0xff, R_24,R_16,R_28,0,0,0 },   /* e.g. idte */
 	[INSTR_RRF_U0FF]  = { 0xff, F_24,U4_16,F_28,0,0,0 },  /* e.g. fixr */
 	[INSTR_RRF_U0RF]  = { 0xff, R_24,U4_16,F_28,0,0,0 },  /* e.g. cfebr */
 	[INSTR_RRF_M0RR]  = { 0xff, R_24,R_28,M_16,0,0,0 },   /* e.g. sske */
diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c
index d494161b05b..7ad00396925 100644
--- a/arch/s390/kernel/init_task.c
+++ b/arch/s390/kernel/init_task.c
@@ -17,7 +17,6 @@
 #include <asm/pgtable.h>
 
 static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1f4228948dc..5d4fa4b1c74 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -711,7 +711,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	memset(sf, 0, sizeof(struct stack_frame));
 	sf->gprs[9] = (unsigned long) sf;
 	cpu_lowcore->save_area[15] = (unsigned long) sf;
-	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
+	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
 	asm volatile(
 		"	stam	0,15,0(%0)"
 		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
@@ -1089,7 +1089,7 @@ out:
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-int smp_rescan_cpus(void)
+int __ref smp_rescan_cpus(void)
 {
 	cpumask_t newcpus;
 	int cpu;
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index f639a152869..a0775e1f08d 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -20,7 +20,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
 	vcpu->stat.diagnose_44++;
 	vcpu_put(vcpu);
-	schedule();
+	yield();
 	vcpu_load(vcpu);
 	return 0;
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fcd1ed8015c..84a7fed4cd4 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -339,6 +339,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	if (kvm_cpu_has_interrupt(vcpu))
 		return 0;
 
+	__set_cpu_idle(vcpu);
+	spin_lock_bh(&vcpu->arch.local_int.lock);
+	vcpu->arch.local_int.timer_due = 0;
+	spin_unlock_bh(&vcpu->arch.local_int.lock);
+
 	if (psw_interrupts_disabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
 		__unset_cpu_idle(vcpu);
@@ -366,8 +371,6 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
 	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
-	__set_cpu_idle(vcpu);
-	vcpu->arch.local_int.timer_due = 0;
 	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
 		list_empty(&vcpu->arch.local_int.float_int->list) &&
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0ac36a649eb..6558b09ff57 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -423,6 +423,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -EINVAL; /* not implemented yet */
 }
 
+extern void s390_handle_mcck(void);
+
 static void __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
@@ -430,13 +432,21 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	if (need_resched())
 		schedule();
 
+	if (test_thread_flag(TIF_MCCK_PENDING))
+		s390_handle_mcck();
+
+	kvm_s390_deliver_pending_interrupts(vcpu);
+
 	vcpu->arch.sie_block->icptcode = 0;
 	local_irq_disable();
 	kvm_guest_enter();
 	local_irq_enable();
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
-	sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
+		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
 	local_irq_disable();
@@ -475,7 +485,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	might_sleep();
 
 	do {
-		kvm_s390_deliver_pending_interrupts(vcpu);
 		__vcpu_run(vcpu);
 		rc = kvm_handle_sie_intercept(vcpu);
 	} while (!signal_pending(current) && !rc);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 29f3a63806b..05598649b32 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,37 +44,34 @@ char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
 void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	unsigned long i, total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	unsigned long flags;
 	struct page *page;
+	pg_data_t *pgdat;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
-			continue;
-		page = pfn_to_page(i);
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (PageSwapCache(page))
-			cached++;
-		else if (page_count(page))
-			shared += page_count(page) - 1;
+	for_each_online_pgdat(pgdat) {
+		pgdat_resize_lock(pgdat, &flags);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			if (!pfn_valid(pgdat->node_start_pfn + i))
+				continue;
+			page = pfn_to_page(pgdat->node_start_pfn + i);
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
-	printk("%d pages of RAM\n", total);
-	printk("%d reserved pages\n", reserved);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
-
-	printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-	printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
-	printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-	printk("%lu pages slab\n",
-	       global_page_state(NR_SLAB_RECLAIMABLE) +
-	       global_page_state(NR_SLAB_UNRECLAIMABLE));
-	printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
+	printk("%ld pages of RAM\n", total);
+	printk("%ld reserved pages\n", reserved);
+	printk("%ld pages shared\n", shared);
+	printk("%ld pages swap cached\n", cached);
 }
 
 /*
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5c1aea97cd1..3d98ba82ea6 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 int s390_enable_sie(void)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm;
-	int rc;
+	struct mm_struct *mm, *old_mm;
 
-	task_lock(tsk);
-
-	rc = 0;
+	/* Do we have pgstes? if yes, we are done */
 	if (tsk->mm->context.pgstes)
-		goto unlock;
+		return 0;
 
-	rc = -EINVAL;
+	/* lets check if we are allowed to replace the mm */
+	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
-		goto unlock;
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+	task_unlock(tsk);
 
-	tsk->mm->context.pgstes = 1;	/* dirty little tricks .. */
+	/* we copy the mm with pgstes enabled */
+	tsk->mm->context.pgstes = 1;
 	mm = dup_mm(tsk);
 	tsk->mm->context.pgstes = 0;
-
-	rc = -ENOMEM;
 	if (!mm)
-		goto unlock;
-	mmput(tsk->mm);
+		return -ENOMEM;
+
+	/* Now lets check again if somebody attached ptrace etc */
+	task_lock(tsk);
+	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+		mmput(mm);
+		task_unlock(tsk);
+		return -EINVAL;
+	}
+
+	/* ok, we are alone. No ptrace, no threads, etc. */
+	old_mm = tsk->mm;
 	tsk->mm = tsk->active_mm = mm;
 	preempt_disable();
 	update_mm(mm, tsk);
 	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 	preempt_enable();
-	rc = 0;
-unlock:
 	task_unlock(tsk);
-	return rc;
+	mmput(old_mm);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ea2804808f3..e4868bfc672 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,12 +27,19 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-static pud_t *vmem_pud_alloc(void)
+static void __ref *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
 	return pud;
 }
 
-static pmd_t *vmem_pmd_alloc(void)
+static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		if (pte_none(*pt_dir)) {
 			unsigned long new_page;
 
-			new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
+			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
 			*pt_dir = pte;
 		}
 	}
+	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
 out:
 	flush_tlb_kernel_range(start_addr, end_addr);
@@ -228,7 +236,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (seg->start + seg->size >= VMEM_MAX_PHYS ||
+	if (seg->start + seg->size > VMEM_MAX_PHYS ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;