author    Linus Torvalds <torvalds@linux-foundation.org>  2008-07-14 13:25:01 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-07-14 13:25:01 -0700
commit    b7f80afa28866c257876c272d6c013e0dbed3c31
tree      4e72598307cda046a2e0db5e7c7cb1d8a15574ae
parent    42c59208219a2d43f0dde94bebc68c20b95b13ce
parent    5e34599fc8ba1e8889095bd56a71fd9802ed5a51
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (71 commits)
  [S390] sclp_tty: Fix scheduling while atomic bug.
  [S390] sclp_tty: remove ioctl interface.
  [S390] Remove P390 support.
  [S390] Cleanup vmcp printk messages.
  [S390] Cleanup lcs printk messages.
  [S390] Cleanup kprobes printk messages.
  [S390] Cleanup vmwatch printk messages.
  [S390] Cleanup dcssblk printk messages.
  [S390] Cleanup zfcp dumper printk messages.
  [S390] Cleanup vmlogrdr printk messages.
  [S390] Cleanup s390 debug feature print messages.
  [S390] Cleanup monreader printk messages.
  [S390] Cleanup appldata printk messages.
  [S390] Cleanup smsgiucv printk messages.
  [S390] Cleanup cpacf printk messages.
  [S390] Cleanup qeth print messages.
  [S390] Cleanup netiucv printk messages.
  [S390] Cleanup iucv printk messages.
  [S390] Cleanup sclp printk messages.
  [S390] Cleanup zcrypt printk messages.
  ...
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile         |   9
-rw-r--r--  arch/s390/kernel/binfmt_elf32.c   | 214
-rw-r--r--  arch/s390/kernel/compat_ptrace.h  |   4
-rw-r--r--  arch/s390/kernel/debug.c          |   9
-rw-r--r--  arch/s390/kernel/early.c          | 211
-rw-r--r--  arch/s390/kernel/ipl.c            | 462
-rw-r--r--  arch/s390/kernel/kprobes.c        |   4
-rw-r--r--  arch/s390/kernel/machine_kexec.c  |   1
-rw-r--r--  arch/s390/kernel/mem_detect.c     | 100
-rw-r--r--  arch/s390/kernel/process.c        |  32
-rw-r--r--  arch/s390/kernel/ptrace.c         | 363
-rw-r--r--  arch/s390/kernel/setup.c          |  51
-rw-r--r--  arch/s390/kernel/time.c           | 634
-rw-r--r--  arch/s390/kernel/topology.c       |   2
-rw-r--r--  arch/s390/kernel/vtime.c          |  81
15 files changed, 1410 insertions(+), 767 deletions(-)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 6302f508258..50f657e7734 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -7,9 +7,14 @@
#
CFLAGS_smp.o := -Wno-nonnull
+#
+# Pass UTS_MACHINE for user_regset definition
+#
+CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
obj-y := bitmap.o traps.o time.o process.o base.o early.o \
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
- s390_ext.o debug.o irq.o ipl.o dis.o diag.o
+ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -23,7 +28,7 @@ obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
compat_wrapper.o compat_exec_domain.o \
- binfmt_elf32.o $(compat-obj-y)
+ $(compat-obj-y)
obj-$(CONFIG_VIRT_TIMER) += vtime.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
deleted file mode 100644
index 3e1c315b736..00000000000
--- a/arch/s390/kernel/binfmt_elf32.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Support for 32-bit Linux for S390 ELF binaries.
- *
- * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Gerhard Tonn (ton@de.ibm.com)
- *
- * Heavily inspired by the 32-bit Sparc compat code which is
- * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
- */
-
-#define __ASMS390_ELF_H
-
-#include <linux/time.h>
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_CLASS ELFCLASS32
-#define ELF_DATA ELFDATA2MSB
-#define ELF_ARCH EM_S390
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
- (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
- && (x)->e_ident[EI_CLASS] == ELF_CLASS)
-
-/* ELF register definitions */
-#define NUM_GPRS 16
-#define NUM_FPRS 16
-#define NUM_ACRS 16
-
-/* For SVR4/S390 the function pointer to be registered with `atexit` is
- passed in R14. */
-#define ELF_PLAT_INIT(_r, load_addr) \
- do { \
- _r->gprs[14] = 0; \
- } while(0)
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE 4096
-
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
-
-/* Wow, the "main" arch needs arch dependent functions too.. :) */
-
-/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
- now struct_user_regs, they are different) */
-
-#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs)
-
-#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
-
-/* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. */
-
-#define ELF_HWCAP (0)
-
-/* This yields a string that ld.so will use to load implementation
- specific libraries for optimization. This is more specific in
- intent than poking at uname or /proc/cpuinfo.
-
- For the moment, we have only optimizations for the Intel generations,
- but that could change... */
-
-#define ELF_PLATFORM (NULL)
-
-#define SET_PERSONALITY(ex, ibcs2) \
-do { \
- if (ibcs2) \
- set_personality(PER_SVR4); \
- else if (current->personality != PER_LINUX32) \
- set_personality(PER_LINUX); \
- set_thread_flag(TIF_31BIT); \
-} while (0)
-
-#include "compat_linux.h"
-
-typedef _s390_fp_regs32 elf_fpregset_t;
-
-typedef struct
-{
-
- _psw_t32 psw;
- __u32 gprs[__NUM_GPRS];
- __u32 acrs[__NUM_ACRS];
- __u32 orig_gpr2;
-} s390_regs32;
-typedef s390_regs32 elf_gregset_t;
-
-static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
-{
- int i;
-
- memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
- memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
- for (i = 0; i < NUM_GPRS; i++)
- regs->gprs[i] = ptregs->gprs[i];
- save_access_regs(regs->acrs);
- regs->orig_gpr2 = ptregs->orig_gpr2;
- return 1;
-}
-
-static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
-{
- struct pt_regs *ptregs = task_pt_regs(tsk);
- int i;
-
- memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
- memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
- for (i = 0; i < NUM_GPRS; i++)
- regs->gprs[i] = ptregs->gprs[i];
- memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
- regs->orig_gpr2 = ptregs->orig_gpr2;
- return 1;
-}
-
-static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
-{
- if (tsk == current)
- save_fp_regs((s390_fp_regs *) fpregs);
- else
- memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
- return 1;
-}
-
-#include <asm/processor.h>
-#include <asm/pgalloc.h>
-#include <linux/module.h>
-#include <linux/elfcore.h>
-#include <linux/binfmts.h>
-#include <linux/compat.h>
-
-#define elf_prstatus elf_prstatus32
-struct elf_prstatus32
-{
- struct elf_siginfo pr_info; /* Info associated with signal */
- short pr_cursig; /* Current signal */
- u32 pr_sigpend; /* Set of pending signals */
- u32 pr_sighold; /* Set of held signals */
- pid_t pr_pid;
- pid_t pr_ppid;
- pid_t pr_pgrp;
- pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
- struct compat_timeval pr_cutime; /* Cumulative user time */
- struct compat_timeval pr_cstime; /* Cumulative system time */
- elf_gregset_t pr_reg; /* GP registers */
- int pr_fpvalid; /* True if math co-processor being used. */
-};
-
-#define elf_prpsinfo elf_prpsinfo32
-struct elf_prpsinfo32
-{
- char pr_state; /* numeric process state */
- char pr_sname; /* char for pr_state */
- char pr_zomb; /* zombie */
- char pr_nice; /* nice val */
- u32 pr_flag; /* flags */
- u16 pr_uid;
- u16 pr_gid;
- pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
- /* Lots missing */
- char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
-};
-
-#include <linux/highuid.h>
-
-/*
-#define init_elf_binfmt init_elf32_binfmt
-*/
-
-#undef start_thread
-#define start_thread start_thread31
-
-static inline void start_thread31(struct pt_regs *regs, unsigned long new_psw,
- unsigned long new_stackp)
-{
- set_fs(USER_DS);
- regs->psw.mask = psw_user32_bits;
- regs->psw.addr = new_psw;
- regs->gprs[15] = new_stackp;
- crst_table_downgrade(current->mm, 1UL << 31);
-}
-
-MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
- " Copyright 2000 IBM Corporation");
-MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
-
-#undef MODULE_DESCRIPTION
-#undef MODULE_AUTHOR
-
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-static inline void
-cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
-{
- value->tv_usec = cputime % 1000000;
- value->tv_sec = cputime / 1000000;
-}
-
-#include "../../../fs/binfmt_elf.c"
-
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 419aef913ee..cde81fa64f8 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -1,7 +1,7 @@
#ifndef _PTRACE32_H
#define _PTRACE32_H
-#include "compat_linux.h" /* needed for _psw_t32 */
+#include "compat_linux.h" /* needed for psw_compat_t */
typedef struct {
__u32 cr[3];
@@ -38,7 +38,7 @@ typedef struct {
struct user_regs_struct32
{
- _psw_t32 psw;
+ psw_compat_t psw;
u32 gprs[NUM_GPRS];
u32 acrs[NUM_ACRS];
u32 orig_gpr2;
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index c93d1296cc0..d80fcd4a7fe 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1079,7 +1079,6 @@ __init debug_init(void)
s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
mutex_lock(&debug_mutex);
debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
- printk(KERN_INFO "debug: Initialization complete\n");
initialized = 1;
mutex_unlock(&debug_mutex);
@@ -1193,7 +1192,6 @@ debug_get_uint(char *buf)
for(; isspace(*buf); buf++);
rc = simple_strtoul(buf, &buf, 10);
if(*buf){
- printk("debug: no integer specified!\n");
rc = -EINVAL;
}
return rc;
@@ -1340,19 +1338,12 @@ static void debug_flush(debug_info_t* id, int area)
memset(id->areas[i][j], 0, PAGE_SIZE);
}
}
- printk(KERN_INFO "debug: %s: all areas flushed\n",id->name);
} else if(area >= 0 && area < id->nr_areas) {
id->active_entries[area] = 0;
id->active_pages[area] = 0;
for(i = 0; i < id->pages_per_area; i++) {
memset(id->areas[area][i],0,PAGE_SIZE);
}
- printk(KERN_INFO "debug: %s: area %i has been flushed\n",
- id->name, area);
- } else {
- printk(KERN_INFO
- "debug: %s: area %i cannot be flushed (range: %i - %i)\n",
- id->name, area, 0, id->nr_areas-1);
}
spin_unlock_irqrestore(&id->lock,flags);
}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d0e09684b9c..2a2ca268b1d 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
+#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
@@ -26,12 +27,40 @@
/*
* Create a Kernel NSS if the SAVESYS= parameter is defined
*/
-#define DEFSYS_CMD_SIZE 96
+#define DEFSYS_CMD_SIZE 128
#define SAVESYS_CMD_SIZE 32
char kernel_nss_name[NSS_NAME_SIZE + 1];
+static void __init setup_boot_command_line(void);
+
+
#ifdef CONFIG_SHARED_KERNEL
+int __init savesys_ipl_nss(char *cmd, const int cmdlen);
+
+asm(
+ " .section .init.text,\"ax\",@progbits\n"
+ " .align 4\n"
+ " .type savesys_ipl_nss, @function\n"
+ "savesys_ipl_nss:\n"
+#ifdef CONFIG_64BIT
+ " stmg 6,15,48(15)\n"
+ " lgr 14,3\n"
+ " sam31\n"
+ " diag 2,14,0x8\n"
+ " sam64\n"
+ " lgr 2,14\n"
+ " lmg 6,15,48(15)\n"
+#else
+ " stm 6,15,24(15)\n"
+ " lr 14,3\n"
+ " diag 2,14,0x8\n"
+ " lr 2,14\n"
+ " lm 6,15,24(15)\n"
+#endif
+ " br 14\n"
+ " .size savesys_ipl_nss, .-savesys_ipl_nss\n");
+
static noinline __init void create_kernel_nss(void)
{
unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
@@ -39,6 +68,7 @@ static noinline __init void create_kernel_nss(void)
unsigned int sinitrd_pfn, einitrd_pfn;
#endif
int response;
+ size_t len;
char *savesys_ptr;
char upper_command_line[COMMAND_LINE_SIZE];
char defsys_cmd[DEFSYS_CMD_SIZE];
@@ -49,8 +79,8 @@ static noinline __init void create_kernel_nss(void)
return;
/* Convert COMMAND_LINE to upper case */
- for (i = 0; i < strlen(COMMAND_LINE); i++)
- upper_command_line[i] = toupper(COMMAND_LINE[i]);
+ for (i = 0; i < strlen(boot_command_line); i++)
+ upper_command_line[i] = toupper(boot_command_line[i]);
savesys_ptr = strstr(upper_command_line, "SAVESYS=");
@@ -83,7 +113,8 @@ static noinline __init void create_kernel_nss(void)
}
#endif
- sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
+ sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK PARMREGS=0-13",
+ defsys_cmd, min_size);
sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
kernel_nss_name, kernel_nss_name);
@@ -94,13 +125,24 @@ static noinline __init void create_kernel_nss(void)
return;
}
- __cpcmd(savesys_cmd, NULL, 0, &response);
+ len = strlen(savesys_cmd);
+ ASCEBC(savesys_cmd, len);
+ response = savesys_ipl_nss(savesys_cmd, len);
- if (response != strlen(savesys_cmd)) {
+ /* On success: response is equal to the command size,
+ * max SAVESYS_CMD_SIZE
+ * On error: response contains the numeric portion of cp error message.
+ * for SAVESYS it will be >= 263
+ */
+ if (response > SAVESYS_CMD_SIZE) {
kernel_nss_name[0] = '\0';
return;
}
+ /* re-setup boot command line with new ipl vm parms */
+ ipl_update_parameters();
+ setup_boot_command_line();
+
ipl_flags = IPL_NSS_VALID;
}
@@ -141,109 +183,11 @@ static noinline __init void detect_machine_type(void)
if (cpuinfo->cpu_id.version == 0xff)
machine_flags |= MACHINE_FLAG_VM;
- /* Running on a P/390 ? */
- if (cpuinfo->cpu_id.machine == 0x7490)
- machine_flags |= MACHINE_FLAG_P390;
-
/* Running under KVM ? */
if (cpuinfo->cpu_id.version == 0xfe)
machine_flags |= MACHINE_FLAG_KVM;
}
-#ifdef CONFIG_64BIT
-static noinline __init int memory_fast_detect(void)
-{
- unsigned long val0 = 0;
- unsigned long val1 = 0xc;
- int ret = -ENOSYS;
-
- if (ipl_flags & IPL_NSS_VALID)
- return -ENOSYS;
-
- asm volatile(
- " diag %1,%2,0x260\n"
- "0: lhi %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
-
- if (ret || val0 != val1)
- return -ENOSYS;
-
- memory_chunk[0].size = val0 + 1;
- return 0;
-}
-#else
-static inline int memory_fast_detect(void)
-{
- return -ENOSYS;
-}
-#endif
-
-static inline __init unsigned long __tprot(unsigned long addr)
-{
- int cc = -1;
-
- asm volatile(
- " tprot 0(%1),0\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (cc) : "a" (addr) : "cc");
- return (unsigned long)cc;
-}
-
-/* Checking memory in 128KB increments. */
-#define CHUNK_INCR (1UL << 17)
-#define ADDR2G (1UL << 31)
-
-static noinline __init void find_memory_chunks(unsigned long memsize)
-{
- unsigned long addr = 0, old_addr = 0;
- unsigned long old_cc = CHUNK_READ_WRITE;
- unsigned long cc;
- int chunk = 0;
-
- while (chunk < MEMORY_CHUNKS) {
- cc = __tprot(addr);
- while (cc == old_cc) {
- addr += CHUNK_INCR;
- if (memsize && addr >= memsize)
- break;
-#ifndef CONFIG_64BIT
- if (addr == ADDR2G)
- break;
-#endif
- cc = __tprot(addr);
- }
-
- if (old_addr != addr &&
- (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
- memory_chunk[chunk].addr = old_addr;
- memory_chunk[chunk].size = addr - old_addr;
- memory_chunk[chunk].type = old_cc;
- chunk++;
- }
-
- old_addr = addr;
- old_cc = cc;
-
-#ifndef CONFIG_64BIT
- if (addr == ADDR2G)
- break;
-#endif
- /*
- * Finish memory detection at the first hole
- * if storage size is unknown.
- */
- if (cc == -1UL && !memsize)
- break;
- if (memsize && addr >= memsize)
- break;
- }
-}
-
static __init void early_pgm_check_handler(void)
{
unsigned long addr;
@@ -380,23 +324,61 @@ static __init void detect_machine_facilities(void)
#endif
}
+static __init void rescue_initrd(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ /*
+ * Move the initrd right behind the bss section in case it starts
+ * within the bss section. So we don't overwrite it when the bss
+ * section gets cleared.
+ */
+ if (!INITRD_START || !INITRD_SIZE)
+ return;
+ if (INITRD_START >= (unsigned long) __bss_stop)
+ return;
+ memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE);
+ INITRD_START = (unsigned long) __bss_stop;
+#endif
+}
+
+/* Set up boot command line */
+static void __init setup_boot_command_line(void)
+{
+ char *parm = NULL;
+
+ /* copy arch command line */
+ strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
+ boot_command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+
+ /* append IPL PARM data to the boot command line */
+ if (MACHINE_IS_VM) {
+ parm = boot_command_line + strlen(boot_command_line);
+ *parm++ = ' ';
+ get_ipl_vmparm(parm);
+ if (parm[0] == '=')
+ memmove(boot_command_line, parm + 1, strlen(parm));
+ }
+}
+
+
/*
* Save ipl parameters, clear bss memory, initialize storage keys
* and create a kernel NSS at startup if the SAVESYS= parm is defined
*/
void __init startup_init(void)
{
- unsigned long long memsize;
-
ipl_save_parameters();
+ rescue_initrd();
clear_bss_section();
init_kernel_storage_key();
lockdep_init();
lockdep_off();
- detect_machine_type();
- create_kernel_nss();
sort_main_extable();
setup_lowcore_early();
+ detect_machine_type();
+ ipl_update_parameters();
+ setup_boot_command_line();
+ create_kernel_nss();
detect_mvpg();
detect_ieee();
detect_csp();
@@ -404,18 +386,7 @@ void __init startup_init(void)
detect_diag44();
detect_machine_facilities();
setup_hpage();
- sclp_read_info_early();
sclp_facilities_detect();
- memsize = sclp_memory_detect();
-#ifndef CONFIG_64BIT
- /*
- * Can't deal with more than 2G in 31 bit addressing mode, so
- * limit the value in order to avoid strange side effects.
- */
- if (memsize > ADDR2G)
- memsize = ADDR2G;
-#endif
- if (memory_fast_detect() < 0)
- find_memory_chunks((unsigned long) memsize);
+ detect_memory_layout(memory_chunk);
lockdep_on();
}
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 532542447d6..54b2779b5e2 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ctype.h>
+#include <linux/fs.h>
#include <asm/ipl.h>
#include <asm/smp.h>
#include <asm/setup.h>
@@ -22,6 +23,7 @@
#include <asm/ebcdic.h>
#include <asm/reset.h>
#include <asm/sclp.h>
+#include <asm/setup.h>
#define IPL_PARM_BLOCK_VERSION 0
@@ -121,6 +123,7 @@ enum ipl_method {
REIPL_METHOD_FCP_RO_VM,
REIPL_METHOD_FCP_DUMP,
REIPL_METHOD_NSS,
+ REIPL_METHOD_NSS_DIAG,
REIPL_METHOD_DEFAULT,
};
@@ -134,14 +137,15 @@ enum dump_method {
static int diag308_set_works = 0;
+static struct ipl_parameter_block ipl_block;
+
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
static struct ipl_parameter_block *reipl_block_fcp;
static struct ipl_parameter_block *reipl_block_ccw;
-
-static char reipl_nss_name[NSS_NAME_SIZE + 1];
+static struct ipl_parameter_block *reipl_block_nss;
static int dump_capabilities = DUMP_TYPE_NONE;
static enum dump_type dump_type = DUMP_TYPE_NONE;
@@ -263,6 +267,56 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
+/* VM IPL PARM routines */
+static void reipl_get_ascii_vmparm(char *dest,
+ const struct ipl_parameter_block *ipb)
+{
+ int i;
+ int len = 0;
+ char has_lowercase = 0;
+
+ if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
+ (ipb->ipl_info.ccw.vm_parm_len > 0)) {
+
+ len = ipb->ipl_info.ccw.vm_parm_len;
+ memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
+ /* If at least one character is lowercase, we assume mixed
+ * case; otherwise we convert everything to lowercase.
+ */
+ for (i = 0; i < len; i++)
+ if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
+ (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
+ (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
+ has_lowercase = 1;
+ break;
+ }
+ if (!has_lowercase)
+ EBC_TOLOWER(dest, len);
+ EBCASC(dest, len);
+ }
+ dest[len] = 0;
+}
+
+void get_ipl_vmparm(char *dest)
+{
+ if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
+ reipl_get_ascii_vmparm(dest, &ipl_block);
+ else
+ dest[0] = 0;
+}
+
+static ssize_t ipl_vm_parm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ char parm[DIAG308_VMPARM_SIZE + 1] = {};
+
+ get_ipl_vmparm(parm);
+ return sprintf(page, "%s\n", parm);
+}
+
+static struct kobj_attribute sys_ipl_vm_parm_attr =
+ __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
+
static ssize_t sys_ipl_device_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
@@ -285,14 +339,8 @@ static struct kobj_attribute sys_ipl_device_attr =
static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
- unsigned int size = IPL_PARMBLOCK_SIZE;
-
- if (off > size)
- return 0;
- if (off + count > size)
- count = size - off;
- memcpy(buf, (void *)IPL_PARMBLOCK_START + off, count);
- return count;
+ return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
+ IPL_PARMBLOCK_SIZE);
}
static struct bin_attribute ipl_parameter_attr = {
@@ -310,12 +358,7 @@ static ssize_t ipl_scp_data_read(struct kobject *kobj, struct bin_attribute *att
unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
- if (off > size)
- return 0;
- if (off + count > size)
- count = size - off;
- memcpy(buf, scp_data + off, count);
- return count;
+ return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
static struct bin_attribute ipl_scp_data_attr = {
@@ -370,15 +413,27 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
-static struct attribute *ipl_ccw_attrs[] = {
+static struct attribute *ipl_ccw_attrs_vm[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
+ &sys_ipl_vm_parm_attr.attr,
NULL,
};
-static struct attribute_group ipl_ccw_attr_group = {
- .attrs = ipl_ccw_attrs,
+static struct attribute *ipl_ccw_attrs_lpar[] = {
+ &sys_ipl_type_attr.attr,
+ &sys_ipl_device_attr.attr,
+ &sys_ipl_ccw_loadparm_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ipl_ccw_attr_group_vm = {
+ .attrs = ipl_ccw_attrs_vm,
+};
+
+static struct attribute_group ipl_ccw_attr_group_lpar = {
+ .attrs = ipl_ccw_attrs_lpar
};
/* NSS ipl device attributes */
@@ -388,6 +443,8 @@ DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
static struct attribute *ipl_nss_attrs[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_nss_name_attr.attr,
+ &sys_ipl_ccw_loadparm_attr.attr,
+ &sys_ipl_vm_parm_attr.attr,
NULL,
};
@@ -450,7 +507,12 @@ static int __init ipl_init(void)
}
switch (ipl_info.type) {
case IPL_TYPE_CCW:
- rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group);
+ if (MACHINE_IS_VM)
+ rc = sysfs_create_group(&ipl_kset->kobj,
+ &ipl_ccw_attr_group_vm);
+ else
+ rc = sysfs_create_group(&ipl_kset->kobj,
+ &ipl_ccw_attr_group_lpar);
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
@@ -481,6 +543,83 @@ static struct shutdown_action __refdata ipl_action = {
* reipl shutdown action: Reboot Linux on shutdown.
*/
+/* VM IPL PARM attributes */
+static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
+ char *page)
+{
+ char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
+
+ reipl_get_ascii_vmparm(vmparm, ipb);
+ return sprintf(page, "%s\n", vmparm);
+}
+
+static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
+ size_t vmparm_max,
+ const char *buf, size_t len)
+{
+ int i, ip_len;
+
+ /* ignore trailing newline */
+ ip_len = len;
+ if ((len > 0) && (buf[len - 1] == '\n'))
+ ip_len--;
+
+ if (ip_len > vmparm_max)
+ return -EINVAL;
+
+ /* parm is used to store kernel options, check for common chars */
+ for (i = 0; i < ip_len; i++)
+ if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i])))
+ return -EINVAL;
+
+ memset(ipb->ipl_info.ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
+ ipb->ipl_info.ccw.vm_parm_len = ip_len;
+ if (ip_len > 0) {
+ ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
+ memcpy(ipb->ipl_info.ccw.vm_parm, buf, ip_len);
+ ASCEBC(ipb->ipl_info.ccw.vm_parm, ip_len);
+ } else {
+ ipb->ipl_info.ccw.vm_flags &= ~DIAG308_VM_FLAGS_VP_VALID;
+ }
+
+ return len;
+}
+
+/* NSS wrapper */
+static ssize_t reipl_nss_vmparm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return reipl_generic_vmparm_show(reipl_block_nss, page);
+}
+
+static ssize_t reipl_nss_vmparm_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return reipl_generic_vmparm_store(reipl_block_nss, 56, buf, len);
+}
+
+/* CCW wrapper */
+static ssize_t reipl_ccw_vmparm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return reipl_generic_vmparm_show(reipl_block_ccw, page);
+}
+
+static ssize_t reipl_ccw_vmparm_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return reipl_generic_vmparm_store(reipl_block_ccw, 64, buf, len);
+}
+
+static struct kobj_attribute sys_reipl_nss_vmparm_attr =
+ __ATTR(parm, S_IRUGO | S_IWUSR, reipl_nss_vmparm_show,
+ reipl_nss_vmparm_store);
+static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
+ __ATTR(parm, S_IRUGO | S_IWUSR, reipl_ccw_vmparm_show,
+ reipl_ccw_vmparm_store);
+
/* FCP reipl device attributes */
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
@@ -513,27 +652,26 @@ static struct attribute_group reipl_fcp_attr_group = {
DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
reipl_block_ccw->ipl_info.ccw.devno);
-static void reipl_get_ascii_loadparm(char *loadparm)
+static void reipl_get_ascii_loadparm(char *loadparm,
+ struct ipl_parameter_block *ibp)
{
- memcpy(loadparm, &reipl_block_ccw->ipl_info.ccw.load_param,
- LOADPARM_LEN);
+ memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
loadparm[LOADPARM_LEN] = 0;
strstrip(loadparm);
}
-static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *page)
+static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
+ char *page)
{
char buf[LOADPARM_LEN + 1];
- reipl_get_ascii_loadparm(buf);
+ reipl_get_ascii_loadparm(buf, ipb);
return sprintf(page, "%s\n", buf);
}
-static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t len)
+static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
+ const char *buf, size_t len)
{
int i, lp_len;
@@ -552,35 +690,128 @@ static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
return -EINVAL;
}
/* initialize loadparm with blanks */
- memset(&reipl_block_ccw->ipl_info.ccw.load_param, ' ', LOADPARM_LEN);
+ memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN);
/* copy and convert to ebcdic */
- memcpy(&reipl_block_ccw->ipl_info.ccw.load_param, buf, lp_len);
- ASCEBC(reipl_block_ccw->ipl_info.ccw.load_param, LOADPARM_LEN);
+ memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len);
+ ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN);
return len;
}
+/* NSS wrapper */
+static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return reipl_generic_loadparm_show(reipl_block_nss, page);
+}
+
+static ssize_t reipl_nss_loadparm_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return reipl_generic_loadparm_store(reipl_block_nss, buf, len);
+}
+
+/* CCW wrapper */
+static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return reipl_generic_loadparm_show(reipl_block_ccw, page);
+}
+
+static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return reipl_generic_loadparm_store(reipl_block_ccw, buf, len);
+}
+
static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
- __ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
- reipl_ccw_loadparm_store);
+ __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_ccw_loadparm_show,
+ reipl_ccw_loadparm_store);
-static struct attribute *reipl_ccw_attrs[] = {
+static struct attribute *reipl_ccw_attrs_vm[] = {
&sys_reipl_ccw_device_attr.attr,
&sys_reipl_ccw_loadparm_attr.attr,
+ &sys_reipl_ccw_vmparm_attr.attr,
NULL,
};
-static struct attribute_group reipl_ccw_attr_group = {
+static struct attribute *reipl_ccw_attrs_lpar[] = {
+ &sys_reipl_ccw_device_attr.attr,
+ &sys_reipl_ccw_loadparm_attr.attr,
+ NULL,
+};
+
+static struct attribute_group reipl_ccw_attr_group_vm = {
+ .name = IPL_CCW_STR,
+ .attrs = reipl_ccw_attrs_vm,
+};
+
+static struct attribute_group reipl_ccw_attr_group_lpar = {
.name = IPL_CCW_STR,
- .attrs = reipl_ccw_attrs,
+ .attrs = reipl_ccw_attrs_lpar,
};
/* NSS reipl device attributes */
+static void reipl_get_ascii_nss_name(char *dst,
+ struct ipl_parameter_block *ipb)
+{
+ memcpy(dst, ipb->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
+ EBCASC(dst, NSS_NAME_SIZE);
+ dst[NSS_NAME_SIZE] = 0;
+}
+
+static ssize_t reipl_nss_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ char nss_name[NSS_NAME_SIZE + 1] = {};
-DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name);
+ reipl_get_ascii_nss_name(nss_name, reipl_block_nss);
+ return sprintf(page, "%s\n", nss_name);
+}
+
+static ssize_t reipl_nss_name_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ int nss_len;
+
+ /* ignore trailing newline */
+ nss_len = len;
+ if ((len > 0) && (buf[len - 1] == '\n'))
+ nss_len--;
+
+ if (nss_len > NSS_NAME_SIZE)
+ return -EINVAL;
+
+ memset(reipl_block_nss->ipl_info.ccw.nss_name, 0x40, NSS_NAME_SIZE);
+ if (nss_len > 0) {
+ reipl_block_nss->ipl_info.ccw.vm_flags |=
+ DIAG308_VM_FLAGS_NSS_VALID;
+ memcpy(reipl_block_nss->ipl_info.ccw.nss_name, buf, nss_len);
+ ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
+ EBC_TOUPPER(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
+ } else {
+ reipl_block_nss->ipl_info.ccw.vm_flags &=
+ ~DIAG308_VM_FLAGS_NSS_VALID;
+ }
+
+ return len;
+}
+
+static struct kobj_attribute sys_reipl_nss_name_attr =
+ __ATTR(name, S_IRUGO | S_IWUSR, reipl_nss_name_show,
+ reipl_nss_name_store);
+
+static struct kobj_attribute sys_reipl_nss_loadparm_attr =
+ __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nss_loadparm_show,
+ reipl_nss_loadparm_store);
static struct attribute *reipl_nss_attrs[] = {
&sys_reipl_nss_name_attr.attr,
+ &sys_reipl_nss_loadparm_attr.attr,
+ &sys_reipl_nss_vmparm_attr.attr,
NULL,
};
@@ -617,7 +848,10 @@ static int reipl_set_type(enum ipl_type type)
reipl_method = REIPL_METHOD_FCP_DUMP;
break;
case IPL_TYPE_NSS:
- reipl_method = REIPL_METHOD_NSS;
+ if (diag308_set_works)
+ reipl_method = REIPL_METHOD_NSS_DIAG;
+ else
+ reipl_method = REIPL_METHOD_NSS;
break;
case IPL_TYPE_UNKNOWN:
reipl_method = REIPL_METHOD_DEFAULT;
@@ -655,11 +889,38 @@ static struct kobj_attribute reipl_type_attr =
static struct kset *reipl_kset;
+static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
+ const enum ipl_method m)
+{
+ char loadparm[LOADPARM_LEN + 1] = {};
+ char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
+ char nss_name[NSS_NAME_SIZE + 1] = {};
+ size_t pos = 0;
+
+ reipl_get_ascii_loadparm(loadparm, ipb);
+ reipl_get_ascii_nss_name(nss_name, ipb);
+ reipl_get_ascii_vmparm(vmparm, ipb);
+
+ switch (m) {
+ case REIPL_METHOD_CCW_VM:
+ pos = sprintf(dst, "IPL %X CLEAR", ipb->ipl_info.ccw.devno);
+ break;
+ case REIPL_METHOD_NSS:
+ pos = sprintf(dst, "IPL %s", nss_name);
+ break;
+ default:
+ break;
+ }
+ if (strlen(loadparm) > 0)
+ pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm);
+ if (strlen(vmparm) > 0)
+ sprintf(dst + pos, " PARM %s", vmparm);
+}
+
static void reipl_run(struct shutdown_trigger *trigger)
{
struct ccw_dev_id devid;
- static char buf[100];
- char loadparm[LOADPARM_LEN + 1];
+ static char buf[128];
switch (reipl_method) {
case REIPL_METHOD_CCW_CIO:
@@ -668,13 +929,7 @@ static void reipl_run(struct shutdown_trigger *trigger)
reipl_ccw_dev(&devid);
break;
case REIPL_METHOD_CCW_VM:
- reipl_get_ascii_loadparm(loadparm);
- if (strlen(loadparm) == 0)
- sprintf(buf, "IPL %X CLEAR",
- reipl_block_ccw->ipl_info.ccw.devno);
- else
- sprintf(buf, "IPL %X CLEAR LOADPARM '%s'",
- reipl_block_ccw->ipl_info.ccw.devno, loadparm);
+ get_ipl_string(buf, reipl_block_ccw, REIPL_METHOD_CCW_VM);
__cpcmd(buf, NULL, 0, NULL);
break;
case REIPL_METHOD_CCW_DIAG:
@@ -691,8 +946,12 @@ static void reipl_run(struct shutdown_trigger *trigger)
case REIPL_METHOD_FCP_RO_VM:
__cpcmd("IPL", NULL, 0, NULL);
break;
+ case REIPL_METHOD_NSS_DIAG:
+ diag308(DIAG308_SET, reipl_block_nss);
+ diag308(DIAG308_IPL, NULL);
+ break;
case REIPL_METHOD_NSS:
- sprintf(buf, "IPL %s", reipl_nss_name);
+ get_ipl_string(buf, reipl_block_nss, REIPL_METHOD_NSS);
__cpcmd(buf, NULL, 0, NULL);
break;
case REIPL_METHOD_DEFAULT:
@@ -707,16 +966,36 @@ static void reipl_run(struct shutdown_trigger *trigger)
disabled_wait((unsigned long) __builtin_return_address(0));
}
-static void __init reipl_probe(void)
+static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
{
- void *buffer;
+ ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
+ ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
+ ipb->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
+ ipb->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+}
- buffer = (void *) get_zeroed_page(GFP_KERNEL);
- if (!buffer)
- return;
- if (diag308(DIAG308_STORE, buffer) == DIAG308_RC_OK)
- diag308_set_works = 1;
- free_page((unsigned long)buffer);
+static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
+{
+ /* LOADPARM */
+ /* check if read scp info worked and set loadparm */
+ if (sclp_ipl_info.is_valid)
+ memcpy(ipb->ipl_info.ccw.load_parm,
+ &sclp_ipl_info.loadparm, LOADPARM_LEN);
+ else
+ /* read scp info failed: set empty loadparm (EBCDIC blanks) */
+ memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN);
+ ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
+
+ /* VM PARM */
+ if (MACHINE_IS_VM && diag308_set_works &&
+ (ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) {
+
+ ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
+ ipb->ipl_info.ccw.vm_parm_len =
+ ipl_block.ipl_info.ccw.vm_parm_len;
+ memcpy(ipb->ipl_info.ccw.vm_parm,
+ ipl_block.ipl_info.ccw.vm_parm, DIAG308_VMPARM_SIZE);
+ }
}
static int __init reipl_nss_init(void)
@@ -725,10 +1004,31 @@ static int __init reipl_nss_init(void)
if (!MACHINE_IS_VM)
return 0;
+
+ reipl_block_nss = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!reipl_block_nss)
+ return -ENOMEM;
+
+ if (!diag308_set_works)
+ sys_reipl_nss_vmparm_attr.attr.mode = S_IRUGO;
+
rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
if (rc)
return rc;
- strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
+
+ reipl_block_ccw_init(reipl_block_nss);
+ if (ipl_info.type == IPL_TYPE_NSS) {
+ memset(reipl_block_nss->ipl_info.ccw.nss_name,
+ ' ', NSS_NAME_SIZE);
+ memcpy(reipl_block_nss->ipl_info.ccw.nss_name,
+ kernel_nss_name, strlen(kernel_nss_name));
+ ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
+ reipl_block_nss->ipl_info.ccw.vm_flags |=
+ DIAG308_VM_FLAGS_NSS_VALID;
+
+ reipl_block_ccw_fill_parms(reipl_block_nss);
+ }
+
reipl_capabilities |= IPL_TYPE_NSS;
return 0;
}
@@ -740,28 +1040,27 @@ static int __init reipl_ccw_init(void)
reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_ccw)
return -ENOMEM;
- rc = sysfs_create_group(&reipl_kset->kobj, &reipl_ccw_attr_group);
- if (rc) {
- free_page((unsigned long)reipl_block_ccw);
- return rc;
+
+ if (MACHINE_IS_VM) {
+ if (!diag308_set_works)
+ sys_reipl_ccw_vmparm_attr.attr.mode = S_IRUGO;
+ rc = sysfs_create_group(&reipl_kset->kobj,
+ &reipl_ccw_attr_group_vm);
+ } else {
+ if(!diag308_set_works)
+ sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
+ rc = sysfs_create_group(&reipl_kset->kobj,
+ &reipl_ccw_attr_group_lpar);
}
- reipl_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
- reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
- reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
- reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
- reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID;
- /* check if read scp info worked and set loadparm */
- if (sclp_ipl_info.is_valid)
- memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
- &sclp_ipl_info.loadparm, LOADPARM_LEN);
- else
- /* read scp info failed: set empty loadparm (EBCDIC blanks) */
- memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
- LOADPARM_LEN);
- if (!MACHINE_IS_VM && !diag308_set_works)
- sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
- if (ipl_info.type == IPL_TYPE_CCW)
+ if (rc)
+ return rc;
+
+ reipl_block_ccw_init(reipl_block_ccw);
+ if (ipl_info.type == IPL_TYPE_CCW) {
reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
+ reipl_block_ccw_fill_parms(reipl_block_ccw);
+ }
+
reipl_capabilities |= IPL_TYPE_CCW;
return 0;
}
@@ -1298,7 +1597,6 @@ static void __init shutdown_actions_init(void)
static int __init s390_ipl_init(void)
{
- reipl_probe();
sclp_get_ipl_info(&sclp_ipl_info);
shutdown_actions_init();
shutdown_triggers_init();
@@ -1405,6 +1703,12 @@ void __init setup_ipl(void)
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
}
+void __init ipl_update_parameters(void)
+{
+ if (diag308(DIAG308_STORE, &ipl_block) == DIAG308_RC_OK)
+ diag308_set_works = 1;
+}
+
void __init ipl_save_parameters(void)
{
struct cio_iplinfo iplinfo;
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index ed04d1372d5..288ad490a6d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -41,10 +41,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
return -EINVAL;
- if ((unsigned long)p->addr & 0x01) {
- printk("Attempt to register kprobe at an unaligned address\n");
+ if ((unsigned long)p->addr & 0x01)
return -EINVAL;
- }
/* Use the get_insn_slot() facility for correctness */
if (!(p->ainsn.insn = get_insn_slot()))
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 3c77dd36994..131d7ee8b41 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -52,7 +52,6 @@ void machine_kexec_cleanup(struct kimage *image)
void machine_shutdown(void)
{
- printk(KERN_INFO "kexec: machine_shutdown called\n");
}
void machine_kexec(struct kimage *image)
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
new file mode 100644
index 00000000000..18ed7abe16c
--- /dev/null
+++ b/arch/s390/kernel/mem_detect.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright IBM Corp. 2008
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/ipl.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+
+static int memory_fast_detect(struct mem_chunk *chunk)
+{
+ unsigned long val0 = 0;
+ unsigned long val1 = 0xc;
+ int rc = -EOPNOTSUPP;
+
+ if (ipl_flags & IPL_NSS_VALID)
+ return -EOPNOTSUPP;
+ asm volatile(
+ " diag %1,%2,0x260\n"
+ "0: lhi %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (rc), "+d" (val0), "+d" (val1) : : "cc");
+
+ if (rc || val0 != val1)
+ return -EOPNOTSUPP;
+ chunk->size = val0 + 1;
+ return 0;
+}
+
+static inline int tprot(unsigned long addr)
+{
+ int rc = -EFAULT;
+
+ asm volatile(
+ " tprot 0(%1),0\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (rc) : "a" (addr) : "cc");
+ return rc;
+}
+
+#define ADDR2G (1ULL << 31)
+
+static void find_memory_chunks(struct mem_chunk chunk[])
+{
+ unsigned long long memsize, rnmax, rzm;
+ unsigned long addr = 0, size;
+ int i = 0, type;
+
+ rzm = sclp_get_rzm();
+ rnmax = sclp_get_rnmax();
+ memsize = rzm * rnmax;
+ if (!rzm)
+ rzm = 1ULL << 17;
+ if (sizeof(long) == 4) {
+ rzm = min(ADDR2G, rzm);
+ memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
+ }
+ do {
+ size = 0;
+ type = tprot(addr);
+ do {
+ size += rzm;
+ if (memsize && addr + size >= memsize)
+ break;
+ } while (type == tprot(addr + size));
+ if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
+ chunk[i].addr = addr;
+ chunk[i].size = size;
+ chunk[i].type = type;
+ i++;
+ }
+ addr += size;
+ } while (addr < memsize && i < MEMORY_CHUNKS);
+}
+
+void detect_memory_layout(struct mem_chunk chunk[])
+{
+ unsigned long flags, cr0;
+
+ memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
+ if (memory_fast_detect(&chunk[0]) == 0)
+ return;
+ /* Disable IRQs, DAT and low address protection so tprot does the
+ * right thing and we don't get scheduled away with low address
+ * protection disabled.
+ */
+ flags = __raw_local_irq_stnsm(0xf8);
+ __ctl_store(cr0, 0, 0);
+ __ctl_clear_bit(0, 28);
+ find_memory_chunks(chunk);
+ __ctl_load(cr0, 0, 0);
+ __raw_local_irq_ssm(flags);
+}
+EXPORT_SYMBOL(detect_memory_layout);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7920861109d..85defd01d29 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -75,46 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8];
}
-/*
- * Need to know about CPUs going idle?
- */
-static ATOMIC_NOTIFIER_HEAD(idle_chain);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
-int register_idle_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&idle_chain, nb);
-}
-EXPORT_SYMBOL(register_idle_notifier);
-
-int unregister_idle_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&idle_chain, nb);
-}
-EXPORT_SYMBOL(unregister_idle_notifier);
-
static int s390_idle_enter(void)
{
struct s390_idle_data *idle;
- int nr_calls = 0;
- void *hcpu;
- int rc;
- hcpu = (void *)(long)smp_processor_id();
- rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
- &nr_calls);
- if (rc == NOTIFY_BAD) {
- nr_calls--;
- __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
- hcpu, nr_calls, NULL);
- return rc;
- }
idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock);
idle->idle_count++;
idle->in_idle = 1;
idle->idle_enter = get_clock();
spin_unlock(&idle->lock);
+ vtime_stop_cpu_timer();
return NOTIFY_OK;
}
@@ -122,13 +95,12 @@ void s390_idle_leave(void)
{
struct s390_idle_data *idle;
+ vtime_start_cpu_timer();
idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock);
idle->idle_time += get_clock() - idle->idle_enter;
idle->in_idle = 0;
spin_unlock(&idle->lock);
- atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
- (void *)(long) smp_processor_id());
}
extern void s390_handle_mcck(void);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 35827b9bd4d..2815bfe348a 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -33,6 +33,8 @@
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
+#include <linux/elf.h>
+#include <linux/regset.h>
#include <asm/segment.h>
#include <asm/page.h>
@@ -47,6 +49,11 @@
#include "compat_ptrace.h"
#endif
+enum s390_regset {
+ REGSET_GENERAL,
+ REGSET_FP,
+};
+
static void
FixPerRegisters(struct task_struct *task)
{
@@ -126,24 +133,10 @@ ptrace_disable(struct task_struct *child)
* struct user contain pad bytes that should be read as zeroes.
* Lovely...
*/
-static int
-peek_user(struct task_struct *child, addr_t addr, addr_t data)
+static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
struct user *dummy = NULL;
- addr_t offset, tmp, mask;
-
- /*
- * Stupid gdb peeks/pokes the access registers in 64 bit with
- * an alignment of 4. Programmers from hell...
- */
- mask = __ADDR_MASK;
-#ifdef CONFIG_64BIT
- if (addr >= (addr_t) &dummy->regs.acrs &&
- addr < (addr_t) &dummy->regs.orig_gpr2)
- mask = 3;
-#endif
- if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
- return -EIO;
+ addr_t offset, tmp;
if (addr < (addr_t) &dummy->regs.acrs) {
/*
@@ -197,24 +190,18 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
} else
tmp = 0;
- return put_user(tmp, (addr_t __user *) data);
+ return tmp;
}
-/*
- * Write a word to the user area of a process at location addr. This
- * operation does have an additional problem compared to peek_user.
- * Stores to the program status word and on the floating point
- * control register needs to get checked for validity.
- */
static int
-poke_user(struct task_struct *child, addr_t addr, addr_t data)
+peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
struct user *dummy = NULL;
- addr_t offset, mask;
+ addr_t tmp, mask;
/*
* Stupid gdb peeks/pokes the access registers in 64 bit with
- * an alignment of 4. Programmers from hell indeed...
+ * an alignment of 4. Programmers from hell...
*/
mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
@@ -225,6 +212,21 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
+ tmp = __peek_user(child, addr);
+ return put_user(tmp, (addr_t __user *) data);
+}
+
+/*
+ * Write a word to the user area of a process at location addr. This
+ * operation does have an additional problem compared to peek_user.
+ * Stores to the program status word and on the floating point
+ * control register needs to get checked for validity.
+ */
+static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
+{
+ struct user *dummy = NULL;
+ addr_t offset;
+
if (addr < (addr_t) &dummy->regs.acrs) {
/*
* psw and gprs are stored on the stack
@@ -292,6 +294,28 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
return 0;
}
+static int
+poke_user(struct task_struct *child, addr_t addr, addr_t data)
+{
+ struct user *dummy = NULL;
+ addr_t mask;
+
+ /*
+ * Stupid gdb peeks/pokes the access registers in 64 bit with
+ * an alignment of 4. Programmers from hell indeed...
+ */
+ mask = __ADDR_MASK;
+#ifdef CONFIG_64BIT
+ if (addr >= (addr_t) &dummy->regs.acrs &&
+ addr < (addr_t) &dummy->regs.orig_gpr2)
+ mask = 3;
+#endif
+ if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
+ return -EIO;
+
+ return __poke_user(child, addr, data);
+}
+
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
ptrace_area parea;
@@ -367,18 +391,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/*
* Same as peek_user but for a 31 bit program.
*/
-static int
-peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
+static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
struct user32 *dummy32 = NULL;
per_struct32 *dummy_per32 = NULL;
addr_t offset;
__u32 tmp;
- if (!test_thread_flag(TIF_31BIT) ||
- (addr & 3) || addr > sizeof(struct user) - 3)
- return -EIO;
-
if (addr < (addr_t) &dummy32->regs.acrs) {
/*
* psw and gprs are stored on the stack
@@ -435,25 +454,32 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
} else
tmp = 0;
+ return tmp;
+}
+
+static int peek_user_compat(struct task_struct *child,
+ addr_t addr, addr_t data)
+{
+ __u32 tmp;
+
+ if (!test_thread_flag(TIF_31BIT) ||
+ (addr & 3) || addr > sizeof(struct user) - 3)
+ return -EIO;
+
+ tmp = __peek_user_compat(child, addr);
return put_user(tmp, (__u32 __user *) data);
}
/*
* Same as poke_user but for a 31 bit program.
*/
-static int
-poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
+static int __poke_user_compat(struct task_struct *child,
+ addr_t addr, addr_t data)
{
struct user32 *dummy32 = NULL;
per_struct32 *dummy_per32 = NULL;
+ __u32 tmp = (__u32) data;
addr_t offset;
- __u32 tmp;
-
- if (!test_thread_flag(TIF_31BIT) ||
- (addr & 3) || addr > sizeof(struct user32) - 3)
- return -EIO;
-
- tmp = (__u32) data;
if (addr < (addr_t) &dummy32->regs.acrs) {
/*
@@ -528,6 +554,16 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
return 0;
}
+static int poke_user_compat(struct task_struct *child,
+ addr_t addr, addr_t data)
+{
+ if (!test_thread_flag(TIF_31BIT) ||
+ (addr & 3) || addr > sizeof(struct user32) - 3)
+ return -EIO;
+
+ return __poke_user_compat(child, addr, data);
+}
+
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
@@ -539,11 +575,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
switch (request) {
case PTRACE_PEEKUSR:
/* read the word at location addr in the USER area. */
- return peek_user_emu31(child, addr, data);
+ return peek_user_compat(child, addr, data);
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area */
- return poke_user_emu31(child, addr, data);
+ return poke_user_compat(child, addr, data);
case PTRACE_PEEKUSR_AREA:
case PTRACE_POKEUSR_AREA:
@@ -555,13 +591,13 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
copied = 0;
while (copied < parea.len) {
if (request == PTRACE_PEEKUSR_AREA)
- ret = peek_user_emu31(child, addr, data);
+ ret = peek_user_compat(child, addr, data);
else {
__u32 utmp;
if (get_user(utmp,
(__u32 __force __user *) data))
return -EFAULT;
- ret = poke_user_emu31(child, addr, utmp);
+ ret = poke_user_compat(child, addr, utmp);
}
if (ret)
return ret;
@@ -610,3 +646,240 @@ syscall_trace(struct pt_regs *regs, int entryexit)
regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
regs->gprs[4], regs->gprs[5]);
}
+
+/*
+ * user_regset definitions.
+ */
+
+static int s390_regs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ if (target == current)
+ save_access_regs(target->thread.acrs);
+
+ if (kbuf) {
+ unsigned long *k = kbuf;
+ while (count > 0) {
+ *k++ = __peek_user(target, pos);
+ count -= sizeof(*k);
+ pos += sizeof(*k);
+ }
+ } else {
+ unsigned long __user *u = ubuf;
+ while (count > 0) {
+ if (__put_user(__peek_user(target, pos), u++))
+ return -EFAULT;
+ count -= sizeof(*u);
+ pos += sizeof(*u);
+ }
+ }
+ return 0;
+}
+
+static int s390_regs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int rc = 0;
+
+ if (target == current)
+ save_access_regs(target->thread.acrs);
+
+ if (kbuf) {
+ const unsigned long *k = kbuf;
+ while (count > 0 && !rc) {
+ rc = __poke_user(target, pos, *k++);
+ count -= sizeof(*k);
+ pos += sizeof(*k);
+ }
+ } else {
+ const unsigned long __user *u = ubuf;
+ while (count > 0 && !rc) {
+ unsigned long word;
+ rc = __get_user(word, u++);
+ if (rc)
+ break;
+ rc = __poke_user(target, pos, word);
+ count -= sizeof(*u);
+ pos += sizeof(*u);
+ }
+ }
+
+ if (rc == 0 && target == current)
+ restore_access_regs(target->thread.acrs);
+
+ return rc;
+}
+
+static int s390_fpregs_get(struct task_struct *target,
+ const struct user_regset *regset, unsigned int pos,
+ unsigned int count, void *kbuf, void __user *ubuf)
+{
+ if (target == current)
+ save_fp_regs(&target->thread.fp_regs);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fp_regs, 0, -1);
+}
+
+static int s390_fpregs_set(struct task_struct *target,
+ const struct user_regset *regset, unsigned int pos,
+ unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ int rc = 0;
+
+ if (target == current)
+ save_fp_regs(&target->thread.fp_regs);
+
+ /* If setting FPC, must validate it first. */
+ if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
+ u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
+ rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
+ 0, offsetof(s390_fp_regs, fprs));
+ if (rc)
+ return rc;
+ if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
+ return -EINVAL;
+ target->thread.fp_regs.fpc = fpc[0];
+ }
+
+ if (rc == 0 && count > 0)
+ rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ target->thread.fp_regs.fprs,
+ offsetof(s390_fp_regs, fprs), -1);
+
+ if (rc == 0 && target == current)
+ restore_fp_regs(&target->thread.fp_regs);
+
+ return rc;
+}
+
+static const struct user_regset s390_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = sizeof(s390_regs) / sizeof(long),
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = s390_regs_get,
+ .set = s390_regs_set,
+ },
+ [REGSET_FP] = {
+ .core_note_type = NT_PRFPREG,
+ .n = sizeof(s390_fp_regs) / sizeof(long),
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = s390_fpregs_get,
+ .set = s390_fpregs_set,
+ },
+};
+
+static const struct user_regset_view user_s390_view = {
+ .name = UTS_MACHINE,
+ .e_machine = EM_S390,
+ .regsets = s390_regsets,
+ .n = ARRAY_SIZE(s390_regsets)
+};
+
+#ifdef CONFIG_COMPAT
+static int s390_compat_regs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ if (target == current)
+ save_access_regs(target->thread.acrs);
+
+ if (kbuf) {
+ compat_ulong_t *k = kbuf;
+ while (count > 0) {
+ *k++ = __peek_user_compat(target, pos);
+ count -= sizeof(*k);
+ pos += sizeof(*k);
+ }
+ } else {
+ compat_ulong_t __user *u = ubuf;
+ while (count > 0) {
+ if (__put_user(__peek_user_compat(target, pos), u++))
+ return -EFAULT;
+ count -= sizeof(*u);
+ pos += sizeof(*u);
+ }
+ }
+ return 0;
+}
+
+static int s390_compat_regs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int rc = 0;
+
+ if (target == current)
+ save_access_regs(target->thread.acrs);
+
+ if (kbuf) {
+ const compat_ulong_t *k = kbuf;
+ while (count > 0 && !rc) {
+ rc = __poke_user_compat(target, pos, *k++);
+ count -= sizeof(*k);
+ pos += sizeof(*k);
+ }
+ } else {
+ const compat_ulong_t __user *u = ubuf;
+ while (count > 0 && !rc) {
+ compat_ulong_t word;
+ rc = __get_user(word, u++);
+ if (rc)
+ break;
+ rc = __poke_user_compat(target, pos, word);
+ count -= sizeof(*u);
+ pos += sizeof(*u);
+ }
+ }
+
+ if (rc == 0 && target == current)
+ restore_access_regs(target->thread.acrs);
+
+ return rc;
+}
+
+static const struct user_regset s390_compat_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
+ .size = sizeof(compat_long_t),
+ .align = sizeof(compat_long_t),
+ .get = s390_compat_regs_get,
+ .set = s390_compat_regs_set,
+ },
+ [REGSET_FP] = {
+ .core_note_type = NT_PRFPREG,
+ .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
+ .size = sizeof(compat_long_t),
+ .align = sizeof(compat_long_t),
+ .get = s390_fpregs_get,
+ .set = s390_fpregs_set,
+ },
+};
+
+static const struct user_regset_view user_s390_compat_view = {
+ .name = "s390",
+ .e_machine = EM_S390,
+ .regsets = s390_compat_regsets,
+ .n = ARRAY_SIZE(s390_compat_regsets)
+};
+#endif
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_COMPAT
+ if (test_tsk_thread_flag(task, TIF_31BIT))
+ return &user_s390_compat_view;
+#endif
+ return &user_s390_view;
+}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2bc70b6e876..b358e18273b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -77,7 +77,7 @@ unsigned long machine_flags;
unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];
-struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS];
+struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
static unsigned long __initdata memory_end;
@@ -205,12 +205,6 @@ static void __init conmode_default(void)
SET_CONSOLE_SCLP;
#endif
}
- } else if (MACHINE_IS_P390) {
-#if defined(CONFIG_TN3215_CONSOLE)
- SET_CONSOLE_3215;
-#elif defined(CONFIG_TN3270_CONSOLE)
- SET_CONSOLE_3270;
-#endif
} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
@@ -221,18 +215,17 @@ static void __init conmode_default(void)
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
static void __init setup_zfcpdump(unsigned int console_devno)
{
- static char str[64];
+ static char str[41];
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return;
if (console_devno != -1)
- sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x",
+ sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
ipl_info.data.fcp.dev_id.devno, console_devno);
else
- sprintf(str, "cio_ignore=all,!0.0.%04x",
+ sprintf(str, " cio_ignore=all,!0.0.%04x",
ipl_info.data.fcp.dev_id.devno);
- strcat(COMMAND_LINE, " ");
- strcat(COMMAND_LINE, str);
+ strcat(boot_command_line, str);
console_loglevel = 2;
}
#else
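
The buffer shrink above (str[64] to str[41]) is safe: the worst case is now " cio_ignore=all,!0.0.XXXX,!0.0.XXXX", i.e. 35 characters plus the terminating NUL, since the device numbers are printed with %04x. A quick standalone check (plain C, not part of the patch) confirms the arithmetic:

#include <stdio.h>

int main(void)
{
	char str[41];
	int n;

	/* 0xffff is the widest possible devno for the %04x format */
	n = snprintf(str, sizeof(str), " cio_ignore=all,!0.0.%04x,!0.0.%04x",
		     0xffff, 0xffff);
	printf("%d bytes used of %zu\n", n + 1, sizeof(str));	/* 36 of 41 */
	return 0;
}
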
@@ -289,32 +282,6 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
-/*
- * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
- */
-static int __init early_parse_ipldelay(char *p)
-{
- unsigned long delay = 0;
-
- delay = simple_strtoul(p, &p, 0);
-
- switch (*p) {
- case 's':
- case 'S':
- delay *= 1000000;
- break;
- case 'm':
- case 'M':
- delay *= 60 * 1000000;
- }
-
- /* now wait for the requested amount of time */
- udelay(delay);
-
- return 0;
-}
-early_param("ipldelay", early_parse_ipldelay);
-
#ifdef CONFIG_S390_SWITCH_AMODE
#ifdef CONFIG_PGSTE
unsigned int switch_amode = 1;
@@ -804,11 +771,9 @@ setup_arch(char **cmdline_p)
printk("We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */
- /* Save unparsed command line copy for /proc/cmdline */
- strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
-
- *cmdline_p = COMMAND_LINE;
- *(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';
+ /* Have one command line that is parsed and saved in /proc/cmdline */
+ /* boot_command_line has been already set up in early.c */
+ *cmdline_p = boot_command_line;
ROOT_DEV = Root_RAM0;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 7aec676fefd..7418bebb547 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -3,7 +3,7 @@
* Time of day based timer functions.
*
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2008
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
@@ -31,6 +31,7 @@
#include <linux/notifier.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
@@ -162,7 +163,7 @@ void init_cpu_timer(void)
/* Enable clock comparator timer interrupt. */
__ctl_set_bit(0,11);
- /* Always allow ETR external interrupts, even without an ETR. */
+ /* Always allow the timing alert external interrupt. */
__ctl_set_bit(0, 4);
}
@@ -170,8 +171,21 @@ static void clock_comparator_interrupt(__u16 code)
{
}
+static void etr_timing_alert(struct etr_irq_parm *);
+static void stp_timing_alert(struct stp_irq_parm *);
+
+static void timing_alert_interrupt(__u16 code)
+{
+ if (S390_lowcore.ext_params & 0x00c40000)
+ etr_timing_alert((struct etr_irq_parm *)
+ &S390_lowcore.ext_params);
+ if (S390_lowcore.ext_params & 0x00038000)
+ stp_timing_alert((struct stp_irq_parm *)
+ &S390_lowcore.ext_params);
+}
+
static void etr_reset(void);
-static void etr_ext_handler(__u16);
+static void stp_reset(void);
/*
* Get the TOD clock running.
@@ -181,6 +195,7 @@ static u64 __init reset_tod_clock(void)
u64 time;
etr_reset();
+ stp_reset();
if (store_clock(&time) == 0)
return time;
/* TOD clock not running. Set the clock to Unix Epoch. */
@@ -231,8 +246,9 @@ void __init time_init(void)
if (clocksource_register(&clocksource_tod) != 0)
panic("Could not register TOD clock source");
- /* request the etr external interrupt */
- if (register_early_external_interrupt(0x1406, etr_ext_handler,
+ /* request the timing alert external interrupt */
+ if (register_early_external_interrupt(0x1406,
+ timing_alert_interrupt,
&ext_int_etr_cc) != 0)
panic("Couldn't request external interrupt 0x1406");
@@ -245,10 +261,112 @@ void __init time_init(void)
}
/*
+ * The time is "clock". old is what we think the time is.
+ * Adjust the value by a multiple of jiffies and add the delta to ntp.
+ * "delay" is an approximation how long the synchronization took. If
+ * the time correction is positive, then "delay" is subtracted from
+ * the time difference and only the remaining part is passed to ntp.
+ */
+static unsigned long long adjust_time(unsigned long long old,
+ unsigned long long clock,
+ unsigned long long delay)
+{
+ unsigned long long delta, ticks;
+ struct timex adjust;
+
+ if (clock > old) {
+ /* It is later than we thought. */
+ delta = ticks = clock - old;
+ delta = ticks = (delta < delay) ? 0 : delta - delay;
+ delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
+ adjust.offset = ticks * (1000000 / HZ);
+ } else {
+ /* It is earlier than we thought. */
+ delta = ticks = old - clock;
+ delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
+ delta = -delta;
+ adjust.offset = -ticks * (1000000 / HZ);
+ }
+ jiffies_timer_cc += delta;
+ if (adjust.offset != 0) {
+ printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
+ adjust.offset);
+ adjust.modes = ADJ_OFFSET_SINGLESHOT;
+ do_adjtimex(&adjust);
+ }
+ return delta;
+}
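
adjust_time() relies on do_div(), which divides its 64-bit first argument in place and returns the remainder; subtracting that remainder from delta rounds the correction down to a whole number of jiffies before the offset is handed to ntp. A portable stand-in shows the effect (the real do_div is an architecture-specific macro, and the tick value below is invented for the demo):

#include <inttypes.h>
#include <stdio.h>

#define DEMO_CLK_TICKS_PER_JIFFY 4096ULL	/* assumed value, demo only */

/* Same contract as the kernel's do_div(): divide in place, return remainder. */
static uint64_t do_div_demo(uint64_t *n, uint64_t base)
{
	uint64_t rem = *n % base;

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t delta = 10000, ticks = 10000;

	delta -= do_div_demo(&ticks, DEMO_CLK_TICKS_PER_JIFFY);
	/* ticks=2 whole jiffies, delta=8192 is a multiple of a jiffy */
	printf("ticks=%" PRIu64 " delta=%" PRIu64 "\n", ticks, delta);
	return 0;
}
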
+
+static DEFINE_PER_CPU(atomic_t, clock_sync_word);
+static unsigned long clock_sync_flags;
+
+#define CLOCK_SYNC_HAS_ETR 0
+#define CLOCK_SYNC_HAS_STP 1
+#define CLOCK_SYNC_ETR 2
+#define CLOCK_SYNC_STP 3
+
+/*
+ * The synchronous get_clock function. It will write the current clock
+ * value to the clock pointer and return 0 if the clock is in sync with
+ * the external time source. It returns -ENOSYS if no clock
+ * synchronization facility (ETR or STP) is present, -EACCES if no
+ * facility is currently providing synchronization, and -EAGAIN if
+ * the clock is transiently out of sync.
+ */
+int get_sync_clock(unsigned long long *clock)
+{
+ atomic_t *sw_ptr;
+ unsigned int sw0, sw1;
+
+ sw_ptr = &get_cpu_var(clock_sync_word);
+ sw0 = atomic_read(sw_ptr);
+ *clock = get_clock();
+ sw1 = atomic_read(sw_ptr);
+ put_cpu_var(clock_sync_word);
+ if (sw0 == sw1 && (sw0 & 0x80000000U))
+ /* Success: time is in sync. */
+ return 0;
+ if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
+ !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
+ return -ENOSYS;
+ if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
+ !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
+ return -EACCES;
+ return -EAGAIN;
+}
+EXPORT_SYMBOL(get_sync_clock);
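
get_sync_clock() is lock-free: it samples the per-cpu clock_sync_word before and after reading the TOD clock and accepts the value only if the word did not change and the in-sync bit 2^31 is set; disable_sync_clock() below both clears the bit and bumps the counter, so a reader racing with a sync event always sees a changed word. A userspace model of the same protocol (C11 atomics; every name here is invented) is sketched below:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint sync_word = 0x80000000U;	/* in-sync bit set */
static uint64_t fake_tod = 1234;		/* stands in for get_clock() */

static int get_sync_clock_demo(uint64_t *clock)
{
	unsigned int sw0, sw1;

	sw0 = atomic_load(&sync_word);
	*clock = fake_tod;
	sw1 = atomic_load(&sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		return 0;	/* consistent sample, clock in sync */
	return -1;		/* caller retries, like -EAGAIN above */
}

int main(void)
{
	uint64_t clock;

	if (get_sync_clock_demo(&clock) == 0)
		printf("sync clock: %" PRIu64 "\n", clock);
	return 0;
}
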
+
+/*
+ * Make get_sync_clock return -EAGAIN.
+ */
+static void disable_sync_clock(void *dummy)
+{
+ atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
+ /*
+ * Clear the in-sync bit 2^31. All get_sync_clock calls will
+ * fail until the sync bit is turned back on. In addition,
+ * increase the "sequence" counter so that a get_sync_clock call
+ * racing with a timing event and its recovery is detected.
+ */
+ atomic_clear_mask(0x80000000, sw_ptr);
+ atomic_inc(sw_ptr);
+}
+
+/*
+ * Make get_sync_clock return 0 again.
+ * Must be called with preemption disabled.
+ */
+static void enable_sync_clock(void)
+{
+ atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
+ atomic_set_mask(0x80000000, sw_ptr);
+}
+
+/*
* External Time Reference (ETR) code.
*/
static int etr_port0_online;
static int etr_port1_online;
+static int etr_steai_available;
static int __init early_parse_etr(char *p)
{
@@ -273,12 +391,6 @@ enum etr_event {
ETR_EVENT_UPDATE,
};
-enum etr_flags {
- ETR_FLAG_ENOSYS,
- ETR_FLAG_EACCES,
- ETR_FLAG_STEAI,
-};
-
/*
* Valid bit combinations of the eacr register are (x = don't care):
* e0 e1 dp p0 p1 ea es sl
@@ -305,74 +417,18 @@ enum etr_flags {
*/
static struct etr_eacr etr_eacr;
static u64 etr_tolec; /* time of last eacr update */
-static unsigned long etr_flags;
static struct etr_aib etr_port0;
static int etr_port0_uptodate;
static struct etr_aib etr_port1;
static int etr_port1_uptodate;
static unsigned long etr_events;
static struct timer_list etr_timer;
-static DEFINE_PER_CPU(atomic_t, etr_sync_word);
static void etr_timeout(unsigned long dummy);
static void etr_work_fn(struct work_struct *work);
static DECLARE_WORK(etr_work, etr_work_fn);
/*
- * The etr get_clock function. It will write the current clock value
- * to the clock pointer and return 0 if the clock is in sync with the
- * external time source. If the clock mode is local it will return
- * -ENOSYS and -EAGAIN if the clock is not in sync with the external
- * reference. This function is what ETR is all about..
- */
-int get_sync_clock(unsigned long long *clock)
-{
- atomic_t *sw_ptr;
- unsigned int sw0, sw1;
-
- sw_ptr = &get_cpu_var(etr_sync_word);
- sw0 = atomic_read(sw_ptr);
- *clock = get_clock();
- sw1 = atomic_read(sw_ptr);
- put_cpu_var(etr_sync_sync);
- if (sw0 == sw1 && (sw0 & 0x80000000U))
- /* Success: time is in sync. */
- return 0;
- if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
- return -ENOSYS;
- if (test_bit(ETR_FLAG_EACCES, &etr_flags))
- return -EACCES;
- return -EAGAIN;
-}
-EXPORT_SYMBOL(get_sync_clock);
-
-/*
- * Make get_sync_clock return -EAGAIN.
- */
-static void etr_disable_sync_clock(void *dummy)
-{
- atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
- /*
- * Clear the in-sync bit 2^31. All get_sync_clock calls will
- * fail until the sync bit is turned back on. In addition
- * increase the "sequence" counter to avoid the race of an
- * etr event and the complete recovery against get_sync_clock.
- */
- atomic_clear_mask(0x80000000, sw_ptr);
- atomic_inc(sw_ptr);
-}
-
-/*
- * Make get_sync_clock return 0 again.
- * Needs to be called from a context disabled for preemption.
- */
-static void etr_enable_sync_clock(void)
-{
- atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
- atomic_set_mask(0x80000000, sw_ptr);
-}
-
-/*
* Reset ETR attachment.
*/
static void etr_reset(void)
@@ -381,15 +437,13 @@ static void etr_reset(void)
.e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
.p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
.es = 0, .sl = 0 };
- if (etr_setr(&etr_eacr) == 0)
+ if (etr_setr(&etr_eacr) == 0) {
etr_tolec = get_clock();
- else {
- set_bit(ETR_FLAG_ENOSYS, &etr_flags);
- if (etr_port0_online || etr_port1_online) {
- printk(KERN_WARNING "Running on non ETR capable "
- "machine, only local mode available.\n");
- etr_port0_online = etr_port1_online = 0;
- }
+ set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
+ } else if (etr_port0_online || etr_port1_online) {
+ printk(KERN_WARNING "Running on non ETR capable "
+ "machine, only local mode available.\n");
+ etr_port0_online = etr_port1_online = 0;
}
}
@@ -397,14 +451,12 @@ static int __init etr_init(void)
{
struct etr_aib aib;
- if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
+ if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
return 0;
/* Check if this machine has the steai instruction. */
if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
- set_bit(ETR_FLAG_STEAI, &etr_flags);
+ etr_steai_available = 1;
setup_timer(&etr_timer, etr_timeout, 0UL);
- if (!etr_port0_online && !etr_port1_online)
- set_bit(ETR_FLAG_EACCES, &etr_flags);
if (etr_port0_online) {
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
schedule_work(&etr_work);
@@ -435,7 +487,8 @@ void etr_switch_to_local(void)
{
if (!etr_eacr.sl)
return;
- etr_disable_sync_clock(NULL);
+ if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
+ disable_sync_clock(NULL);
set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
schedule_work(&etr_work);
}
@@ -450,23 +503,21 @@ void etr_sync_check(void)
{
if (!etr_eacr.es)
return;
- etr_disable_sync_clock(NULL);
+ if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
+ disable_sync_clock(NULL);
set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
schedule_work(&etr_work);
}
/*
- * ETR external interrupt. There are two causes:
+ * ETR timing alert. There are two causes:
* 1) port state change, check the usability of the port
* 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
* sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
* or ETR-data word 4 (edf4) has changed.
*/
-static void etr_ext_handler(__u16 code)
+static void etr_timing_alert(struct etr_irq_parm *intparm)
{
- struct etr_interruption_parameter *intparm =
- (struct etr_interruption_parameter *) &S390_lowcore.ext_params;
-
if (intparm->pc0)
/* ETR port 0 state change. */
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
@@ -591,58 +642,23 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
return 1;
}
-/*
- * The time is "clock". old is what we think the time is.
- * Adjust the value by a multiple of jiffies and add the delta to ntp.
- * "delay" is an approximation how long the synchronization took. If
- * the time correction is positive, then "delay" is subtracted from
- * the time difference and only the remaining part is passed to ntp.
- */
-static unsigned long long etr_adjust_time(unsigned long long old,
- unsigned long long clock,
- unsigned long long delay)
-{
- unsigned long long delta, ticks;
- struct timex adjust;
-
- if (clock > old) {
- /* It is later than we thought. */
- delta = ticks = clock - old;
- delta = ticks = (delta < delay) ? 0 : delta - delay;
- delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
- adjust.offset = ticks * (1000000 / HZ);
- } else {
- /* It is earlier than we thought. */
- delta = ticks = old - clock;
- delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
- delta = -delta;
- adjust.offset = -ticks * (1000000 / HZ);
- }
- jiffies_timer_cc += delta;
- if (adjust.offset != 0) {
- printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
- adjust.offset);
- adjust.modes = ADJ_OFFSET_SINGLESHOT;
- do_adjtimex(&adjust);
- }
- return delta;
-}
-
-static struct {
+struct clock_sync_data {
int in_sync;
unsigned long long fixup_cc;
-} etr_sync;
+};
-static void etr_sync_cpu_start(void *dummy)
+static void clock_sync_cpu_start(void *dummy)
{
- etr_enable_sync_clock();
+ struct clock_sync_data *sync = dummy;
+
+ enable_sync_clock();
/*
- * This looks like a busy wait loop but it isn't. etr_sync_cpus
- * is called on all other cpus while the TOD clocks is stopped.
+ * This looks like a busy wait loop but it isn't. clock_sync_cpu_start
+ * is called on all other cpus while the TOD clock is stopped.
* __udelay will stop the cpu on an enabled wait psw until the
* TOD is running again.
*/
- while (etr_sync.in_sync == 0) {
+ while (sync->in_sync == 0) {
__udelay(1);
/*
* A different cpu changes *in_sync. Therefore use
@@ -650,17 +666,17 @@ static void etr_sync_cpu_start(void *dummy)
*/
barrier();
}
- if (etr_sync.in_sync != 1)
+ if (sync->in_sync != 1)
/* Didn't work. Clear per-cpu in sync bit again. */
- etr_disable_sync_clock(NULL);
+ disable_sync_clock(NULL);
/*
* This round of TOD syncing is done. Set the clock comparator
* to the next tick and let the processor continue.
*/
- fixup_clock_comparator(etr_sync.fixup_cc);
+ fixup_clock_comparator(sync->fixup_cc);
}
-static void etr_sync_cpu_end(void *dummy)
+static void clock_sync_cpu_end(void *dummy)
{
}
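
The handshake above deserves a closer look: while one cpu adjusts the TOD clock, every other cpu spins in clock_sync_cpu_start() on sync->in_sync, and the barrier() forces the flag to be re-read on each pass. A rough userspace analogue with pthreads (volatile plays the role of the barrier here; all names are invented for the sketch):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct sync_data_demo {
	volatile int in_sync;	/* set by the syncing "cpu" */
};

static void *secondary_cpu(void *arg)
{
	struct sync_data_demo *sync = arg;

	while (sync->in_sync == 0)
		;	/* spin, like the __udelay(1) loop above */
	printf("released with in_sync=%d\n", sync->in_sync);
	return NULL;
}

int main(void)
{
	struct sync_data_demo sync = { 0 };
	pthread_t t;

	pthread_create(&t, NULL, secondary_cpu, &sync);
	usleep(1000);		/* the master "syncs the clock" */
	sync.in_sync = 1;	/* release the spinning cpu */
	pthread_join(t, NULL);
	return 0;
}
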
@@ -672,6 +688,7 @@ static void etr_sync_cpu_end(void *dummy)
static int etr_sync_clock(struct etr_aib *aib, int port)
{
struct etr_aib *sync_port;
+ struct clock_sync_data etr_sync;
unsigned long long clock, old_clock, delay, delta;
int follows;
int rc;
@@ -690,9 +707,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
*/
memset(&etr_sync, 0, sizeof(etr_sync));
preempt_disable();
- smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
+ smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0);
local_irq_disable();
- etr_enable_sync_clock();
+ enable_sync_clock();
/* Set clock to next OTE. */
__ctl_set_bit(14, 21);
@@ -707,13 +724,13 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
/* Adjust Linux timing variables. */
delay = (unsigned long long)
(aib->edf2.etv - sync_port->edf2.etv) << 32;
- delta = etr_adjust_time(old_clock, clock, delay);
+ delta = adjust_time(old_clock, clock, delay);
etr_sync.fixup_cc = delta;
fixup_clock_comparator(delta);
/* Verify that the clock is properly set. */
if (!etr_aib_follows(sync_port, aib, port)) {
/* Didn't work. */
- etr_disable_sync_clock(NULL);
+ disable_sync_clock(NULL);
etr_sync.in_sync = -EAGAIN;
rc = -EAGAIN;
} else {
@@ -724,12 +741,12 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
/* Could not set the clock ?!? */
__ctl_clear_bit(0, 29);
__ctl_clear_bit(14, 21);
- etr_disable_sync_clock(NULL);
+ disable_sync_clock(NULL);
etr_sync.in_sync = -EAGAIN;
rc = -EAGAIN;
}
local_irq_enable();
- smp_call_function(etr_sync_cpu_end,NULL,0,0);
+ smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
preempt_enable();
return rc;
}
@@ -832,7 +849,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
* Do not try to get the alternate port aib if the clock
* is not in sync yet.
*/
- if (!eacr.es)
+ if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags) && !eacr.es)
return eacr;
/*
@@ -840,7 +857,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
* the other port immediately. If only stetr is available the
* data-port bit toggle has to be used.
*/
- if (test_bit(ETR_FLAG_STEAI, &etr_flags)) {
+ if (etr_steai_available) {
if (eacr.p0 && !etr_port0_uptodate) {
etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
etr_port0_uptodate = 1;
@@ -909,10 +926,10 @@ static void etr_work_fn(struct work_struct *work)
if (!eacr.ea) {
/* Both ports offline. Reset everything. */
eacr.dp = eacr.es = eacr.sl = 0;
- on_each_cpu(etr_disable_sync_clock, NULL, 0, 1);
+ on_each_cpu(disable_sync_clock, NULL, 0, 1);
del_timer_sync(&etr_timer);
etr_update_eacr(eacr);
- set_bit(ETR_FLAG_EACCES, &etr_flags);
+ clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
return;
}
@@ -953,7 +970,6 @@ static void etr_work_fn(struct work_struct *work)
eacr.e1 = 1;
sync_port = (etr_port0_uptodate &&
etr_port_valid(&etr_port0, 0)) ? 0 : -1;
- clear_bit(ETR_FLAG_EACCES, &etr_flags);
} else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
eacr.sl = 0;
eacr.e0 = 0;
@@ -962,7 +978,6 @@ static void etr_work_fn(struct work_struct *work)
eacr.es = 0;
sync_port = (etr_port1_uptodate &&
etr_port_valid(&etr_port1, 1)) ? 1 : -1;
- clear_bit(ETR_FLAG_EACCES, &etr_flags);
} else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
eacr.sl = 1;
eacr.e0 = 1;
@@ -976,7 +991,6 @@ static void etr_work_fn(struct work_struct *work)
eacr.e1 = 1;
sync_port = (etr_port0_uptodate &&
etr_port_valid(&etr_port0, 0)) ? 0 : -1;
- clear_bit(ETR_FLAG_EACCES, &etr_flags);
} else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
eacr.sl = 1;
eacr.e0 = 0;
@@ -985,19 +999,22 @@ static void etr_work_fn(struct work_struct *work)
eacr.es = 0;
sync_port = (etr_port1_uptodate &&
etr_port_valid(&etr_port1, 1)) ? 1 : -1;
- clear_bit(ETR_FLAG_EACCES, &etr_flags);
} else {
/* Both ports not usable. */
eacr.es = eacr.sl = 0;
sync_port = -1;
- set_bit(ETR_FLAG_EACCES, &etr_flags);
+ clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
}
+ if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
+ eacr.es = 0;
+
/*
* If the clock is in sync just update the eacr and return.
* If there is no valid sync port wait for a port update.
*/
- if (eacr.es || sync_port < 0) {
+ if (test_bit(CLOCK_SYNC_STP, &clock_sync_flags) ||
+ eacr.es || sync_port < 0) {
etr_update_eacr(eacr);
etr_set_tolec_timeout(now);
return;
@@ -1018,11 +1035,13 @@ static void etr_work_fn(struct work_struct *work)
* and set up a timer to try again after 0.5 seconds
*/
etr_update_eacr(eacr);
+ set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
if (now < etr_tolec + (1600000 << 12) ||
etr_sync_clock(&aib, sync_port) != 0) {
/* Sync failed. Try again in 1/2 second. */
eacr.es = 0;
etr_update_eacr(eacr);
+ clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
etr_set_sync_timeout();
} else
etr_set_tolec_timeout(now);
@@ -1097,8 +1116,8 @@ static ssize_t etr_online_store(struct sys_device *dev,
value = simple_strtoul(buf, NULL, 0);
if (value != 0 && value != 1)
return -EINVAL;
- if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
- return -ENOSYS;
+ if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
+ return -EOPNOTSUPP;
if (dev == &etr_port0_dev) {
if (etr_port0_online == value)
return count; /* Nothing to do. */
@@ -1292,3 +1311,318 @@ out:
}
device_initcall(etr_init_sysfs);
+
+/*
+ * Server Time Protocol (STP) code.
+ */
+static int stp_online;
+static struct stp_sstpi stp_info;
+static void *stp_page;
+
+static void stp_work_fn(struct work_struct *work);
+static DECLARE_WORK(stp_work, stp_work_fn);
+
+static int __init early_parse_stp(char *p)
+{
+ if (strncmp(p, "off", 3) == 0)
+ stp_online = 0;
+ else if (strncmp(p, "on", 2) == 0)
+ stp_online = 1;
+ return 0;
+}
+early_param("stp", early_parse_stp);
+
+/*
+ * Reset STP attachment.
+ */
+static void stp_reset(void)
+{
+ int rc;
+
+ stp_page = alloc_bootmem_pages(PAGE_SIZE);
+ rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
+ if (rc == 1)
+ set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
+ else if (stp_online) {
+ printk(KERN_WARNING "Running on non STP capable machine.\n");
+ free_bootmem((unsigned long) stp_page, PAGE_SIZE);
+ stp_page = NULL;
+ stp_online = 0;
+ }
+}
+
+static int __init stp_init(void)
+{
+ if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online)
+ schedule_work(&stp_work);
+ return 0;
+}
+
+arch_initcall(stp_init);
+
+/*
+ * STP timing alert. There are three causes:
+ * 1) timing status change
+ * 2) link availability change
+ * 3) time control parameter change
+ * In all three cases we are only interested in the clock source state.
+ * If an STP clock source has become available, use it.
+ */
+static void stp_timing_alert(struct stp_irq_parm *intparm)
+{
+ if (intparm->tsc || intparm->lac || intparm->tcpc)
+ schedule_work(&stp_work);
+}
+
+/*
+ * STP sync check machine check. This is called when the timing state
+ * changes from the synchronized state to the unsynchronized state.
+ * After a STP sync check the clock is not in sync. The machine check
+ * is broadcasted to all cpus at the same time.
+ */
+void stp_sync_check(void)
+{
+ if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
+ return;
+ disable_sync_clock(NULL);
+ schedule_work(&stp_work);
+}
+
+/*
+ * STP island condition machine check. This is called when an attached
+ * server attempts to communicate over an STP link and the servers
+ * have matching CTN ids and a valid stratum-1 configuration, but
+ * their configurations do not match.
+ */
+void stp_island_check(void)
+{
+ if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
+ return;
+ disable_sync_clock(NULL);
+ schedule_work(&stp_work);
+}
+
+/*
+ * STP work function. Check the STP state and take over the clock
+ * synchronization if the STP clock source is usable.
+ */
+static void stp_work_fn(struct work_struct *work)
+{
+ struct clock_sync_data stp_sync;
+ unsigned long long old_clock, delta;
+ int rc;
+
+ if (!stp_online) {
+ chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
+ return;
+ }
+
+ rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
+ if (rc)
+ return;
+
+ rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
+ if (rc || stp_info.c == 0)
+ return;
+
+ /*
+ * Catch all other cpus and make them wait until we have
+ * successfully synced the clock. smp_call_function will
+ * return after all other cpus are in clock_sync_cpu_start.
+ */
+ memset(&stp_sync, 0, sizeof(stp_sync));
+ preempt_disable();
+ smp_call_function(clock_sync_cpu_start, &stp_sync, 0, 0);
+ local_irq_disable();
+ enable_sync_clock();
+
+ set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
+ if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
+ schedule_work(&etr_work);
+
+ rc = 0;
+ if (stp_info.todoff[0] || stp_info.todoff[1] ||
+ stp_info.todoff[2] || stp_info.todoff[3] ||
+ stp_info.tmd != 2) {
+ old_clock = get_clock();
+ rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
+ if (rc == 0) {
+ delta = adjust_time(old_clock, get_clock(), 0);
+ fixup_clock_comparator(delta);
+ rc = chsc_sstpi(stp_page, &stp_info,
+ sizeof(struct stp_sstpi));
+ if (rc == 0 && stp_info.tmd != 2)
+ rc = -EAGAIN;
+ }
+ }
+ if (rc) {
+ disable_sync_clock(NULL);
+ stp_sync.in_sync = -EAGAIN;
+ clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
+ if (etr_port0_online || etr_port1_online)
+ schedule_work(&etr_work);
+ } else
+ stp_sync.in_sync = 1;
+
+ local_irq_enable();
+ smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
+ preempt_enable();
+}
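
The core decision in stp_work_fn() is whether to issue the hard STP sync: it is needed whenever any of the four TOD-offset words is nonzero or the timing mode is not 2, which the code above treats as the synchronized mode. Restated as a standalone predicate (the struct below is an assumed mirror of the relevant stp_sstpi fields, for illustration only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct stp_state_demo {
	uint32_t todoff[4];	/* TOD offset words */
	int tmd;		/* timing mode, 2 == synchronized */
};

static bool stp_needs_sync(const struct stp_state_demo *s)
{
	return s->todoff[0] || s->todoff[1] ||
	       s->todoff[2] || s->todoff[3] || s->tmd != 2;
}

int main(void)
{
	struct stp_state_demo in_sync = { { 0, 0, 0, 0 }, 2 };
	struct stp_state_demo off = { { 0, 1, 0, 0 }, 2 };

	printf("%d %d\n", stp_needs_sync(&in_sync), stp_needs_sync(&off));
	return 0;	/* prints: 0 1 */
}
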
+
+/*
+ * STP class sysfs interface functions
+ */
+static struct sysdev_class stp_sysclass = {
+ .name = "stp",
+};
+
+static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf)
+{
+ if (!stp_online)
+ return -ENODATA;
+ return sprintf(buf, "%016llx\n",
+ *(unsigned long long *) stp_info.ctnid);
+}
+
+static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
+
+static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf)
+{
+ if (!stp_online)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", stp_info.ctn);
+}
+
+static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
+
+static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf)
+{
+ if (!stp_online || !(stp_info.vbits & 0x2000))
+ return -ENODATA;
+ return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
+}
+
+static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
+
+static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf)
+{
+ if (!stp_online || !(stp_info.vbits & 0x8000))
+ return -ENODATA;
+ return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
+}
+
+static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
+
+static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf)
+{
+ if (!stp_online)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
+}
+
+static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
+
+static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf)
+{
+ if (!stp_online || !(stp_info.vbits & 0x0800))
+ return -ENODATA;
+ return sprintf(buf, "%i\n", (int) stp_info.tto);
+}
+
+static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
+
+static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf)
+{
+ if (!stp_online || !(stp_info.vbits & 0x4000))
+ return -ENODATA;
+ return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
+}
+
+static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
+ stp_time_zone_offset_show, NULL);
+
+static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf)
+{
+ if (!stp_online)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", stp_info.tmd);
+}
+
+static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
+
+static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf)
+{
+ if (!stp_online)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", stp_info.tst);
+}
+
+static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
+
+static ssize_t stp_online_show(struct sysdev_class *class, char *buf)
+{
+ return sprintf(buf, "%i\n", stp_online);
+}
+
+static ssize_t stp_online_store(struct sysdev_class *class,
+ const char *buf, size_t count)
+{
+ unsigned int value;
+
+ value = simple_strtoul(buf, NULL, 0);
+ if (value != 0 && value != 1)
+ return -EINVAL;
+ if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
+ return -EOPNOTSUPP;
+ stp_online = value;
+ schedule_work(&stp_work);
+ return count;
+}
+
+/*
+ * Can't use SYSDEV_CLASS_ATTR because the attribute should be named
+ * stp/online but attr_online already exists in this file.
+ */
+static struct sysdev_class_attribute attr_stp_online = {
+ .attr = { .name = "online", .mode = 0600 },
+ .show = stp_online_show,
+ .store = stp_online_store,
+};
+
+static struct sysdev_class_attribute *stp_attributes[] = {
+ &attr_ctn_id,
+ &attr_ctn_type,
+ &attr_dst_offset,
+ &attr_leap_seconds,
+ &attr_stp_online,
+ &attr_stratum,
+ &attr_time_offset,
+ &attr_time_zone_offset,
+ &attr_timing_mode,
+ &attr_timing_state,
+ NULL
+};
+
+static int __init stp_init_sysfs(void)
+{
+ struct sysdev_class_attribute **attr;
+ int rc;
+
+ rc = sysdev_class_register(&stp_sysclass);
+ if (rc)
+ goto out;
+ for (attr = stp_attributes; *attr; attr++) {
+ rc = sysdev_class_create_file(&stp_sysclass, *attr);
+ if (rc)
+ goto out_unreg;
+ }
+ return 0;
+out_unreg:
+ for (; attr >= stp_attributes; attr--)
+ sysdev_class_remove_file(&stp_sysclass, *attr);
+ sysdev_class_unregister(&stp_sysclass);
+out:
+ return rc;
+}
+
+device_initcall(stp_init_sysfs);
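
For completeness, the new attributes should be reachable from userspace under the sysdev class path: sysdev classes live below /sys/devices/system/, so the class registered as "stp" above would surface its online file as /sys/devices/system/stp/online (path assumed from the class name, not verified against the patch). A minimal hypothetical reader:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd;

	fd = open("/sys/devices/system/stp/online", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("stp online: %s", buf);	/* value ends in '\n' */
	}
	close(fd);
	return 0;
}
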
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 661a0721705..212d618b009 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -313,8 +313,6 @@ void __init s390_init_cpu_topology(void)
machine_has_topology_irq = 1;
tl_info = alloc_bootmem_pages(PAGE_SIZE);
- if (!tl_info)
- goto error;
info = tl_info;
stsi(info, 15, 1, 2);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index ca90ee3f930..0fa5dc5d68e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -136,7 +136,7 @@ static inline void set_vtimer(__u64 expires)
}
#endif
-static void start_cpu_timer(void)
+void vtime_start_cpu_timer(void)
{
struct vtimer_queue *vt_list;
@@ -150,7 +150,7 @@ static void start_cpu_timer(void)
set_vtimer(vt_list->idle);
}
-static void stop_cpu_timer(void)
+void vtime_stop_cpu_timer(void)
{
struct vtimer_queue *vt_list;
@@ -318,8 +318,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
spin_lock_irqsave(&vt_list->lock, flags);
- if (timer->cpu != smp_processor_id())
- printk("internal_add_vtimer: BUG, running on wrong CPU");
+ BUG_ON(timer->cpu != smp_processor_id());
/* if list is empty we only have to set the timer */
if (list_empty(&vt_list->list)) {
@@ -353,25 +352,12 @@ static void internal_add_vtimer(struct vtimer_list *timer)
put_cpu();
}
-static inline int prepare_vtimer(struct vtimer_list *timer)
+static inline void prepare_vtimer(struct vtimer_list *timer)
{
- if (!timer->function) {
- printk("add_virt_timer: uninitialized timer\n");
- return -EINVAL;
- }
-
- if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
- printk("add_virt_timer: invalid timer expire value!\n");
- return -EINVAL;
- }
-
- if (vtimer_pending(timer)) {
- printk("add_virt_timer: timer pending\n");
- return -EBUSY;
- }
-
+ BUG_ON(!timer->function);
+ BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
+ BUG_ON(vtimer_pending(timer));
timer->cpu = get_cpu();
- return 0;
}
/*
@@ -382,10 +368,7 @@ void add_virt_timer(void *new)
struct vtimer_list *timer;
timer = (struct vtimer_list *)new;
-
- if (prepare_vtimer(timer) < 0)
- return;
-
+ prepare_vtimer(timer);
timer->interval = 0;
internal_add_vtimer(timer);
}
@@ -399,10 +382,7 @@ void add_virt_timer_periodic(void *new)
struct vtimer_list *timer;
timer = (struct vtimer_list *)new;
-
- if (prepare_vtimer(timer) < 0)
- return;
-
+ prepare_vtimer(timer);
timer->interval = timer->expires;
internal_add_vtimer(timer);
}
@@ -423,15 +403,8 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
unsigned long flags;
int cpu;
- if (!timer->function) {
- printk("mod_virt_timer: uninitialized timer\n");
- return -EINVAL;
- }
-
- if (!expires || expires > VTIMER_MAX_SLICE) {
- printk("mod_virt_timer: invalid expire range\n");
- return -EINVAL;
- }
+ BUG_ON(!timer->function);
+ BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
/*
* This is a common optimization triggered by the
@@ -444,6 +417,9 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
cpu = get_cpu();
vt_list = &per_cpu(virt_cpu_timer, cpu);
+ /* check if we run on the right CPU */
+ BUG_ON(timer->cpu != cpu);
+
/* disable interrupts before test if timer is pending */
spin_lock_irqsave(&vt_list->lock, flags);
@@ -458,14 +434,6 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
return 0;
}
- /* check if we run on the right CPU */
- if (timer->cpu != cpu) {
- printk("mod_virt_timer: running on wrong CPU, check your code\n");
- spin_unlock_irqrestore(&vt_list->lock, flags);
- put_cpu();
- return -EINVAL;
- }
-
list_del_init(&timer->entry);
timer->expires = expires;
@@ -536,24 +504,6 @@ void init_cpu_vtimer(void)
}
-static int vtimer_idle_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- switch (action) {
- case S390_CPU_IDLE:
- stop_cpu_timer();
- break;
- case S390_CPU_NOT_IDLE:
- start_cpu_timer();
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block vtimer_idle_nb = {
- .notifier_call = vtimer_idle_notify,
-};
-
void __init vtime_init(void)
{
/* request the cpu timer external interrupt */
@@ -561,9 +511,6 @@ void __init vtime_init(void)
&ext_int_info_timer) != 0)
panic("Couldn't request external interrupt 0x1005");
- if (register_idle_notifier(&vtimer_idle_nb))
- panic("Couldn't register idle notifier");
-
/* Enable cpu timer interrupts on the boot cpu. */
init_cpu_vtimer();
}
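
One behavioural consequence of the vtime.c changes above: prepare_vtimer() and mod_virt_timer() now BUG() on a malformed timer instead of printing a message and returning an error, so a caller must fully initialize the function and expires fields before adding the timer. A standalone mock of the stricter contract (the struct layout and the VTIMER_MAX_SLICE bound are assumptions of this sketch):

#include <assert.h>
#include <stdio.h>

#define DEMO_VTIMER_MAX_SLICE 0x7fffffffffffffffULL	/* assumed bound */

struct vtimer_demo {
	void (*function)(unsigned long);
	unsigned long data;
	unsigned long long expires;
	unsigned long long interval;
};

/* Mirrors the BUG_ON() checks introduced in the patch. */
static void prepare_vtimer_demo(struct vtimer_demo *t)
{
	assert(t->function);
	assert(t->expires && t->expires <= DEMO_VTIMER_MAX_SLICE);
}

static void tick(unsigned long data)
{
	printf("tick %lu\n", data);
}

int main(void)
{
	struct vtimer_demo t = { .function = tick, .expires = 1000 };

	prepare_vtimer_demo(&t);	/* passes: fields are set up */
	tick(t.data);
	return 0;
}
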