Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--   arch/s390/kernel/Makefile        |    4
-rw-r--r--   arch/s390/kernel/early.c         |   74
-rw-r--r--   arch/s390/kernel/entry.S         |   16
-rw-r--r--   arch/s390/kernel/entry64.S       |    4
-rw-r--r--   arch/s390/kernel/head.S          |    1
-rw-r--r--   arch/s390/kernel/head31.S        |    1
-rw-r--r--   arch/s390/kernel/head64.S        |    9
-rw-r--r--   arch/s390/kernel/ipl.c           |  166
-rw-r--r--   arch/s390/kernel/mcount.S        |  147
-rw-r--r--   arch/s390/kernel/mcount64.S      |   78
-rw-r--r--   arch/s390/kernel/setup.c         |   10
-rw-r--r--   arch/s390/kernel/signal.c        |    2
-rw-r--r--   arch/s390/kernel/smp.c           |   39
-rw-r--r--   arch/s390/kernel/suspend.c       |   73
-rw-r--r--   arch/s390/kernel/swsusp_asm64.S  |  184
-rw-r--r--   arch/s390/kernel/time.c          |    3
-rw-r--r--   arch/s390/kernel/vmlinux.lds.S   |   87
17 files changed, 604 insertions, 294 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c75ed43b1a1..c7be8e10b87 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -32,7 +32,7 @@ extra-y += head.o init_task.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o topology.o
-
+obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
@@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cae14c49951..bf8b4ae7ff2 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -6,6 +6,9 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
+#define KMSG_COMPONENT "setup"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -16,6 +19,7 @@
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
+#include <linux/kernel.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
@@ -35,8 +39,6 @@
char kernel_nss_name[NSS_NAME_SIZE + 1];
-static unsigned long machine_flags;
-
static void __init setup_boot_command_line(void);
/*
@@ -81,6 +83,8 @@ asm(
" br 14\n"
" .size savesys_ipl_nss, .-savesys_ipl_nss\n");
+static __initdata char upper_command_line[COMMAND_LINE_SIZE];
+
static noinline __init void create_kernel_nss(void)
{
unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
@@ -90,7 +94,6 @@ static noinline __init void create_kernel_nss(void)
int response;
size_t len;
char *savesys_ptr;
- char upper_command_line[COMMAND_LINE_SIZE];
char defsys_cmd[DEFSYS_CMD_SIZE];
char savesys_cmd[SAVESYS_CMD_SIZE];
@@ -141,6 +144,8 @@ static noinline __init void create_kernel_nss(void)
__cpcmd(defsys_cmd, NULL, 0, &response);
if (response != 0) {
+ pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
+ response);
kernel_nss_name[0] = '\0';
return;
}
@@ -153,8 +158,11 @@ static noinline __init void create_kernel_nss(void)
* max SAVESYS_CMD_SIZE
* On error: response contains the numeric portion of cp error message.
* for SAVESYS it will be >= 263
+ * for missing privilege class, it will be 1
*/
- if (response > SAVESYS_CMD_SIZE) {
+ if (response > SAVESYS_CMD_SIZE || response == 1) {
+ pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
+ response);
kernel_nss_name[0] = '\0';
return;
}
@@ -205,12 +213,9 @@ static noinline __init void detect_machine_type(void)
/* Running under KVM? If not we assume z/VM */
if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3))
- machine_flags |= MACHINE_FLAG_KVM;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
else
- machine_flags |= MACHINE_FLAG_VM;
-
- /* Store machine flags for setting up lowcore early */
- S390_lowcore.machine_flags = machine_flags;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
static __init void early_pgm_check_handler(void)
@@ -245,7 +250,7 @@ static noinline __init void setup_hpage(void)
facilities = stfl();
if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29)))
return;
- machine_flags |= MACHINE_FLAG_HPAGE;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
__ctl_set_bit(0, 23);
#endif
}
@@ -263,7 +268,7 @@ static __init void detect_mvpg(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
if (!rc)
- machine_flags |= MACHINE_FLAG_MVPG;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
#endif
}
@@ -279,7 +284,7 @@ static __init void detect_ieee(void)
EX_TABLE(0b,1b)
: "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
if (!rc)
- machine_flags |= MACHINE_FLAG_IEEE;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
#endif
}
@@ -298,7 +303,7 @@ static __init void detect_csp(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
if (!rc)
- machine_flags |= MACHINE_FLAG_CSP;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
#endif
}
@@ -315,7 +320,7 @@ static __init void detect_diag9c(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
if (!rc)
- machine_flags |= MACHINE_FLAG_DIAG9C;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}
static __init void detect_diag44(void)
@@ -330,7 +335,7 @@ static __init void detect_diag44(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
if (!rc)
- machine_flags |= MACHINE_FLAG_DIAG44;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
#endif
}
@@ -341,11 +346,11 @@ static __init void detect_machine_facilities(void)
facilities = stfl();
if (facilities & (1 << 28))
- machine_flags |= MACHINE_FLAG_IDTE;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (facilities & (1 << 23))
- machine_flags |= MACHINE_FLAG_PFMF;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
if (facilities & (1 << 4))
- machine_flags |= MACHINE_FLAG_MVCOS;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
#endif
}
@@ -367,21 +372,35 @@ static __init void rescue_initrd(void)
}
/* Set up boot command line */
-static void __init setup_boot_command_line(void)
+static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
- char *parm = NULL;
+ char *parm, *delim;
+ size_t rc, len;
+
+ len = strlen(boot_command_line);
+
+ delim = boot_command_line + len; /* '\0' character position */
+ parm = boot_command_line + len + 1; /* append right after '\0' */
+ rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
+ if (rc) {
+ if (*parm == '=')
+ memmove(boot_command_line, parm + 1, rc);
+ else
+ *delim = ' '; /* replace '\0' with space */
+ }
+}
+
+static void __init setup_boot_command_line(void)
+{
/* copy arch command line */
strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
/* append IPL PARM data to the boot command line */
- if (MACHINE_IS_VM) {
- parm = boot_command_line + strlen(boot_command_line);
- *parm++ = ' ';
- get_ipl_vmparm(parm);
- if (parm[0] == '=')
- memmove(boot_command_line, parm + 1, strlen(parm));
- }
+ if (MACHINE_IS_VM)
+ append_to_cmdline(append_ipl_vmparm);
+
+ append_to_cmdline(append_ipl_scpdata);
}
@@ -413,7 +432,6 @@ void __init startup_init(void)
setup_hpage();
sclp_facilities_detect();
detect_memory_layout(memory_chunk);
- S390_lowcore.machine_flags = machine_flags;
#ifdef CONFIG_DYNAMIC_FTRACE
S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
#endif
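
[Editor's illustration, not part of the patch] The new append_to_cmdline() helper in early.c lets a provider callback write IPL parameter data right after the terminating '\0' of the current boot command line; if the returned data starts with '=', it replaces the whole command line, otherwise the '\0' is turned into a space so the data is appended. A minimal stand-alone C sketch of that append-or-replace logic (the provider, command-line contents and buffer size below are made up for the demo):

#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 128

static char cmdline[CMDLINE_SIZE] = "root=/dev/dasda1 ro";

/* stand-in for append_ipl_vmparm()/append_ipl_scpdata() */
static size_t provider(char *dest, size_t size, const char *data)
{
	size_t len = strlen(data);

	if (len > size - 1)
		len = size - 1;
	memcpy(dest, data, len);
	dest[len] = '\0';
	return len;
}

static void append_to_cmdline_demo(const char *data)
{
	size_t len = strlen(cmdline);
	char *delim = cmdline + len;		/* '\0' character position */
	char *parm = cmdline + len + 1;		/* append right after '\0' */
	size_t rc = provider(parm, CMDLINE_SIZE - len - 1, data);

	if (!rc)
		return;
	if (*parm == '=')	/* leading '=' replaces the whole line */
		memmove(cmdline, parm + 1, rc);
	else
		*delim = ' ';	/* otherwise join with a space */
}

int main(void)
{
	append_to_cmdline_demo("dasd=0150");		/* appended */
	puts(cmdline);		/* root=/dev/dasda1 ro dasd=0150 */
	append_to_cmdline_demo("=console=ttyS0");	/* replaces */
	puts(cmdline);		/* console=ttyS0 */
	return 0;
}
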
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c4c80a22bc1..f78580a7403 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -278,7 +278,8 @@ sysc_return:
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
- la %r1,BASED(sysc_restore_trace_psw)
+ la %r1,BASED(sysc_restore_trace_psw_addr)
+ l %r1,0(%r1)
lpsw 0(%r1)
sysc_restore_trace:
TRACE_IRQS_CHECK
@@ -289,10 +290,15 @@ sysc_leave:
sysc_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+sysc_restore_trace_psw_addr:
+ .long sysc_restore_trace_psw
+
+ .section .data,"aw",@progbits
.align 8
.globl sysc_restore_trace_psw
sysc_restore_trace_psw:
.long 0, sysc_restore_trace + 0x80000000
+ .previous
#endif
#
@@ -606,7 +612,8 @@ io_return:
bnz BASED(io_work) # there is work to do (signals etc.)
io_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
- la %r1,BASED(io_restore_trace_psw)
+ la %r1,BASED(io_restore_trace_psw_addr)
+ l %r1,0(%r1)
lpsw 0(%r1)
io_restore_trace:
TRACE_IRQS_CHECK
@@ -617,10 +624,15 @@ io_leave:
io_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+io_restore_trace_psw_addr:
+ .long io_restore_trace_psw
+
+ .section .data,"aw",@progbits
.align 8
.globl io_restore_trace_psw
io_restore_trace_psw:
.long 0, io_restore_trace + 0x80000000
+ .previous
#endif
#
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index f6618e9e15e..009ca6175db 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -284,10 +284,12 @@ sysc_leave:
sysc_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+ .section .data,"aw",@progbits
.align 8
.globl sysc_restore_trace_psw
sysc_restore_trace_psw:
.quad 0, sysc_restore_trace
+ .previous
#endif
#
@@ -595,10 +597,12 @@ io_leave:
io_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+ .section .data,"aw",@progbits
.align 8
.globl io_restore_trace_psw
io_restore_trace_psw:
.quad 0, io_restore_trace
+ .previous
#endif
#
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index ec688234852..c52b4f7742f 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -27,6 +27,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/cpu.h>
#ifdef CONFIG_64BIT
#define ARCH_OFFSET 4
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 2ced846065b..602b508cd4c 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -24,6 +24,7 @@ startup_continue:
# Setup stack
#
l %r15,.Linittu-.LPG1(%r13)
+ st %r15,__LC_THREAD_INFO # cache thread info in lowcore
mvc __LC_CURRENT(4),__TI_task(%r15)
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
st %r15,__LC_KERNEL_STACK # set end of kernel stack
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 65667b2e65c..6a250808092 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -62,9 +62,9 @@ startup_continue:
clr %r11,%r12
je 5f # no more space in prefix array
4:
- ahi %r8,1 # next cpu (r8 += 1)
- cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ?
- jl 1b # jump if not last cpu
+ ahi %r8,1 # next cpu (r8 += 1)
+ chi %r8,MAX_CPU_ADDRESS # is last possible cpu ?
+ jle 1b # jump if not last cpu
5:
lhi %r1,2 # mode 2 = esame (dump)
j 6f
@@ -92,6 +92,7 @@ startup_continue:
# Setup stack
#
larl %r15,init_thread_union
+ stg %r15,__LC_THREAD_INFO # cache thread info in lowcore
lg %r14,__TI_task(%r15) # cache current in lowcore
stg %r14,__LC_CURRENT
aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
@@ -129,8 +130,6 @@ startup_continue:
#ifdef CONFIG_ZFCPDUMP
.Lcurrent_cpu:
.long 0x0
-.Llast_cpu:
- .long 0x0000ffff
.Lpref_arr_ptr:
.long zfcpdump_prefix_array
#endif /* CONFIG_ZFCPDUMP */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 371a2d88f4a..ee57a42e6e9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -272,17 +272,18 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
/* VM IPL PARM routines */
-static void reipl_get_ascii_vmparm(char *dest,
+size_t reipl_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
int i;
- int len = 0;
+ size_t len;
char has_lowercase = 0;
+ len = 0;
if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
(ipb->ipl_info.ccw.vm_parm_len > 0)) {
- len = ipb->ipl_info.ccw.vm_parm_len;
+ len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
@@ -299,14 +300,20 @@ static void reipl_get_ascii_vmparm(char *dest,
EBCASC(dest, len);
}
dest[len] = 0;
+
+ return len;
}
-void get_ipl_vmparm(char *dest)
+size_t append_ipl_vmparm(char *dest, size_t size)
{
+ size_t rc;
+
+ rc = 0;
if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
- reipl_get_ascii_vmparm(dest, &ipl_block);
+ rc = reipl_get_ascii_vmparm(dest, size, &ipl_block);
else
dest[0] = 0;
+ return rc;
}
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
@@ -314,10 +321,65 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj,
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
- get_ipl_vmparm(parm);
+ append_ipl_vmparm(parm, sizeof(parm));
return sprintf(page, "%s\n", parm);
}
+static size_t scpdata_length(const char* buf, size_t count)
+{
+ while (count) {
+ if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
+ break;
+ count--;
+ }
+ return count;
+}
+
+size_t reipl_append_ascii_scpdata(char *dest, size_t size,
+ const struct ipl_parameter_block *ipb)
+{
+ size_t count;
+ size_t i;
+ int has_lowercase;
+
+ count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
+ ipb->ipl_info.fcp.scp_data_len));
+ if (!count)
+ goto out;
+
+ has_lowercase = 0;
+ for (i = 0; i < count; i++) {
+ if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
+ count = 0;
+ goto out;
+ }
+ if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
+ has_lowercase = 1;
+ }
+
+ if (has_lowercase)
+ memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
+ else
+ for (i = 0; i < count; i++)
+ dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
+out:
+ dest[count] = '\0';
+ return count;
+}
+
+size_t append_ipl_scpdata(char *dest, size_t len)
+{
+ size_t rc;
+
+ rc = 0;
+ if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
+ rc = reipl_append_ascii_scpdata(dest, len, &ipl_block);
+ else
+ dest[0] = 0;
+ return rc;
+}
+
+
static struct kobj_attribute sys_ipl_vm_parm_attr =
__ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
@@ -553,7 +615,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
{
char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
- reipl_get_ascii_vmparm(vmparm, ipb);
+ reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
return sprintf(page, "%s\n", vmparm);
}
@@ -626,6 +688,59 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
/* FCP reipl device attributes */
+static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len;
+ void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data;
+
+ return memory_read_from_buffer(buf, count, &off, scp_data, size);
+}
+
+static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t padding;
+ size_t scpdata_len;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off >= DIAG308_SCPDATA_SIZE)
+ return -ENOSPC;
+
+ if (count > DIAG308_SCPDATA_SIZE - off)
+ count = DIAG308_SCPDATA_SIZE - off;
+
+ memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
+ scpdata_len = off + count;
+
+ if (scpdata_len % 8) {
+ padding = 8 - (scpdata_len % 8);
+ memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
+ 0, padding);
+ scpdata_len += padding;
+ }
+
+ reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len;
+ reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len;
+ reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len;
+
+ return count;
+}
+
+static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
+ .attr = {
+ .name = "scp_data",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .size = PAGE_SIZE,
+ .read = reipl_fcp_scpdata_read,
+ .write = reipl_fcp_scpdata_write,
+};
+
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
reipl_block_fcp->ipl_info.fcp.wwpn);
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
@@ -647,7 +762,6 @@ static struct attribute *reipl_fcp_attrs[] = {
};
static struct attribute_group reipl_fcp_attr_group = {
- .name = IPL_FCP_STR,
.attrs = reipl_fcp_attrs,
};
@@ -895,6 +1009,7 @@ static struct kobj_attribute reipl_type_attr =
__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
static struct kset *reipl_kset;
+static struct kset *reipl_fcp_kset;
static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
const enum ipl_method m)
@@ -906,7 +1021,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
reipl_get_ascii_loadparm(loadparm, ipb);
reipl_get_ascii_nss_name(nss_name, ipb);
- reipl_get_ascii_vmparm(vmparm, ipb);
+ reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
switch (m) {
case REIPL_METHOD_CCW_VM:
@@ -1076,23 +1191,44 @@ static int __init reipl_fcp_init(void)
int rc;
if (!diag308_set_works) {
- if (ipl_info.type == IPL_TYPE_FCP)
+ if (ipl_info.type == IPL_TYPE_FCP) {
make_attrs_ro(reipl_fcp_attrs);
- else
+ sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO;
+ } else
return 0;
}
reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_fcp)
return -ENOMEM;
- rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group);
+
+ /* sysfs: create fcp kset for mixing attr group and bin attrs */
+ reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL,
+ &reipl_kset->kobj);
+ if (!reipl_fcp_kset) {
+ free_page((unsigned long) reipl_block_fcp);
+ return -ENOMEM;
+ }
+
+ rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
+ if (rc) {
+ kset_unregister(reipl_fcp_kset);
+ free_page((unsigned long) reipl_block_fcp);
+ return rc;
+ }
+
+ rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj,
+ &sys_reipl_fcp_scp_data_attr);
if (rc) {
- free_page((unsigned long)reipl_block_fcp);
+ sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
+ kset_unregister(reipl_fcp_kset);
+ free_page((unsigned long) reipl_block_fcp);
return rc;
}
- if (ipl_info.type == IPL_TYPE_FCP) {
+
+ if (ipl_info.type == IPL_TYPE_FCP)
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
- } else {
+ else {
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
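
[Editor's illustration, not part of the patch] The new scp_data sysfs write handler pads the user-supplied data length up to the next multiple of 8 with zero bytes before storing it as scp_data_len and adjusting the IPL parameter block lengths. A stand-alone sketch of just that padding arithmetic (the buffer size and byte count are example values):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char scp_data[64];
	size_t count = 13;		/* bytes written via sysfs (example) */
	size_t scpdata_len = count;
	size_t padding;

	memset(scp_data, 0xaa, sizeof(scp_data));
	if (scpdata_len % 8) {
		padding = 8 - (scpdata_len % 8);
		memset(scp_data + scpdata_len, 0, padding);  /* zero fill */
		scpdata_len += padding;
	}
	printf("wrote %zu bytes, stored scp_data_len = %zu\n",
	       count, scpdata_len);	/* 13 -> 16 */
	return 0;
}
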
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 2a0a5e97ba8..dfe015d7398 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -11,111 +11,27 @@
ftrace_stub:
br %r14
-#ifdef CONFIG_64BIT
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
.globl _mcount
_mcount:
- br %r14
-
- .globl ftrace_caller
-ftrace_caller:
- larl %r1,function_trace_stop
- icm %r1,0xf,0(%r1)
- bnzr %r14
- stmg %r2,%r5,32(%r15)
- stg %r14,112(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- lgr %r2,%r14
- lg %r3,168(%r15)
- larl %r14,ftrace_dyn_func
- lg %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl ftrace_graph_caller
-ftrace_graph_caller:
- # This unconditional branch gets runtime patched. Change only if
- # you know what you are doing. See ftrace_enable_graph_caller().
- j 0f
- lg %r2,272(%r15)
- lg %r3,168(%r15)
- brasl %r14,prepare_ftrace_return
- stg %r2,168(%r15)
-0:
-#endif
- aghi %r15,160
- lmg %r2,%r5,32(%r15)
- lg %r14,112(%r15)
+#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
.data
.globl ftrace_dyn_func
ftrace_dyn_func:
- .quad ftrace_stub
+ .long ftrace_stub
.previous
-#else /* CONFIG_DYNAMIC_FTRACE */
-
- .globl _mcount
-_mcount:
- larl %r1,function_trace_stop
- icm %r1,0xf,0(%r1)
- bnzr %r14
- stmg %r2,%r5,32(%r15)
- stg %r14,112(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- lgr %r2,%r14
- lg %r3,168(%r15)
- larl %r14,ftrace_trace_function
- lg %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- lg %r2,272(%r15)
- lg %r3,168(%r15)
- brasl %r14,prepare_ftrace_return
- stg %r2,168(%r15)
-#endif
- aghi %r15,160
- lmg %r2,%r5,32(%r15)
- lg %r14,112(%r15)
- br %r14
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
- .globl return_to_handler
-return_to_handler:
- stmg %r2,%r5,32(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- brasl %r14,ftrace_return_to_handler
- aghi %r15,160
- lgr %r14,%r2
- lmg %r2,%r5,32(%r15)
- br %r14
-
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#else /* CONFIG_64BIT */
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
- .globl _mcount
-_mcount:
- br %r14
-
.globl ftrace_caller
ftrace_caller:
+#endif
stm %r2,%r5,16(%r15)
bras %r1,2f
+#ifdef CONFIG_DYNAMIC_FTRACE
+0: .long ftrace_dyn_func
+#else
0: .long ftrace_trace_function
+#endif
1: .long function_trace_stop
2: l %r2,1b-0b(%r1)
icm %r2,0xf,0(%r2)
@@ -131,53 +47,13 @@ ftrace_caller:
l %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
.globl ftrace_graph_caller
ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if
# you know what you are doing. See ftrace_enable_graph_caller().
j 1f
- bras %r1,0f
- .long prepare_ftrace_return
-0: l %r2,152(%r15)
- l %r4,0(%r1)
- l %r3,100(%r15)
- basr %r14,%r4
- st %r2,100(%r15)
-1:
#endif
- ahi %r15,96
- l %r14,56(%r15)
-3: lm %r2,%r5,16(%r15)
- br %r14
-
- .data
- .globl ftrace_dyn_func
-ftrace_dyn_func:
- .long ftrace_stub
- .previous
-
-#else /* CONFIG_DYNAMIC_FTRACE */
-
- .globl _mcount
-_mcount:
- stm %r2,%r5,16(%r15)
- bras %r1,2f
-0: .long ftrace_trace_function
-1: .long function_trace_stop
-2: l %r2,1b-0b(%r1)
- icm %r2,0xf,0(%r2)
- jnz 3f
- st %r14,56(%r15)
- lr %r0,%r15
- ahi %r15,-96
- l %r3,100(%r15)
- la %r2,0(%r14)
- st %r0,__SF_BACKCHAIN(%r15)
- la %r3,0(%r3)
- l %r14,0b-0b(%r1)
- l %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
bras %r1,0f
.long prepare_ftrace_return
0: l %r2,152(%r15)
@@ -185,14 +61,13 @@ _mcount:
l %r3,100(%r15)
basr %r14,%r4
st %r2,100(%r15)
+1:
#endif
ahi %r15,96
l %r14,56(%r15)
3: lm %r2,%r5,16(%r15)
br %r14
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
@@ -211,6 +86,4 @@ return_to_handler:
lm %r2,%r5,16(%r15)
br %r14
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#endif /* CONFIG_64BIT */
+#endif
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
new file mode 100644
index 00000000000..c37211c6092
--- /dev/null
+++ b/arch/s390/kernel/mcount64.S
@@ -0,0 +1,78 @@
+/*
+ * Copyright IBM Corp. 2008,2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <asm/asm-offsets.h>
+
+ .globl ftrace_stub
+ftrace_stub:
+ br %r14
+
+ .globl _mcount
+_mcount:
+#ifdef CONFIG_DYNAMIC_FTRACE
+ br %r14
+
+ .data
+ .globl ftrace_dyn_func
+ftrace_dyn_func:
+ .quad ftrace_stub
+ .previous
+
+ .globl ftrace_caller
+ftrace_caller:
+#endif
+ larl %r1,function_trace_stop
+ icm %r1,0xf,0(%r1)
+ bnzr %r14
+ stmg %r2,%r5,32(%r15)
+ stg %r14,112(%r15)
+ lgr %r1,%r15
+ aghi %r15,-160
+ stg %r1,__SF_BACKCHAIN(%r15)
+ lgr %r2,%r14
+ lg %r3,168(%r15)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ larl %r14,ftrace_dyn_func
+#else
+ larl %r14,ftrace_trace_function
+#endif
+ lg %r14,0(%r14)
+ basr %r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+ .globl ftrace_graph_caller
+ftrace_graph_caller:
+ # This unconditional branch gets runtime patched. Change only if
+ # you know what you are doing. See ftrace_enable_graph_caller().
+ j 0f
+#endif
+ lg %r2,272(%r15)
+ lg %r3,168(%r15)
+ brasl %r14,prepare_ftrace_return
+ stg %r2,168(%r15)
+0:
+#endif
+ aghi %r15,160
+ lmg %r2,%r5,32(%r15)
+ lg %r14,112(%r15)
+ br %r14
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+ .globl return_to_handler
+return_to_handler:
+ stmg %r2,%r5,32(%r15)
+ lgr %r1,%r15
+ aghi %r15,-160
+ stg %r1,__SF_BACKCHAIN(%r15)
+ brasl %r14,ftrace_return_to_handler
+ aghi %r15,160
+ lgr %r14,%r2
+ lmg %r2,%r5,32(%r15)
+ br %r14
+
+#endif
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index cbb897bc50b..9ed13a1ed37 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -156,15 +156,11 @@ __setup("condev=", condev_setup);
static void __init set_preferred_console(void)
{
- if (MACHINE_IS_KVM) {
+ if (MACHINE_IS_KVM)
add_preferred_console("hvc", 0, NULL);
- s390_virtio_console_init();
- return;
- }
-
- if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
+ else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
add_preferred_console("ttyS", 0, NULL);
- if (CONSOLE_IS_3270)
+ else if (CONSOLE_IS_3270)
add_preferred_console("tty3270", 0, NULL);
}
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 062bd64e65f..6b4fef877f9 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -536,4 +536,6 @@ void do_notify_resume(struct pt_regs *regs)
{
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index be2cae08340..56c16876b91 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -49,6 +49,7 @@
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
+#include <asm/cpu.h>
#include "entry.h"
static struct task_struct *current_set[NR_CPUS];
@@ -70,6 +71,23 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
static void smp_ext_bitcall(int, ec_bit_sig);
+static int cpu_stopped(int cpu)
+{
+ __u32 status;
+
+ switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
+ case sigp_order_code_accepted:
+ case sigp_status_stored:
+ /* Check for stopped and check stop state */
+ if (status & 0x50)
+ return 1;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
void smp_send_stop(void)
{
int cpu, rc;
@@ -86,7 +104,7 @@ void smp_send_stop(void)
rc = signal_processor(cpu, sigp_stop);
} while (rc == sigp_busy);
- while (!smp_cpu_not_running(cpu))
+ while (!cpu_stopped(cpu))
cpu_relax();
}
}
@@ -269,19 +287,6 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
#endif /* CONFIG_ZFCPDUMP */
-static int cpu_stopped(int cpu)
-{
- __u32 status;
-
- /* Check for stopped state */
- if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
- sigp_status_stored) {
- if (status & 0x40)
- return 1;
- }
- return 0;
-}
-
static int cpu_known(int cpu_id)
{
int cpu;
@@ -300,7 +305,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
logical_cpu = cpumask_first(&avail);
if (logical_cpu >= nr_cpu_ids)
return 0;
- for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
+ for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
if (cpu_known(cpu_id))
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
@@ -379,7 +384,7 @@ static void __init smp_detect_cpus(void)
/* Use sigp detection algorithm if sclp doesn't work. */
if (sclp_get_cpu_info(info)) {
smp_use_sigp_detection = 1;
- for (cpu = 0; cpu <= 65535; cpu++) {
+ for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
if (cpu == boot_cpu_addr)
continue;
__cpu_logical_map[CPU_INIT_NO] = cpu;
@@ -635,7 +640,7 @@ int __cpu_disable(void)
void __cpu_die(unsigned int cpu)
{
/* Wait until target cpu is down */
- while (!smp_cpu_not_running(cpu))
+ while (!cpu_stopped(cpu))
cpu_relax();
smp_free_lowcore(cpu);
pr_info("Processor %d stopped\n", cpu);
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
new file mode 100644
index 00000000000..086bee970ca
--- /dev/null
+++ b/arch/s390/kernel/suspend.c
@@ -0,0 +1,73 @@
+/*
+ * Suspend support specific for s390.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ */
+
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/pfn.h>
+#include <linux/mm.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/ipl.h>
+
+/*
+ * References to section boundaries
+ */
+extern const void __nosave_begin, __nosave_end;
+
+/*
+ * check if given pfn is in the 'nosave' or in the read only NSS section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
+ >> PAGE_SHIFT;
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+ if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ return 1;
+ if (pfn >= stext_pfn && pfn <= eshared_pfn) {
+ if (ipl_info.type == IPL_TYPE_NSS)
+ return 1;
+ } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
+ return 1;
+ return 0;
+}
+
+void save_processor_state(void)
+{
+ /* swsusp_arch_suspend() actually saves all cpu register contents.
+ * Machine checks must be disabled since swsusp_arch_suspend() stores
+ * register contents to their lowcore save areas. That's the same
+ * place where register contents on machine checks would be saved.
+ * To avoid register corruption disable machine checks.
+ * We must also disable machine checks in the new psw mask for
+ * program checks, since swsusp_arch_suspend() may generate program
+ * checks. Disabling machine checks for all other new psw masks is
+ * just paranoia.
+ */
+ local_mcck_disable();
+ /* Disable lowcore protection */
+ __ctl_clear_bit(0,28);
+ S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
+}
+
+void restore_processor_state(void)
+{
+ S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
+ /* Enable lowcore protection */
+ __ctl_set_bit(0,28);
+ local_mcck_enable();
+}
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
new file mode 100644
index 00000000000..7cd6b096f0d
--- /dev/null
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -0,0 +1,184 @@
+/*
+ * S390 64-bit swsusp implementation
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ * Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * Save register context in absolute 0 lowcore and call swsusp_save() to
+ * create in-memory kernel image. The context is saved in the designated
+ * "store status" memory locations (see POP).
+ * We return from this function twice. The first time during the suspend to
+ * disk process. The second time via the swsusp_arch_resume() function
+ * (see below) in the resume process.
+ * This function runs with disabled interrupts.
+ */
+ .section .text
+ .align 4
+ .globl swsusp_arch_suspend
+swsusp_arch_suspend:
+ stmg %r6,%r15,__SF_GPRS(%r15)
+ lgr %r1,%r15
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ stg %r1,__SF_BACKCHAIN(%r15)
+
+ /* Deactivate DAT */
+ stnsm __SF_EMPTY(%r15),0xfb
+
+ /* Store prefix register on stack */
+ stpx __SF_EMPTY(%r15)
+
+ /* Save prefix register contents for lowcore */
+ llgf %r4,__SF_EMPTY(%r15)
+
+ /* Get pointer to save area */
+ lghi %r1,0x1000
+
+ /* Store registers */
+ mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
+ stfpc 0x31c(%r1) /* store fpu control */
+ std 0,0x200(%r1) /* store f0 */
+ std 1,0x208(%r1) /* store f1 */
+ std 2,0x210(%r1) /* store f2 */
+ std 3,0x218(%r1) /* store f3 */
+ std 4,0x220(%r1) /* store f4 */
+ std 5,0x228(%r1) /* store f5 */
+ std 6,0x230(%r1) /* store f6 */
+ std 7,0x238(%r1) /* store f7 */
+ std 8,0x240(%r1) /* store f8 */
+ std 9,0x248(%r1) /* store f9 */
+ std 10,0x250(%r1) /* store f10 */
+ std 11,0x258(%r1) /* store f11 */
+ std 12,0x260(%r1) /* store f12 */
+ std 13,0x268(%r1) /* store f13 */
+ std 14,0x270(%r1) /* store f14 */
+ std 15,0x278(%r1) /* store f15 */
+ stam %a0,%a15,0x340(%r1) /* store access registers */
+ stctg %c0,%c15,0x380(%r1) /* store control registers */
+ stmg %r0,%r15,0x280(%r1) /* store general registers */
+
+ stpt 0x328(%r1) /* store timer */
+ stckc 0x330(%r1) /* store clock comparator */
+
+ /* Activate DAT */
+ stosm __SF_EMPTY(%r15),0x04
+
+ /* Set prefix page to zero */
+ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
+ spx __SF_EMPTY(%r15)
+
+ lghi %r2,0
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
+
+ /* Save image */
+ brasl %r14,swsusp_save
+
+ /* Restore prefix register and return */
+ lghi %r1,0x1000
+ spx 0x318(%r1)
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+ br %r14
+
+/*
+ * Restore saved memory image to correct place and restore register context.
+ * Then we return to the function that called swsusp_arch_suspend().
+ * swsusp_arch_resume() runs with disabled interrupts.
+ */
+ .globl swsusp_arch_resume
+swsusp_arch_resume:
+ stmg %r6,%r15,__SF_GPRS(%r15)
+ lgr %r1,%r15
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ stg %r1,__SF_BACKCHAIN(%r15)
+
+#ifdef CONFIG_SMP
+ /* Save boot cpu number */
+ brasl %r14,smp_get_phys_cpu_id
+ lgr %r10,%r2
+#endif
+ /* Deactivate DAT */
+ stnsm __SF_EMPTY(%r15),0xfb
+
+ /* Set prefix page to zero */
+ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
+ spx __SF_EMPTY(%r15)
+
+ /* Restore saved image */
+ larl %r1,restore_pblist
+ lg %r1,0(%r1)
+ ltgr %r1,%r1
+ jz 2f
+0:
+ lg %r2,8(%r1)
+ lg %r4,0(%r1)
+ lghi %r3,PAGE_SIZE
+ lghi %r5,PAGE_SIZE
+1:
+ mvcle %r2,%r4,0
+ jo 1b
+ lg %r1,16(%r1)
+ ltgr %r1,%r1
+ jnz 0b
+2:
+ ptlb /* flush tlb */
+
+ /* Restore registers */
+ lghi %r13,0x1000 /* %r13 = pointer to save area */
+
+ spt 0x328(%r13) /* reprogram timer */
+ //sckc 0x330(%r13) /* set clock comparator */
+
+ lctlg %c0,%c15,0x380(%r13) /* load control registers */
+ lam %a0,%a15,0x340(%r13) /* load access registers */
+
+ lfpc 0x31c(%r13) /* load fpu control */
+ ld 0,0x200(%r13) /* load f0 */
+ ld 1,0x208(%r13) /* load f1 */
+ ld 2,0x210(%r13) /* load f2 */
+ ld 3,0x218(%r13) /* load f3 */
+ ld 4,0x220(%r13) /* load f4 */
+ ld 5,0x228(%r13) /* load f5 */
+ ld 6,0x230(%r13) /* load f6 */
+ ld 7,0x238(%r13) /* load f7 */
+ ld 8,0x240(%r13) /* load f8 */
+ ld 9,0x248(%r13) /* load f9 */
+ ld 10,0x250(%r13) /* load f10 */
+ ld 11,0x258(%r13) /* load f11 */
+ ld 12,0x260(%r13) /* load f12 */
+ ld 13,0x268(%r13) /* load f13 */
+ ld 14,0x270(%r13) /* load f14 */
+ ld 15,0x278(%r13) /* load f15 */
+
+ /* Load old stack */
+ lg %r15,0x2f8(%r13)
+
+ /* Pointer to save area */
+ lghi %r13,0x1000
+
+#ifdef CONFIG_SMP
+ /* Switch CPUs */
+ lgr %r2,%r10 /* get cpu id */
+ llgf %r3,0x318(%r13)
+ brasl %r14,smp_switch_boot_cpu_in_resume
+#endif
+ /* Restore prefix register */
+ spx 0x318(%r13)
+
+ /* Activate DAT */
+ stosm __SF_EMPTY(%r15),0x04
+
+ /* Return 0 */
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+ br %r14
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4c8e9c47c8..54e327e9af0 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -60,6 +60,7 @@
#define TICK_SIZE tick
u64 sched_clock_base_cc = -1; /* Force to data section. */
+EXPORT_SYMBOL_GPL(sched_clock_base_cc);
static DEFINE_PER_CPU(struct clock_event_device, comparators);
@@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
*/
unsigned long long notrace sched_clock(void)
{
- return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
+ return (get_clock_monotonic() * 125) >> 9;
}
/*
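
[Editor's illustration, not part of the patch] The new sched_clock() converts the monotonic TOD clock value to nanoseconds with (value * 125) >> 9. A TOD clock unit is 1/4096 of a microsecond, so units * 1000/4096 = units * 125/512 nanoseconds, and dividing by 512 is a right shift by 9. A small stand-alone check of the arithmetic:

#include <stdio.h>

static unsigned long long tod_to_ns(unsigned long long tod_units)
{
	return (tod_units * 125) >> 9;
}

int main(void)
{
	/* 4096 TOD units = 1 microsecond = 1000 ns */
	printf("%llu\n", tod_to_ns(4096ULL));			/* 1000 */
	/* one second worth of TOD units */
	printf("%llu\n", tod_to_ns(4096ULL * 1000000));		/* 1000000000 */
	return 0;
}
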
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a53db23ee09..7315f9e67e1 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -52,55 +52,18 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_eshared = .; /* End of shareable data */
- . = ALIGN(16); /* Exception table */
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- } :data
-
- .data : { /* Data */
- DATA_DATA
- CONSTRUCTORS
- }
-
- . = ALIGN(PAGE_SIZE);
- .data_nosave : {
- __nosave_begin = .;
- *(.data.nosave)
- }
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
-
- . = ALIGN(PAGE_SIZE);
- .data.page_aligned : {
- *(.data.idt)
- }
+ EXCEPTION_TABLE(16) :data
- . = ALIGN(0x100);
- .data.cacheline_aligned : {
- *(.data.cacheline_aligned)
- }
+ RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
- . = ALIGN(0x100);
- .data.read_mostly : {
- *(.data.read_mostly)
- }
_edata = .; /* End of data section */
- . = ALIGN(THREAD_SIZE); /* init_task */
- .data.init_task : {
- *(.data.init_task)
- }
-
/* will be freed after init */
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
+
+ INIT_TEXT_SECTION(PAGE_SIZE)
+
/*
* .exit.text is discarded at runtime, not link time,
* to deal with references from __bug_table
@@ -111,49 +74,13 @@ SECTIONS
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
- .init.data : {
- INIT_DATA
- }
- . = ALIGN(0x100);
- .init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
- .initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
-
- .con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
- SECURITY_INIT
-
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(0x100);
- .init.ramfs : {
- __initramfs_start = .;
- *(.init.ramfs)
- . = ALIGN(2);
- __initramfs_end = .;
- }
-#endif
+ INIT_DATA_SECTION(0x100)
PERCPU(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
- /* BSS */
- .bss : {
- __bss_start = .;
- *(.bss)
- . = ALIGN(2);
- __bss_stop = .;
- }
+ BSS_SECTION(0, 2, 0)
_end = . ;